author     Ingo Molnar <mingo@elte.hu>  2008-08-11 02:59:21 -0400
committer  Ingo Molnar <mingo@elte.hu>  2008-08-11 02:59:21 -0400
commit     cf206bffbb7542df54043fad9898113172af99d8 (patch)
tree       c7e7ca9a93443b888f98a0c07e74751a1aa3c947 /drivers
parent     c1955a3d4762e7a9bf84035eb3c4886a900f0d15 (diff)
parent     796aadeb1b2db9b5d463946766c5bbfd7717158c (diff)

Merge branch 'linus' into sched/clock
Diffstat (limited to 'drivers')
-rw-r--r--drivers/Makefile1
-rw-r--r--drivers/acpi/asus_acpi.c4
-rw-r--r--drivers/acpi/processor_perflib.c21
-rw-r--r--drivers/ata/ata_piix.c1
-rw-r--r--drivers/ata/libata-core.c57
-rw-r--r--drivers/ata/libata-scsi.c34
-rw-r--r--drivers/ata/libata.h1
-rw-r--r--drivers/ata/pata_ali.c3
-rw-r--r--drivers/ata/pata_at32.c4
-rw-r--r--drivers/ata/pata_it821x.c270
-rw-r--r--drivers/ata/pata_via.c64
-rw-r--r--drivers/atm/iphase.c40
-rw-r--r--drivers/base/class.c1
-rw-r--r--drivers/block/aoe/aoenet.c2
-rw-r--r--drivers/block/cciss.c750
-rw-r--r--drivers/block/cciss.h2
-rw-r--r--drivers/block/cciss_scsi.c195
-rw-r--r--drivers/block/xen-blkfront.c4
-rw-r--r--drivers/bluetooth/bcm203x.c9
-rw-r--r--drivers/bluetooth/bfusb.c10
-rw-r--r--drivers/bluetooth/bpa10x.c10
-rw-r--r--drivers/bluetooth/btusb.c163
-rw-r--r--drivers/bluetooth/hci_usb.c15
-rw-r--r--drivers/cdrom/cdrom.c4
-rw-r--r--drivers/cdrom/gdrom.c4
-rw-r--r--drivers/char/Kconfig2
-rw-r--r--drivers/char/Makefile1
-rw-r--r--drivers/char/amiserial.c2
-rw-r--r--drivers/char/ds1620.c2
-rw-r--r--drivers/char/efirtc.c1
-rw-r--r--drivers/char/hvc_console.h2
-rw-r--r--drivers/char/hvcs.c2
-rw-r--r--drivers/char/hw_random/ixp4xx-rng.c2
-rw-r--r--drivers/char/ipmi/ipmi_si_intf.c4
-rw-r--r--drivers/char/mxser.c6
-rw-r--r--drivers/char/pcmcia/synclink_cs.c4
-rw-r--r--drivers/char/synclink.c4
-rw-r--r--drivers/char/synclink_gt.c5
-rw-r--r--drivers/char/synclinkmp.c4
-rw-r--r--drivers/char/tty_ldisc.c2
-rw-r--r--drivers/char/viocons.c1171
-rw-r--r--drivers/char/vt.c2
-rw-r--r--drivers/cpufreq/cpufreq.c3
-rw-r--r--drivers/cpufreq/cpufreq_conservative.c20
-rw-r--r--drivers/crypto/ixp4xx_crypto.c4
-rw-r--r--drivers/dma/ioat_dma.c2
-rw-r--r--drivers/dma/iop-adma.c2
-rw-r--r--drivers/firewire/fw-cdev.c29
-rw-r--r--drivers/firmware/iscsi_ibft_find.c1
-rw-r--r--drivers/gpu/drm/radeon/radeon_drv.c2
-rw-r--r--drivers/hwmon/Kconfig39
-rw-r--r--drivers/hwmon/Makefile1
-rw-r--r--drivers/hwmon/ad7414.c268
-rw-r--r--drivers/hwmon/adt7473.c16
-rw-r--r--drivers/hwmon/dme1737.c523
-rw-r--r--drivers/hwmon/f71882fg.c6
-rw-r--r--drivers/hwmon/hwmon-vid.c144
-rw-r--r--drivers/hwmon/it87.c45
-rw-r--r--drivers/hwmon/lm75.c282
-rw-r--r--drivers/hwmon/lm85.c672
-rw-r--r--drivers/hwmon/thmc50.c28
-rw-r--r--drivers/hwmon/w83627hf.c101
-rw-r--r--drivers/hwmon/w83791d.c24
-rw-r--r--drivers/i2c/busses/i2c-acorn.c2
-rw-r--r--drivers/i2c/busses/i2c-at91.c6
-rw-r--r--drivers/i2c/busses/i2c-davinci.c5
-rw-r--r--drivers/i2c/busses/i2c-ixp2000.c4
-rw-r--r--drivers/i2c/busses/i2c-pnx.c2
-rw-r--r--drivers/i2c/busses/i2c-pxa.c6
-rw-r--r--drivers/i2c/busses/i2c-s3c2410.c4
-rw-r--r--drivers/i2c/chips/isp1301_omap.c4
-rw-r--r--drivers/i2c/chips/menelaus.c5
-rw-r--r--drivers/ide/Kconfig10
-rw-r--r--drivers/ide/arm/ide_arm.c3
-rw-r--r--drivers/ide/arm/palm_bk3710.c2
-rw-r--r--drivers/ide/ide-cd.c30
-rw-r--r--drivers/ide/ide-disk.c11
-rw-r--r--drivers/ide/ide-dma.c6
-rw-r--r--drivers/ide/ide-floppy.c11
-rw-r--r--drivers/ide/ide-iops.c6
-rw-r--r--drivers/ide/ide-tape.c11
-rw-r--r--drivers/ide/pci/aec62xx.c2
-rw-r--r--drivers/ide/pci/alim15x3.c2
-rw-r--r--drivers/ide/pci/amd74xx.c2
-rw-r--r--drivers/ide/pci/atiixp.c2
-rw-r--r--drivers/ide/pci/cmd64x.c2
-rw-r--r--drivers/ide/pci/cs5520.c1
-rw-r--r--drivers/ide/pci/cs5535.c2
-rw-r--r--drivers/ide/pci/hpt366.c2
-rw-r--r--drivers/ide/pci/it8213.c2
-rw-r--r--drivers/ide/pci/it821x.c4
-rw-r--r--drivers/ide/pci/jmicron.c2
-rw-r--r--drivers/ide/pci/pdc202xx_new.c2
-rw-r--r--drivers/ide/pci/pdc202xx_old.c2
-rw-r--r--drivers/ide/pci/piix.c2
-rw-r--r--drivers/ide/pci/scc_pata.c2
-rw-r--r--drivers/ide/pci/serverworks.c8
-rw-r--r--drivers/ide/pci/siimage.c4
-rw-r--r--drivers/ide/pci/sis5513.c2
-rw-r--r--drivers/ide/pci/slc90e66.c2
-rw-r--r--drivers/ide/pci/tc86c001.c2
-rw-r--r--drivers/ide/pci/via82cxxx.c2
-rw-r--r--drivers/ide/ppc/pmac.c13
-rw-r--r--drivers/infiniband/core/cma.c37
-rw-r--r--drivers/infiniband/core/mad_rmpp.c2
-rw-r--r--drivers/infiniband/core/ucma.c14
-rw-r--r--drivers/infiniband/hw/cxgb3/cxio_hal.c6
-rw-r--r--drivers/infiniband/hw/cxgb3/iwch_provider.c28
-rw-r--r--drivers/infiniband/hw/cxgb3/iwch_provider.h7
-rw-r--r--drivers/infiniband/hw/cxgb3/iwch_qp.c25
-rw-r--r--drivers/infiniband/hw/ehca/ehca_reqs.c2
-rw-r--r--drivers/infiniband/hw/ipath/ipath_driver.c5
-rw-r--r--drivers/infiniband/hw/ipath/ipath_iba7220.c9
-rw-r--r--drivers/infiniband/hw/ipath/ipath_intr.c12
-rw-r--r--drivers/infiniband/hw/ipath/ipath_verbs.c6
-rw-r--r--drivers/infiniband/hw/mlx4/cq.c33
-rw-r--r--drivers/infiniband/hw/mlx4/qp.c2
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib_cm.c2
-rw-r--r--drivers/input/keyboard/aaed2000_kbd.c4
-rw-r--r--drivers/input/keyboard/corgikbd.c8
-rw-r--r--drivers/input/keyboard/jornada720_kbd.c4
-rw-r--r--drivers/input/keyboard/maple_keyb.c173
-rw-r--r--drivers/input/keyboard/omap-keypad.c11
-rw-r--r--drivers/input/keyboard/pxa27x_keypad.c5
-rw-r--r--drivers/input/keyboard/spitzkbd.c8
-rw-r--r--drivers/input/keyboard/tosakbd.c4
-rw-r--r--drivers/input/misc/ixp4xx-beeper.c2
-rw-r--r--drivers/input/mouse/rpcmouse.c2
-rw-r--r--drivers/input/serio/i8042-sparcio.h19
-rw-r--r--drivers/input/serio/rpckbd.c2
-rw-r--r--drivers/input/touchscreen/corgi_ts.c8
-rw-r--r--drivers/input/touchscreen/h3600_ts_input.c4
-rw-r--r--drivers/input/touchscreen/jornada720_ts.c4
-rw-r--r--drivers/input/touchscreen/mainstone-wm97xx.c2
-rw-r--r--drivers/isdn/Makefile2
-rw-r--r--drivers/isdn/gigaset/isocdata.c5
-rw-r--r--drivers/isdn/hardware/mISDN/hfcmulti.c37
-rw-r--r--drivers/isdn/hardware/mISDN/hfcpci.c2
-rw-r--r--drivers/isdn/hysdn/hysdn_pof.h2
-rw-r--r--drivers/isdn/mISDN/l1oip_core.c6
-rw-r--r--drivers/isdn/mISDN/socket.c4
-rw-r--r--drivers/leds/leds-ams-delta.c2
-rw-r--r--drivers/leds/leds-cm-x270.c4
-rw-r--r--drivers/leds/leds-corgi.c7
-rw-r--r--drivers/leds/leds-fsg.c2
-rw-r--r--drivers/leds/leds-h1940.c6
-rw-r--r--drivers/leds/leds-locomo.c2
-rw-r--r--drivers/leds/leds-s3c24xx.c6
-rw-r--r--drivers/leds/leds-spitz.c6
-rw-r--r--drivers/macintosh/mediabay.c1
-rw-r--r--drivers/md/bitmap.c2
-rw-r--r--drivers/md/dm-table.c29
-rw-r--r--drivers/md/md.c8
-rw-r--r--drivers/md/raid10.c3
-rw-r--r--drivers/md/raid5.c29
-rw-r--r--drivers/media/dvb/dvb-usb/cxusb.c7
-rw-r--r--drivers/media/dvb/frontends/Kconfig3
-rw-r--r--drivers/media/video/Kconfig2
-rw-r--r--drivers/media/video/arv.c2
-rw-r--r--drivers/media/video/cs5345.c2
-rw-r--r--drivers/media/video/cs53l32a.c2
-rw-r--r--drivers/media/video/em28xx/em28xx-cards.c1
-rw-r--r--drivers/media/video/gspca/conex.c4
-rw-r--r--drivers/media/video/gspca/etoms.c137
-rw-r--r--drivers/media/video/gspca/gspca.c12
-rw-r--r--drivers/media/video/gspca/gspca.h5
-rw-r--r--drivers/media/video/gspca/ov519.c476
-rw-r--r--drivers/media/video/gspca/pac7311.c54
-rw-r--r--drivers/media/video/gspca/sonixb.c2
-rw-r--r--drivers/media/video/gspca/sonixj.c287
-rw-r--r--drivers/media/video/gspca/spca505.c12
-rw-r--r--drivers/media/video/gspca/spca506.c12
-rw-r--r--drivers/media/video/gspca/spca508.c18
-rw-r--r--drivers/media/video/gspca/spca561.c42
-rw-r--r--drivers/media/video/gspca/vc032x.c4
-rw-r--r--drivers/media/video/gspca/zc3xx.c6
-rw-r--r--drivers/media/video/mt9v022.c2
-rw-r--r--drivers/media/video/planb.c0
-rw-r--r--drivers/media/video/planb.h0
-rw-r--r--drivers/media/video/pxa_camera.c62
-rw-r--r--drivers/media/video/saa7196.h0
-rw-r--r--drivers/media/video/sh_mobile_ceu_camera.c2
-rw-r--r--drivers/media/video/soc_camera.c26
-rw-r--r--drivers/media/video/soc_camera_platform.c2
-rw-r--r--drivers/media/video/uvc/uvc_ctrl.c33
-rw-r--r--drivers/media/video/uvc/uvc_driver.c26
-rw-r--r--drivers/media/video/uvc/uvc_video.c33
-rw-r--r--drivers/media/video/v4l2-dev.c4
-rw-r--r--drivers/media/video/videodev.c0
-rw-r--r--drivers/media/video/vino.c2
-rw-r--r--drivers/mfd/mcp-sa11x0.c6
-rw-r--r--drivers/mfd/ucb1x00-core.c2
-rw-r--r--drivers/mfd/ucb1x00-ts.c2
-rw-r--r--drivers/misc/Kconfig27
-rw-r--r--drivers/misc/Makefile1
-rw-r--r--drivers/misc/sgi-gru/Makefile3
-rw-r--r--drivers/misc/sgi-gru/gru.h67
-rw-r--r--drivers/misc/sgi-gru/gru_instructions.h669
-rw-r--r--drivers/misc/sgi-gru/grufault.c633
-rw-r--r--drivers/misc/sgi-gru/grufile.c485
-rw-r--r--drivers/misc/sgi-gru/gruhandles.h663
-rw-r--r--drivers/misc/sgi-gru/grukservices.c679
-rw-r--r--drivers/misc/sgi-gru/grukservices.h134
-rw-r--r--drivers/misc/sgi-gru/grulib.h97
-rw-r--r--drivers/misc/sgi-gru/grumain.c802
-rw-r--r--drivers/misc/sgi-gru/gruprocfs.c336
-rw-r--r--drivers/misc/sgi-gru/grutables.h609
-rw-r--r--drivers/misc/sgi-gru/grutlbpurge.c371
-rw-r--r--drivers/misc/sgi-xp/Makefile10
-rw-r--r--drivers/misc/sgi-xp/xp.h225
-rw-r--r--drivers/misc/sgi-xp/xp_main.c131
-rw-r--r--drivers/misc/sgi-xp/xp_sn2.c146
-rw-r--r--drivers/misc/sgi-xp/xp_uv.c72
-rw-r--r--drivers/misc/sgi-xp/xpc.h1200
-rw-r--r--drivers/misc/sgi-xp/xpc_channel.c1585
-rw-r--r--drivers/misc/sgi-xp/xpc_main.c974
-rw-r--r--drivers/misc/sgi-xp/xpc_partition.c928
-rw-r--r--drivers/misc/sgi-xp/xpc_sn2.c2404
-rw-r--r--drivers/misc/sgi-xp/xpc_uv.c1443
-rw-r--r--drivers/misc/sgi-xp/xpnet.c277
-rw-r--r--drivers/mmc/card/block.c17
-rw-r--r--drivers/mmc/card/mmc_test.c85
-rw-r--r--drivers/mmc/core/core.c5
-rw-r--r--drivers/mmc/host/at91_mci.c6
-rw-r--r--drivers/mmc/host/atmel-mci.c2
-rw-r--r--drivers/mmc/host/au1xmmc.c8
-rw-r--r--drivers/mmc/host/imxmmc.c4
-rw-r--r--drivers/mmc/host/omap.c15
-rw-r--r--drivers/mmc/host/pxamci.c4
-rw-r--r--drivers/mmc/host/s3cmci.c4
-rw-r--r--drivers/mmc/host/sdhci-pci.c3
-rw-r--r--drivers/mmc/host/sdhci.c15
-rw-r--r--drivers/mmc/host/sdhci.h2
-rw-r--r--drivers/mtd/chips/jedec_probe.c2
-rw-r--r--drivers/mtd/devices/mtd_dataflash.c130
-rw-r--r--drivers/mtd/maps/autcpu12-nvram.c4
-rw-r--r--drivers/mtd/maps/cdb89712.c2
-rw-r--r--drivers/mtd/maps/ceiva.c2
-rw-r--r--drivers/mtd/maps/h720x-flash.c2
-rw-r--r--drivers/mtd/maps/integrator-flash.c2
-rw-r--r--drivers/mtd/maps/ipaq-flash.c4
-rw-r--r--drivers/mtd/maps/ixp2000.c2
-rw-r--r--drivers/mtd/maps/omap_nor.c4
-rw-r--r--drivers/mtd/maps/pxa2xx-flash.c2
-rw-r--r--drivers/mtd/maps/sa1100-flash.c2
-rw-r--r--drivers/mtd/mtdsuper.c42
-rw-r--r--drivers/mtd/nand/Kconfig13
-rw-r--r--drivers/mtd/nand/ams-delta.c6
-rw-r--r--drivers/mtd/nand/atmel_nand.c4
-rw-r--r--drivers/mtd/nand/autcpu12.c4
-rw-r--r--drivers/mtd/nand/bf5xx_nand.c93
-rw-r--r--drivers/mtd/nand/cmx270_nand.c4
-rw-r--r--drivers/mtd/nand/diskonchip.c30
-rw-r--r--drivers/mtd/nand/edb7312.c2
-rw-r--r--drivers/mtd/nand/fsl_elbc_nand.c2
-rw-r--r--drivers/mtd/nand/h1910.c6
-rw-r--r--drivers/mtd/nand/nandsim.c66
-rw-r--r--drivers/mtd/nand/orion_nand.c2
-rw-r--r--drivers/mtd/nand/pxa3xx_nand.c4
-rw-r--r--drivers/mtd/nand/sharpsl.c2
-rw-r--r--drivers/mtd/nand/ts7250.c2
-rw-r--r--drivers/net/3c523.c4
-rw-r--r--drivers/net/3c527.c9
-rw-r--r--drivers/net/3c59x.c14
-rw-r--r--drivers/net/8390.c13
-rw-r--r--drivers/net/8390p.c19
-rw-r--r--drivers/net/Kconfig5
-rw-r--r--drivers/net/arm/am79c961a.c2
-rw-r--r--drivers/net/arm/at91_ether.c6
-rw-r--r--drivers/net/arm/ep93xx_eth.c4
-rw-r--r--drivers/net/arm/ixp4xx_eth.c4
-rw-r--r--drivers/net/atlx/atl1.c19
-rw-r--r--drivers/net/atp.c9
-rw-r--r--drivers/net/bfin_mac.c111
-rw-r--r--drivers/net/bnx2x_main.c2
-rw-r--r--drivers/net/bonding/bond_3ad.c1
-rw-r--r--drivers/net/bonding/bond_main.c394
-rw-r--r--drivers/net/bonding/bond_sysfs.c3
-rw-r--r--drivers/net/cs89x0.c2
-rw-r--r--drivers/net/cxgb3/t3_hw.c2
-rw-r--r--drivers/net/de620.c7
-rw-r--r--drivers/net/dm9000.c5
-rw-r--r--drivers/net/e1000e/e1000.h31
-rw-r--r--drivers/net/e1000e/ethtool.c44
-rw-r--r--drivers/net/e1000e/netdev.c246
-rw-r--r--drivers/net/e1000e/param.c31
-rw-r--r--drivers/net/eepro.c8
-rw-r--r--drivers/net/ehea/ehea_main.c4
-rw-r--r--drivers/net/enc28j60.c6
-rw-r--r--drivers/net/eth16i.c1
-rw-r--r--drivers/net/forcedeth.c174
-rw-r--r--drivers/net/fs_enet/mac-fcc.c2
-rw-r--r--drivers/net/gianfar.c4
-rw-r--r--drivers/net/hamradio/mkiss.c2
-rw-r--r--drivers/net/ifb.c12
-rw-r--r--drivers/net/igb/e1000_82575.c72
-rw-r--r--drivers/net/igb/e1000_82575.h1
-rw-r--r--drivers/net/igb/e1000_defines.h1
-rw-r--r--drivers/net/igb/e1000_hw.h1
-rw-r--r--drivers/net/igb/e1000_mac.c84
-rw-r--r--drivers/net/igb/e1000_mac.h5
-rw-r--r--drivers/net/igb/e1000_regs.h3
-rw-r--r--drivers/net/igb/igb_main.c30
-rw-r--r--drivers/net/irda/act200l-sir.c10
-rw-r--r--drivers/net/irda/actisys-sir.c2
-rw-r--r--drivers/net/irda/ali-ircc.c246
-rw-r--r--drivers/net/irda/donauboe.c68
-rw-r--r--drivers/net/irda/ep7211-sir.c2
-rw-r--r--drivers/net/irda/girbil-sir.c12
-rw-r--r--drivers/net/irda/irda-usb.c92
-rw-r--r--drivers/net/irda/irtty-sir.c10
-rw-r--r--drivers/net/irda/kingsun-sir.c2
-rw-r--r--drivers/net/irda/litelink-sir.c8
-rw-r--r--drivers/net/irda/ma600-sir.c16
-rw-r--r--drivers/net/irda/mcp2120-sir.c12
-rw-r--r--drivers/net/irda/nsc-ircc.c119
-rw-r--r--drivers/net/irda/nsc-ircc.h3
-rw-r--r--drivers/net/irda/old_belkin-sir.c8
-rw-r--r--drivers/net/irda/pxaficp_ir.c4
-rw-r--r--drivers/net/irda/sa1100_ir.c2
-rw-r--r--drivers/net/irda/sir_dev.c63
-rw-r--r--drivers/net/irda/sir_dongle.c2
-rw-r--r--drivers/net/irda/smsc-ircc2.c120
-rw-r--r--drivers/net/irda/tekram-sir.c10
-rw-r--r--drivers/net/irda/toim3232-sir.c10
-rw-r--r--drivers/net/irda/via-ircc.c80
-rw-r--r--drivers/net/irda/vlsi_ir.c92
-rw-r--r--drivers/net/irda/vlsi_ir.h2
-rw-r--r--drivers/net/irda/w83977af_ir.c62
-rw-r--r--drivers/net/ixp2000/ixp2400-msf.c4
-rw-r--r--drivers/net/ixp2000/ixpdev.c1
-rw-r--r--drivers/net/lp486e.c2
-rw-r--r--drivers/net/macb.c4
-rw-r--r--drivers/net/meth.c2
-rw-r--r--drivers/net/mv643xx_eth.c358
-rw-r--r--drivers/net/myri10ge/myri10ge.c1
-rw-r--r--drivers/net/myri10ge/myri10ge_mcp.h52
-rw-r--r--drivers/net/myri10ge/myri10ge_mcp_gen_header.h2
-rw-r--r--drivers/net/ne.c6
-rw-r--r--drivers/net/netconsole.c2
-rw-r--r--drivers/net/netx-eth.c11
-rw-r--r--drivers/net/netxen/netxen_nic.h41
-rw-r--r--drivers/net/netxen/netxen_nic_ctx.c9
-rw-r--r--drivers/net/netxen/netxen_nic_ethtool.c35
-rw-r--r--drivers/net/netxen/netxen_nic_hdr.h10
-rw-r--r--drivers/net/netxen/netxen_nic_hw.c103
-rw-r--r--drivers/net/netxen/netxen_nic_hw.h13
-rw-r--r--drivers/net/netxen/netxen_nic_init.c5
-rw-r--r--drivers/net/netxen/netxen_nic_main.c99
-rw-r--r--drivers/net/netxen/netxen_nic_niu.c16
-rw-r--r--drivers/net/netxen/netxen_nic_phan_reg.h4
-rw-r--r--drivers/net/ni5010.c1
-rw-r--r--drivers/net/ni52.c2
-rw-r--r--drivers/net/niu.c2
-rw-r--r--drivers/net/ps3_gelic_wireless.c12
-rw-r--r--drivers/net/qla3xxx.c23
-rw-r--r--drivers/net/qla3xxx.h105
-rw-r--r--drivers/net/s2io.c29
-rw-r--r--drivers/net/sh_eth.c257
-rw-r--r--drivers/net/sh_eth.h444
-rw-r--r--drivers/net/skfp/smt.c13
-rw-r--r--drivers/net/sky2.c103
-rw-r--r--drivers/net/sky2.h2
-rw-r--r--drivers/net/smc911x.h2
-rw-r--r--drivers/net/smc91x.h6
-rw-r--r--drivers/net/stnic.c2
-rw-r--r--drivers/net/sun3_82586.c7
-rw-r--r--drivers/net/tg3.c87
-rw-r--r--drivers/net/tokenring/3c359.c8
-rw-r--r--drivers/net/usb/dm9601.c52
-rw-r--r--drivers/net/usb/pegasus.c21
-rw-r--r--drivers/net/via-velocity.c301
-rw-r--r--drivers/net/via-velocity.h50
-rw-r--r--drivers/net/wan/Kconfig15
-rw-r--r--drivers/net/wan/Makefile11
-rw-r--r--drivers/net/wan/cosa.c293
-rw-r--r--drivers/net/wan/dscc4.c1
-rw-r--r--drivers/net/wan/farsync.c5
-rw-r--r--drivers/net/wan/farsync.h6
-rw-r--r--drivers/net/wan/hdlc.c25
-rw-r--r--drivers/net/wan/hdlc_cisco.c29
-rw-r--r--drivers/net/wan/hdlc_fr.c19
-rw-r--r--drivers/net/wan/hdlc_ppp.c15
-rw-r--r--drivers/net/wan/hdlc_raw.c15
-rw-r--r--drivers/net/wan/hdlc_raw_eth.c17
-rw-r--r--drivers/net/wan/hdlc_x25.c17
-rw-r--r--drivers/net/wan/hostess_sv11.c382
-rw-r--r--drivers/net/wan/lmc/lmc.h11
-rw-r--r--drivers/net/wan/lmc/lmc_debug.c7
-rw-r--r--drivers/net/wan/lmc/lmc_debug.h6
-rw-r--r--drivers/net/wan/lmc/lmc_ioctl.h2
-rw-r--r--drivers/net/wan/lmc/lmc_main.c672
-rw-r--r--drivers/net/wan/lmc/lmc_media.c66
-rw-r--r--drivers/net/wan/lmc/lmc_proto.c146
-rw-r--r--drivers/net/wan/lmc/lmc_proto.h14
-rw-r--r--drivers/net/wan/lmc/lmc_var.h360
-rw-r--r--drivers/net/wan/pc300.h228
-rw-r--r--drivers/net/wan/pc300_drv.c146
-rw-r--r--drivers/net/wan/sealevel.c361
-rw-r--r--drivers/net/wan/syncppp.c9
-rw-r--r--drivers/net/wan/z85230.c193
-rw-r--r--drivers/net/wan/z85230.h10
-rw-r--r--drivers/net/wd.c2
-rw-r--r--drivers/net/wireless/Kconfig2
-rw-r--r--drivers/net/wireless/Makefile1
-rw-r--r--drivers/net/wireless/ath5k/ath5k.h8
-rw-r--r--drivers/net/wireless/ath5k/base.c102
-rw-r--r--drivers/net/wireless/ath5k/base.h2
-rw-r--r--drivers/net/wireless/ath5k/debug.c2
-rw-r--r--drivers/net/wireless/ath5k/debug.h1
-rw-r--r--drivers/net/wireless/ath5k/hw.c243
-rw-r--r--drivers/net/wireless/ath5k/initvals.c4
-rw-r--r--drivers/net/wireless/ath5k/phy.c185
-rw-r--r--drivers/net/wireless/ath5k/reg.h934
-rw-r--r--drivers/net/wireless/ath9k/Kconfig8
-rw-r--r--drivers/net/wireless/ath9k/Makefile11
-rw-r--r--drivers/net/wireless/ath9k/ath9k.h1021
-rw-r--r--drivers/net/wireless/ath9k/beacon.c979
-rw-r--r--drivers/net/wireless/ath9k/core.c1923
-rw-r--r--drivers/net/wireless/ath9k/core.h1072
-rw-r--r--drivers/net/wireless/ath9k/hw.c8571
-rw-r--r--drivers/net/wireless/ath9k/hw.h969
-rw-r--r--drivers/net/wireless/ath9k/initvals.h3146
-rw-r--r--drivers/net/wireless/ath9k/main.c1470
-rw-r--r--drivers/net/wireless/ath9k/phy.c436
-rw-r--r--drivers/net/wireless/ath9k/phy.h543
-rw-r--r--drivers/net/wireless/ath9k/rc.c2126
-rw-r--r--drivers/net/wireless/ath9k/rc.h316
-rw-r--r--drivers/net/wireless/ath9k/recv.c1318
-rw-r--r--drivers/net/wireless/ath9k/reg.h1385
-rw-r--r--drivers/net/wireless/ath9k/regd.c1026
-rw-r--r--drivers/net/wireless/ath9k/regd.h412
-rw-r--r--drivers/net/wireless/ath9k/regd_common.h1915
-rw-r--r--drivers/net/wireless/ath9k/xmit.c2871
-rw-r--r--drivers/net/wireless/b43/main.c3
-rw-r--r--drivers/net/wireless/b43/xmit.c2
-rw-r--r--drivers/net/wireless/b43legacy/main.c5
-rw-r--r--drivers/net/wireless/b43legacy/xmit.c2
-rw-r--r--drivers/net/wireless/ipw2100.c3
-rw-r--r--drivers/net/wireless/ipw2200.c7
-rw-r--r--drivers/net/wireless/iwlwifi/Kconfig98
-rw-r--r--drivers/net/wireless/iwlwifi/Makefile13
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-3945-led.c33
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-3945-led.h1
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-3945.c19
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-4965.c158
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-5000.c71
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-agn-rs.c (renamed from drivers/net/wireless/iwlwifi/iwl-4965-rs.c)327
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-agn-rs.h (renamed from drivers/net/wireless/iwlwifi/iwl-4965-rs.h)23
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-agn.c (renamed from drivers/net/wireless/iwlwifi/iwl4965-base.c)236
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-commands.h44
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-core.c10
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-core.h3
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-csr.h10
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-debug.h15
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-debugfs.c11
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-dev.h22
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-eeprom.c3
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-hcmd.c2
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-led.c73
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-led.h1
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-power.c45
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-power.h33
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-prph.h12
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-rx.c59
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-scan.c3
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-sta.c4
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-tx.c122
-rw-r--r--drivers/net/wireless/iwlwifi/iwl3945-base.c63
-rw-r--r--drivers/net/wireless/libertas/main.c15
-rw-r--r--drivers/net/wireless/libertas/persistcfg.c30
-rw-r--r--drivers/net/wireless/mac80211_hwsim.c2
-rw-r--r--drivers/net/wireless/orinoco.c7
-rw-r--r--drivers/net/wireless/p54/p54.h2
-rw-r--r--drivers/net/wireless/p54/p54common.c24
-rw-r--r--drivers/net/wireless/prism54/isl_ioctl.c2
-rw-r--r--drivers/net/wireless/rt2x00/rt2500pci.c1
-rw-r--r--drivers/net/wireless/rt2x00/rt2500usb.c33
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00.h12
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00config.c6
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00debug.c17
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00dev.c7
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00lib.h7
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00mac.c84
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00queue.c37
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00queue.h2
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00usb.c32
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00usb.h22
-rw-r--r--drivers/net/wireless/rt2x00/rt61pci.c9
-rw-r--r--drivers/net/wireless/rt2x00/rt73usb.c56
-rw-r--r--drivers/net/wireless/rtl8187.h15
-rw-r--r--drivers/net/wireless/rtl8187_dev.c110
-rw-r--r--drivers/net/wireless/wavelan.c3
-rw-r--r--drivers/net/wireless/wavelan_cs.c6
-rw-r--r--drivers/net/wireless/zd1211rw/zd_mac.c1
-rw-r--r--drivers/net/xen-netfront.c2
-rw-r--r--drivers/pcmcia/at91_cf.c6
-rw-r--r--drivers/pcmcia/omap_cf.c6
-rw-r--r--drivers/pcmcia/pxa2xx_base.c6
-rw-r--r--drivers/pcmcia/pxa2xx_cm_x270.c2
-rw-r--r--drivers/pcmcia/pxa2xx_lubbock.c6
-rw-r--r--drivers/pcmcia/pxa2xx_mainstone.c6
-rw-r--r--drivers/pcmcia/pxa2xx_palmtx.c4
-rw-r--r--drivers/pcmcia/pxa2xx_sharpsl.c2
-rw-r--r--drivers/pcmcia/rsrc_nonstatic.c2
-rw-r--r--drivers/pcmcia/sa1100_assabet.c4
-rw-r--r--drivers/pcmcia/sa1100_badge4.c4
-rw-r--r--drivers/pcmcia/sa1100_cerf.c4
-rw-r--r--drivers/pcmcia/sa1100_h3600.c4
-rw-r--r--drivers/pcmcia/sa1100_jornada720.c2
-rw-r--r--drivers/pcmcia/sa1100_neponset.c4
-rw-r--r--drivers/pcmcia/sa1100_shannon.c4
-rw-r--r--drivers/pcmcia/sa1100_simpad.c4
-rw-r--r--drivers/pcmcia/sa1111_generic.c2
-rw-r--r--drivers/pcmcia/sa11xx_base.c2
-rw-r--r--drivers/pcmcia/soc_common.c4
-rw-r--r--drivers/pnp/support.c96
-rw-r--r--drivers/power/Kconfig7
-rw-r--r--drivers/power/Makefile1
-rw-r--r--drivers/power/olpc_battery.c273
-rw-r--r--drivers/power/palmtx_battery.c2
-rw-r--r--drivers/power/power_supply_sysfs.c1
-rw-r--r--drivers/power/tosa_battery.c486
-rw-r--r--drivers/regulator/Kconfig59
-rw-r--r--drivers/regulator/Makefile12
-rw-r--r--drivers/regulator/bq24022.c167
-rw-r--r--drivers/regulator/core.c1903
-rw-r--r--drivers/regulator/fixed.c129
-rw-r--r--drivers/regulator/virtual.c345
-rw-r--r--drivers/rtc/interface.c10
-rw-r--r--drivers/rtc/rtc-at91rm9200.c2
-rw-r--r--drivers/rtc/rtc-at91sam9.c4
-rw-r--r--drivers/rtc/rtc-bfin.c105
-rw-r--r--drivers/rtc/rtc-dev.c4
-rw-r--r--drivers/rtc/rtc-ep93xx.c2
-rw-r--r--drivers/rtc/rtc-s3c.c2
-rw-r--r--drivers/rtc/rtc-sa1100.c4
-rw-r--r--drivers/s390/block/dasd_alias.c4
-rw-r--r--drivers/s390/block/dasd_devmap.c16
-rw-r--r--drivers/s390/block/dasd_eckd.c147
-rw-r--r--drivers/s390/block/dasd_eckd.h184
-rw-r--r--drivers/s390/block/dasd_int.h1
-rw-r--r--drivers/s390/char/sclp.c6
-rw-r--r--drivers/s390/char/sclp_cmd.c5
-rw-r--r--drivers/s390/char/sclp_config.c13
-rw-r--r--drivers/s390/cio/idset.c8
-rw-r--r--drivers/s390/cio/qdio_main.c2
-rw-r--r--drivers/s390/cio/qdio_perf.c2
-rw-r--r--drivers/s390/cio/qdio_setup.c4
-rw-r--r--drivers/s390/kvm/kvm_virtio.c2
-rw-r--r--drivers/s390/net/qeth_core.h5
-rw-r--r--drivers/s390/net/qeth_core_main.c63
-rw-r--r--drivers/s390/net/qeth_l2_main.c50
-rw-r--r--drivers/s390/net/qeth_l3_main.c51
-rw-r--r--drivers/scsi/arm/acornscsi-io.S2
-rw-r--r--drivers/scsi/hptiop.c8
-rw-r--r--drivers/scsi/ide-scsi.c11
-rw-r--r--drivers/scsi/scsi_transport_spi.c8
-rw-r--r--drivers/scsi/ses.c18
-rw-r--r--drivers/scsi/sg.c2
-rw-r--r--drivers/serial/21285.c2
-rw-r--r--drivers/serial/8250.c4
-rw-r--r--drivers/serial/Makefile1
-rw-r--r--drivers/serial/atmel_serial.c6
-rw-r--r--drivers/serial/bfin_5xx.c2
-rw-r--r--drivers/serial/clps711x.c2
-rw-r--r--drivers/serial/cpm_uart/cpm_uart.h11
-rw-r--r--drivers/serial/cpm_uart/cpm_uart_core.c66
-rw-r--r--drivers/serial/crisv10.c79
-rw-r--r--drivers/serial/crisv10.h3
-rw-r--r--drivers/serial/imx.c4
-rw-r--r--drivers/serial/netx-serial.c4
-rw-r--r--drivers/serial/pxa.c4
-rw-r--r--drivers/serial/s3c2400.c4
-rw-r--r--drivers/serial/s3c2410.c4
-rw-r--r--drivers/serial/s3c2412.c4
-rw-r--r--drivers/serial/s3c2440.c4
-rw-r--r--drivers/serial/sa1100.c2
-rw-r--r--drivers/serial/samsung.c4
-rw-r--r--drivers/serial/serial_ks8695.c4
-rw-r--r--drivers/serial/sh-sci.h12
-rw-r--r--drivers/serial/v850e_uart.c548
-rw-r--r--drivers/sh/maple/maple.c302
-rw-r--r--drivers/spi/Kconfig6
-rw-r--r--drivers/spi/Makefile1
-rw-r--r--drivers/spi/atmel_spi.c23
-rw-r--r--drivers/spi/omap2_mcspi.c4
-rw-r--r--drivers/spi/omap_uwire.c6
-rw-r--r--drivers/spi/orion_spi.c574
-rw-r--r--drivers/spi/pxa2xx_spi.c11
-rw-r--r--drivers/spi/spi_imx.c7
-rw-r--r--drivers/spi/spi_s3c24xx.c31
-rw-r--r--drivers/spi/spi_s3c24xx_gpio.c6
-rw-r--r--drivers/usb/core/message.c2
-rw-r--r--drivers/usb/gadget/at91_udc.c9
-rw-r--r--drivers/usb/gadget/atmel_usba_udc.c4
-rw-r--r--drivers/usb/gadget/lh7a40x_udc.h2
-rw-r--r--drivers/usb/gadget/m66592-udc.c2
-rw-r--r--drivers/usb/gadget/omap_udc.c4
-rw-r--r--drivers/usb/gadget/pxa25x_udc.c2
-rw-r--r--drivers/usb/gadget/pxa25x_udc.h2
-rw-r--r--drivers/usb/gadget/pxa27x_udc.c6
-rw-r--r--drivers/usb/gadget/s3c2410_udc.c9
-rw-r--r--drivers/usb/host/ehci-q.c2
-rw-r--r--drivers/usb/host/ohci-at91.c7
-rw-r--r--drivers/usb/host/ohci-ep93xx.c3
-rw-r--r--drivers/usb/host/ohci-lh7a404.c2
-rw-r--r--drivers/usb/host/ohci-omap.c12
-rw-r--r--drivers/usb/host/ohci-pnx4008.c9
-rw-r--r--drivers/usb/host/ohci-pxa27x.c9
-rw-r--r--drivers/usb/host/ohci-s3c2410.c4
-rw-r--r--drivers/usb/host/ohci-sa1111.c6
-rw-r--r--drivers/usb/storage/freecom.c2
-rw-r--r--drivers/video/Makefile1
-rw-r--r--drivers/video/acornfb.c4
-rw-r--r--drivers/video/am200epd.c2
-rw-r--r--drivers/video/arkfb.c27
-rw-r--r--drivers/video/atmel_lcdfb.c6
-rw-r--r--drivers/video/aty/atyfb_base.c29
-rw-r--r--drivers/video/aty/radeon_accel.c4
-rw-r--r--drivers/video/aty/radeon_i2c.c3
-rw-r--r--drivers/video/backlight/hp680_bl.c2
-rw-r--r--drivers/video/backlight/omap1_bl.c6
-rw-r--r--drivers/video/backlight/platform_lcd.c4
-rw-r--r--drivers/video/backlight/pwm_bl.c10
-rw-r--r--drivers/video/clps711xfb.c4
-rw-r--r--drivers/video/console/.gitignore2
-rw-r--r--drivers/video/console/fbcon.c3
-rw-r--r--drivers/video/console/sticore.c30
-rw-r--r--drivers/video/cyber2000fb.c2
-rw-r--r--drivers/video/epson1355fb.c2
-rw-r--r--drivers/video/fsl-diu-fb.c8
-rw-r--r--drivers/video/gbefb.c50
-rw-r--r--drivers/video/hitfb.c2
-rw-r--r--drivers/video/imxfb.c4
-rw-r--r--drivers/video/matrox/matroxfb_base.c2
-rw-r--r--drivers/video/omap/blizzard.c6
-rw-r--r--drivers/video/omap/dispc.c6
-rw-r--r--drivers/video/omap/hwa742.c6
-rw-r--r--drivers/video/omap/lcd_h3.c4
-rw-r--r--drivers/video/omap/lcd_h4.c2
-rw-r--r--drivers/video/omap/lcd_inn1510.c4
-rw-r--r--drivers/video/omap/lcd_inn1610.c4
-rw-r--r--drivers/video/omap/lcd_osk.c6
-rw-r--r--drivers/video/omap/lcd_palmte.c4
-rw-r--r--drivers/video/omap/lcd_palmtt.c4
-rw-r--r--drivers/video/omap/lcd_palmz71.c2
-rw-r--r--drivers/video/omap/lcd_sx1.c8
-rw-r--r--drivers/video/omap/lcdc.c4
-rw-r--r--drivers/video/omap/omapfb_main.c5
-rw-r--r--drivers/video/omap/rfbi.c2
-rw-r--r--drivers/video/omap/sossi.c4
-rw-r--r--drivers/video/pnx4008/dum.h2
-rw-r--r--drivers/video/pnx4008/sdum.c2
-rw-r--r--drivers/video/pvr2fb.c6
-rw-r--r--drivers/video/pxafb.c10
-rw-r--r--drivers/video/s3c2410fb.c6
-rw-r--r--drivers/video/s3fb.c18
-rw-r--r--drivers/video/sa1100fb.c6
-rw-r--r--drivers/video/vt8623fb.c29
-rw-r--r--drivers/watchdog/Kconfig16
-rw-r--r--drivers/watchdog/Makefile4
-rw-r--r--drivers/watchdog/acquirewdt.c130
-rw-r--r--drivers/watchdog/advantechwdt.c162
-rw-r--r--drivers/watchdog/alim1535_wdt.c189
-rw-r--r--drivers/watchdog/alim7101_wdt.c228
-rw-r--r--drivers/watchdog/ar7_wdt.c37
-rw-r--r--drivers/watchdog/at32ap700x_wdt.c43
-rw-r--r--drivers/watchdog/at91rm9200_wdt.c111
-rw-r--r--drivers/watchdog/bfin_wdt.c147
-rw-r--r--drivers/watchdog/booke_wdt.c44
-rw-r--r--drivers/watchdog/cpu5wdt.c144
-rw-r--r--drivers/watchdog/davinci_wdt.c22
-rw-r--r--drivers/watchdog/ep93xx_wdt.c30
-rw-r--r--drivers/watchdog/eurotechwdt.c95
-rw-r--r--drivers/watchdog/geodewdt.c96
-rw-r--r--drivers/watchdog/hpwdt.c8
-rw-r--r--drivers/watchdog/i6300esb.c366
-rw-r--r--drivers/watchdog/iTCO_vendor.h15
-rw-r--r--drivers/watchdog/iTCO_vendor_support.c62
-rw-r--r--drivers/watchdog/iTCO_wdt.c310
-rw-r--r--drivers/watchdog/ib700wdt.c123
-rw-r--r--drivers/watchdog/ibmasr.c151
-rw-r--r--drivers/watchdog/indydog.c114
-rw-r--r--drivers/watchdog/iop_wdt.c64
-rw-r--r--drivers/watchdog/it8712f_wdt.c83
-rw-r--r--drivers/watchdog/ixp2000_wdt.c63
-rw-r--r--drivers/watchdog/ixp4xx_wdt.c69
-rw-r--r--drivers/watchdog/ks8695_wdt.c122
-rw-r--r--drivers/watchdog/machzwd.c108
-rw-r--r--drivers/watchdog/mixcomwd.c133
-rw-r--r--drivers/watchdog/mpc5200_wdt.c24
-rw-r--r--drivers/watchdog/mpc83xx_wdt.c230
-rw-r--r--drivers/watchdog/mpc8xx_wdt.c37
-rw-r--r--drivers/watchdog/mpc8xxx_wdt.c316
-rw-r--r--drivers/watchdog/mpcore_wdt.c73
-rw-r--r--drivers/watchdog/mtx-1_wdt.c111
-rw-r--r--drivers/watchdog/mv64x60_wdt.c21
-rw-r--r--drivers/watchdog/omap_wdt.c45
-rw-r--r--drivers/watchdog/pc87413_wdt.c249
-rw-r--r--drivers/watchdog/pcwd.c191
-rw-r--r--drivers/watchdog/pcwd_pci.c161
-rw-r--r--drivers/watchdog/pcwd_usb.c168
-rw-r--r--drivers/watchdog/pnx4008_wdt.c31
-rw-r--r--drivers/watchdog/rm9k_wdt.c34
-rw-r--r--drivers/watchdog/s3c2410_wdt.c139
-rw-r--r--drivers/watchdog/sa1100_wdt.c50
-rw-r--r--drivers/watchdog/sb_wdog.c92
-rw-r--r--drivers/watchdog/sbc60xxwdt.c223
-rw-r--r--drivers/watchdog/sbc7240_wdt.c70
-rw-r--r--drivers/watchdog/sbc8360.c46
-rw-r--r--drivers/watchdog/sbc_epx_c3.c22
-rw-r--r--drivers/watchdog/sc1200wdt.c208
-rw-r--r--drivers/watchdog/sc520_wdt.c162
-rw-r--r--drivers/watchdog/scx200_wdt.c65
-rw-r--r--drivers/watchdog/shwdt.c139
-rw-r--r--drivers/watchdog/smsc37b787_wdt.c448
-rw-r--r--drivers/watchdog/softdog.c87
-rw-r--r--drivers/watchdog/txx9wdt.c37
-rw-r--r--drivers/watchdog/w83627hf_wdt.c190
-rw-r--r--drivers/watchdog/w83697hf_wdt.c189
-rw-r--r--drivers/watchdog/w83877f_wdt.c199
-rw-r--r--drivers/watchdog/w83977f_wdt.c239
-rw-r--r--drivers/watchdog/wafer5823wdt.c132
-rw-r--r--drivers/watchdog/wd501p.h2
-rw-r--r--drivers/watchdog/wdrtas.c105
-rw-r--r--drivers/watchdog/wdt.c176
-rw-r--r--drivers/watchdog/wdt285.c33
-rw-r--r--drivers/watchdog/wdt977.c162
-rw-r--r--drivers/watchdog/wdt_pci.c299
730 files changed, 62104 insertions, 18599 deletions
diff --git a/drivers/Makefile b/drivers/Makefile
index 54ec5e718c0e..a280ab3d0833 100644
--- a/drivers/Makefile
+++ b/drivers/Makefile
@@ -97,3 +97,4 @@ obj-$(CONFIG_PPC_PS3) += ps3/
 obj-$(CONFIG_OF)		+= of/
 obj-$(CONFIG_SSB)		+= ssb/
 obj-$(CONFIG_VIRTIO)		+= virtio/
+obj-$(CONFIG_REGULATOR)	+= regulator/
diff --git a/drivers/acpi/asus_acpi.c b/drivers/acpi/asus_acpi.c
index 44ad90c03c2e..d3d0886d637f 100644
--- a/drivers/acpi/asus_acpi.c
+++ b/drivers/acpi/asus_acpi.c
@@ -78,9 +78,9 @@ MODULE_LICENSE("GPL");
 static uid_t asus_uid;
 static gid_t asus_gid;
 module_param(asus_uid, uint, 0);
-MODULE_PARM_DESC(asus_uid, "UID for entries in /proc/acpi/asus.\n");
+MODULE_PARM_DESC(asus_uid, "UID for entries in /proc/acpi/asus");
 module_param(asus_gid, uint, 0);
-MODULE_PARM_DESC(asus_gid, "GID for entries in /proc/acpi/asus.\n");
+MODULE_PARM_DESC(asus_gid, "GID for entries in /proc/acpi/asus");
 
 /* For each model, all features implemented,
  * those marked with R are relative to HOTK, A for absolute */
diff --git a/drivers/acpi/processor_perflib.c b/drivers/acpi/processor_perflib.c
index b4749969c6b4..0133af49cf06 100644
--- a/drivers/acpi/processor_perflib.c
+++ b/drivers/acpi/processor_perflib.c
@@ -64,7 +64,13 @@ static DEFINE_MUTEX(performance_mutex);
  * policy is adjusted accordingly.
  */
 
-static unsigned int ignore_ppc = 0;
+/* ignore_ppc:
+ * -1 -> cpufreq low level drivers not initialized -> _PSS, etc. not called yet
+ *       ignore _PPC
+ *  0 -> cpufreq low level drivers initialized -> consider _PPC values
+ *  1 -> ignore _PPC totally -> forced by user through boot param
+ */
+static unsigned int ignore_ppc = -1;
 module_param(ignore_ppc, uint, 0644);
 MODULE_PARM_DESC(ignore_ppc, "If the frequency of your machine gets wrongly" \
 		 "limited by BIOS, this should help");
@@ -72,7 +78,7 @@ MODULE_PARM_DESC(ignore_ppc, "If the frequency of your machine gets wrongly" \
 #define PPC_REGISTERED 1
 #define PPC_IN_USE 2
 
-static int acpi_processor_ppc_status = 0;
+static int acpi_processor_ppc_status;
 
 static int acpi_processor_ppc_notifier(struct notifier_block *nb,
 				       unsigned long event, void *data)
@@ -81,13 +87,18 @@ static int acpi_processor_ppc_notifier(struct notifier_block *nb,
 	struct acpi_processor *pr;
 	unsigned int ppc = 0;
 
-	if (ignore_ppc)
+	if (event == CPUFREQ_START && ignore_ppc <= 0) {
+		ignore_ppc = 0;
 		return 0;
+	}
 
-	mutex_lock(&performance_mutex);
+	if (ignore_ppc)
+		return 0;
 
 	if (event != CPUFREQ_INCOMPATIBLE)
-		goto out;
+		return 0;
+
+	mutex_lock(&performance_mutex);
 
 	pr = per_cpu(processors, policy->cpu);
 	if (!pr || !pr->performance)
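For reference (not stated in the hunk itself): the "forced by user through boot param" case in the new comment corresponds to passing this module parameter on the kernel command line, presumably as processor.ignore_ppc=1 for the built-in ACPI processor driver; the exact "processor." prefix is an assumption based on the module name and may differ in a given build.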
diff --git a/drivers/ata/ata_piix.c b/drivers/ata/ata_piix.c
index a90ae03f56b2..c294121fd69e 100644
--- a/drivers/ata/ata_piix.c
+++ b/drivers/ata/ata_piix.c
@@ -250,6 +250,7 @@ static const struct pci_device_id piix_pci_tbl[] = {
 	/* Mobile SATA Controller IDE (ICH8M), Apple */
 	{ 0x8086, 0x2828, 0x106b, 0x00a0, 0, 0, ich8m_apple_sata },
 	{ 0x8086, 0x2828, 0x106b, 0x00a1, 0, 0, ich8m_apple_sata },
+	{ 0x8086, 0x2828, 0x106b, 0x00a3, 0, 0, ich8m_apple_sata },
 	/* Mobile SATA Controller IDE (ICH8M) */
 	{ 0x8086, 0x2828, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_sata },
 	/* SATA Controller IDE (ICH9) */
diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
index 9bef1a84fe3f..5ba96c5052c8 100644
--- a/drivers/ata/libata-core.c
+++ b/drivers/ata/libata-core.c
@@ -120,7 +120,7 @@ static char ata_force_param_buf[PAGE_SIZE] __initdata;
 module_param_string(force, ata_force_param_buf, sizeof(ata_force_param_buf), 0);
 MODULE_PARM_DESC(force, "Force ATA configurations including cable type, link speed and transfer mode (see Documentation/kernel-parameters.txt for details)");
 
-int atapi_enabled = 1;
+static int atapi_enabled = 1;
 module_param(atapi_enabled, int, 0444);
 MODULE_PARM_DESC(atapi_enabled, "Enable discovery of ATAPI devices (0=off, 1=on)");
 
@@ -1132,6 +1132,8 @@ void ata_id_string(const u16 *id, unsigned char *s,
 {
 	unsigned int c;
 
+	BUG_ON(len & 1);
+
 	while (len > 0) {
 		c = id[ofs] >> 8;
 		*s = c;
@@ -1165,8 +1167,6 @@ void ata_id_c_string(const u16 *id, unsigned char *s,
 {
 	unsigned char *p;
 
-	WARN_ON(!(len & 1));
-
 	ata_id_string(id, s, ofs, len - 1);
 
 	p = s + strnlen(s, len - 1);
@@ -1886,6 +1886,23 @@ static u32 ata_pio_mask_no_iordy(const struct ata_device *adev)
 }
 
 /**
+ *	ata_do_dev_read_id - default ID read method
+ *	@dev: device
+ *	@tf: proposed taskfile
+ *	@id: data buffer
+ *
+ *	Issue the identify taskfile and hand back the buffer containing
+ *	identify data. For some RAID controllers and for pre ATA devices
+ *	this function is wrapped or replaced by the driver
+ */
+unsigned int ata_do_dev_read_id(struct ata_device *dev,
+					struct ata_taskfile *tf, u16 *id)
+{
+	return ata_exec_internal(dev, tf, NULL, DMA_FROM_DEVICE,
+				 id, sizeof(id[0]) * ATA_ID_WORDS, 0);
+}
+
+/**
  *	ata_dev_read_id - Read ID data from the specified device
  *	@dev: target device
  *	@p_class: pointer to class of the target device (may be changed)
@@ -1920,7 +1937,7 @@ int ata_dev_read_id(struct ata_device *dev, unsigned int *p_class,
 	if (ata_msg_ctl(ap))
 		ata_dev_printk(dev, KERN_DEBUG, "%s: ENTER\n", __func__);
 
- retry:
+retry:
 	ata_tf_init(dev, &tf);
 
 	switch (class) {
@@ -1948,8 +1965,11 @@ int ata_dev_read_id(struct ata_device *dev, unsigned int *p_class,
 	 */
 	tf.flags |= ATA_TFLAG_POLLING;
 
-	err_mask = ata_exec_internal(dev, &tf, NULL, DMA_FROM_DEVICE,
-				     id, sizeof(id[0]) * ATA_ID_WORDS, 0);
+	if (ap->ops->read_id)
+		err_mask = ap->ops->read_id(dev, &tf, id);
+	else
+		err_mask = ata_do_dev_read_id(dev, &tf, id);
+
 	if (err_mask) {
 		if (err_mask & AC_ERR_NODEV_HINT) {
 			ata_dev_printk(dev, KERN_DEBUG,
@@ -2142,6 +2162,16 @@ int ata_dev_configure(struct ata_device *dev)
 		return 0;
 	}
 
+	if ((!atapi_enabled || (ap->flags & ATA_FLAG_NO_ATAPI)) &&
+	    dev->class == ATA_DEV_ATAPI) {
+		ata_dev_printk(dev, KERN_WARNING,
+			"WARNING: ATAPI is %s, device ignored.\n",
+			atapi_enabled ? "not supported with this driver"
+				      : "disabled");
+		ata_dev_disable(dev);
+		return 0;
+	}
+
 	/* let ACPI work its magic */
 	rc = ata_acpi_on_devcfg(dev);
 	if (rc)
@@ -6088,16 +6118,20 @@ static int __init ata_init(void)
 
 	ata_wq = create_workqueue("ata");
 	if (!ata_wq)
-		return -ENOMEM;
+		goto free_force_tbl;
 
 	ata_aux_wq = create_singlethread_workqueue("ata_aux");
-	if (!ata_aux_wq) {
-		destroy_workqueue(ata_wq);
-		return -ENOMEM;
-	}
+	if (!ata_aux_wq)
+		goto free_wq;
 
 	printk(KERN_DEBUG "libata version " DRV_VERSION " loaded.\n");
 	return 0;
+
+free_wq:
+	destroy_workqueue(ata_wq);
+free_force_tbl:
+	kfree(ata_force_tbl);
+	return -ENOMEM;
 }
 
 static void __exit ata_exit(void)
@@ -6269,6 +6303,7 @@ EXPORT_SYMBOL_GPL(ata_host_resume);
 #endif /* CONFIG_PM */
 EXPORT_SYMBOL_GPL(ata_id_string);
 EXPORT_SYMBOL_GPL(ata_id_c_string);
+EXPORT_SYMBOL_GPL(ata_do_dev_read_id);
 EXPORT_SYMBOL_GPL(ata_scsi_simulate);
 
 EXPORT_SYMBOL_GPL(ata_pio_need_iordy);
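The new ->read_id hook exported above lets a controller driver post-process identify data at read time instead of patching it after the fact. A minimal sketch of a driver-side wrapper, assuming only the hook and ata_do_dev_read_id() as declared in this diff (the driver name "example" and the masked word are illustrative, not from the commit):

static unsigned int example_read_id(struct ata_device *adev,
				    struct ata_taskfile *tf, u16 *id)
{
	/* Let the default method issue IDENTIFY and fill the buffer */
	unsigned int err_mask = ata_do_dev_read_id(adev, tf, id);

	if (!err_mask)
		id[76] = 0;	/* e.g. hide NCQ support from the core */
	return err_mask;
}

static struct ata_port_operations example_port_ops = {
	.inherits	= &ata_bmdma_port_ops,
	.read_id	= example_read_id,
};

The pata_it821x conversion later in this commit is the in-tree user of this pattern.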
diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c
index f3b4b15a8dc4..b9d3ba423cb2 100644
--- a/drivers/ata/libata-scsi.c
+++ b/drivers/ata/libata-scsi.c
@@ -2551,36 +2551,6 @@ static struct ata_device *__ata_scsi_find_dev(struct ata_port *ap,
 }
 
 /**
- *	ata_scsi_dev_enabled - determine if device is enabled
- *	@dev: ATA device
- *
- *	Determine if commands should be sent to the specified device.
- *
- *	LOCKING:
- *	spin_lock_irqsave(host lock)
- *
- *	RETURNS:
- *	0 if commands are not allowed / 1 if commands are allowed
- */
-
-static int ata_scsi_dev_enabled(struct ata_device *dev)
-{
-	if (unlikely(!ata_dev_enabled(dev)))
-		return 0;
-
-	if (!atapi_enabled || (dev->link->ap->flags & ATA_FLAG_NO_ATAPI)) {
-		if (unlikely(dev->class == ATA_DEV_ATAPI)) {
-			ata_dev_printk(dev, KERN_WARNING,
-				"WARNING: ATAPI is %s, device ignored.\n",
-				atapi_enabled ? "not supported with this driver" : "disabled");
-			return 0;
-		}
-	}
-
-	return 1;
-}
-
-/**
  *	ata_scsi_find_dev - lookup ata_device from scsi_cmnd
  *	@ap: ATA port to which the device is attached
  *	@scsidev: SCSI device from which we derive the ATA device
@@ -2601,7 +2571,7 @@ ata_scsi_find_dev(struct ata_port *ap, const struct scsi_device *scsidev)
 {
 	struct ata_device *dev = __ata_scsi_find_dev(ap, scsidev);
 
-	if (unlikely(!dev || !ata_scsi_dev_enabled(dev)))
+	if (unlikely(!dev || !ata_dev_enabled(dev)))
 		return NULL;
 
 	return dev;
@@ -3622,7 +3592,7 @@ int ata_sas_queuecmd(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *),
 
 	ata_scsi_dump_cdb(ap, cmd);
 
-	if (likely(ata_scsi_dev_enabled(ap->link.device)))
+	if (likely(ata_dev_enabled(ap->link.device)))
 		rc = __ata_scsi_queuecmd(cmd, done, ap->link.device);
 	else {
 		cmd->result = (DID_BAD_TARGET << 16);
diff --git a/drivers/ata/libata.h b/drivers/ata/libata.h
index f6f9c28ec7f8..ade5c75b6144 100644
--- a/drivers/ata/libata.h
+++ b/drivers/ata/libata.h
@@ -66,7 +66,6 @@ enum {
 
 extern unsigned int ata_print_id;
 extern struct workqueue_struct *ata_aux_wq;
-extern int atapi_enabled;
 extern int atapi_passthru16;
 extern int libata_fua;
 extern int libata_noacpi;
diff --git a/drivers/ata/pata_ali.c b/drivers/ata/pata_ali.c
index 0f3e659db99a..5ca70fa1f587 100644
--- a/drivers/ata/pata_ali.c
+++ b/drivers/ata/pata_ali.c
@@ -550,8 +550,9 @@ static int ali_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
 		pci_read_config_byte(isa_bridge, 0x5E, &tmp);
 		if ((tmp & 0x1E) == 0x12)
 			ppi[0] = &info_20_udma;
-		pci_dev_put(isa_bridge);
 	}
+	pci_dev_put(isa_bridge);
+
 	return ata_pci_sff_init_one(pdev, ppi, &ali_sht, NULL);
 }
 
diff --git a/drivers/ata/pata_at32.c b/drivers/ata/pata_at32.c
index 82fb6e273169..ab61095093b9 100644
--- a/drivers/ata/pata_at32.c
+++ b/drivers/ata/pata_at32.c
@@ -24,8 +24,8 @@
 #include <linux/err.h>
 #include <linux/io.h>
 
-#include <asm/arch/board.h>
-#include <asm/arch/smc.h>
+#include <mach/board.h>
+#include <mach/smc.h>
 
 #define DRV_NAME "pata_at32"
 #define DRV_VERSION "0.0.3"
diff --git a/drivers/ata/pata_it821x.c b/drivers/ata/pata_it821x.c
index e10816931b2f..27843c70eb9d 100644
--- a/drivers/ata/pata_it821x.c
+++ b/drivers/ata/pata_it821x.c
@@ -80,7 +80,7 @@
 
 
 #define DRV_NAME "pata_it821x"
-#define DRV_VERSION "0.3.8"
+#define DRV_VERSION "0.4.0"
 
 struct it821x_dev
 {
@@ -425,6 +425,8 @@ static unsigned int it821x_smart_qc_issue(struct ata_queued_cmd *qc)
 	case ATA_CMD_WRITE_MULTI:
 	case ATA_CMD_WRITE_MULTI_EXT:
 	case ATA_CMD_ID_ATA:
+	case ATA_CMD_INIT_DEV_PARAMS:
+	case 0xFC:	/* Internal 'report rebuild state' */
 	/* Arguably should just no-op this one */
 	case ATA_CMD_SET_FEATURES:
 		return ata_sff_qc_issue(qc);
@@ -509,7 +511,7 @@ static void it821x_dev_config(struct ata_device *adev)
 
 	if (strstr(model_num, "Integrated Technology Express")) {
 		/* RAID mode */
-		printk(KERN_INFO "IT821x %sRAID%d volume",
+		ata_dev_printk(adev, KERN_INFO, "%sRAID%d volume",
 			adev->id[147]?"Bootable ":"",
 			adev->id[129]);
 		if (adev->id[129] != 1)
@@ -519,37 +521,51 @@ static void it821x_dev_config(struct ata_device *adev)
 		/* This is a controller firmware triggered funny, don't
 		   report the drive faulty! */
 		adev->horkage &= ~ATA_HORKAGE_DIAGNOSTIC;
+		/* No HPA in 'smart' mode */
+		adev->horkage |= ATA_HORKAGE_BROKEN_HPA;
 }
 
 /**
- *	it821x_ident_hack - Hack identify data up
- *	@ap: Port
+ *	it821x_read_id - Hack identify data up
+ *	@adev: device to read
+ *	@tf: proposed taskfile
+ *	@id: buffer for returned ident data
  *
- *	Walk the devices on this firmware driven port and slightly
+ *	Query the devices on this firmware driven port and slightly
  *	mash the identify data to stop us and common tools trying to
  *	use features not firmware supported. The firmware itself does
  *	some masking (eg SMART) but not enough.
- *
- *	This is a bit of an abuse of the cable method, but it is the
- *	only method called at the right time. We could modify the libata
- *	core specifically for ident hacking but while we have one offender
- *	it seems better to keep the fallout localised.
  */
 
-static int it821x_ident_hack(struct ata_port *ap)
+static unsigned int it821x_read_id(struct ata_device *adev,
+					struct ata_taskfile *tf, u16 *id)
 {
-	struct ata_device *adev;
-	ata_link_for_each_dev(adev, &ap->link) {
-		if (ata_dev_enabled(adev)) {
-			adev->id[84] &= ~(1 << 6);	/* No FUA */
-			adev->id[85] &= ~(1 << 10);	/* No HPA */
-			adev->id[76] = 0;		/* No NCQ/AN etc */
-		}
+	unsigned int err_mask;
+	unsigned char model_num[ATA_ID_PROD_LEN + 1];
+
+	err_mask = ata_do_dev_read_id(adev, tf, id);
+	if (err_mask)
+		return err_mask;
+	ata_id_c_string(id, model_num, ATA_ID_PROD, sizeof(model_num));
+
+	id[83] &= ~(1 << 12);	/* Cache flush is firmware handled */
+	id[83] &= ~(1 << 13);	/* Ditto for LBA48 flushes */
+	id[84] &= ~(1 << 6);	/* No FUA */
+	id[85] &= ~(1 << 10);	/* No HPA */
+	id[76] = 0;		/* No NCQ/AN etc */
+
+	if (strstr(model_num, "Integrated Technology Express")) {
+		/* Set feature bits the firmware neglects */
+		id[49] |= 0x0300;	/* LBA, DMA */
+		id[82] |= 0x0400;	/* LBA48 */
+		id[83] &= 0x7FFF;
+		id[83] |= 0x4000;	/* Word 83 is valid */
+		id[86] |= 0x0400;	/* LBA48 on */
+		id[ATA_ID_MAJOR_VER] |= 0x1F;
 	}
-	return ata_cable_unknown(ap);
+	return err_mask;
 }
 
-
 /**
  *	it821x_check_atapi_dma - ATAPI DMA handler
  *	@qc: Command we are about to issue
@@ -577,6 +593,136 @@ static int it821x_check_atapi_dma(struct ata_queued_cmd *qc)
 	return 0;
 }
 
+/**
+ *	it821x_display_disk - display disk setup
+ *	@n: Device number
+ *	@buf: Buffer block from firmware
+ *
+ *	Produce a nice informative display of the device setup as provided
+ *	by the firmware.
+ */
+
+static void it821x_display_disk(int n, u8 *buf)
+{
+	unsigned char id[41];
+	int mode = 0;
+	char *mtype;
+	char mbuf[8];
+	char *cbl = "(40 wire cable)";
+
+	static const char *types[5] = {
+		"RAID0", "RAID1" "RAID 0+1", "JBOD", "DISK"
+	};
+
+	if (buf[52] > 4)	/* No Disk */
+		return;
+
+	ata_id_c_string((u16 *)buf, id, 0, 41);
+
+	if (buf[51]) {
+		mode = ffs(buf[51]);
+		mtype = "UDMA";
+	} else if (buf[49]) {
+		mode = ffs(buf[49]);
+		mtype = "MWDMA";
+	}
+
+	if (buf[76])
+		cbl = "";
+
+	if (mode)
+		snprintf(mbuf, 8, "%5s%d", mtype, mode - 1);
+	else
+		strcpy(mbuf, "PIO");
+	if (buf[52] == 4)
+		printk(KERN_INFO "%d: %-6s %-8s %s %s\n",
+				n, mbuf, types[buf[52]], id, cbl);
+	else
+		printk(KERN_INFO "%d: %-6s %-8s Volume: %1d %s %s\n",
+				n, mbuf, types[buf[52]], buf[53], id, cbl);
+	if (buf[125] < 100)
+		printk(KERN_INFO "%d: Rebuilding: %d%%\n", n, buf[125]);
+}
+
+/**
+ *	it821x_firmware_command - issue firmware command
+ *	@ap: IT821x port to interrogate
+ *	@cmd: command
+ *	@len: length
+ *
+ *	Issue firmware commands expecting data back from the controller. We
+ *	use this to issue commands that do not go via the normal paths. Other
+ *	commands such as 0xFC can be issued normally.
+ */
+
+static u8 *it821x_firmware_command(struct ata_port *ap, u8 cmd, int len)
+{
+	u8 status;
+	int n = 0;
+	u16 *buf = kmalloc(len, GFP_KERNEL);
+	if (buf == NULL) {
+		printk(KERN_ERR "it821x_firmware_command: Out of memory\n");
+		return NULL;
+	}
+	/* This isn't quite a normal ATA command as we are talking to the
+	   firmware not the drives */
+	ap->ctl |= ATA_NIEN;
+	iowrite8(ap->ctl, ap->ioaddr.ctl_addr);
+	ata_wait_idle(ap);
+	iowrite8(ATA_DEVICE_OBS, ap->ioaddr.device_addr);
+	iowrite8(cmd, ap->ioaddr.command_addr);
+	udelay(1);
+	/* This should be almost immediate but a little paranoia goes a long
+	   way. */
+	while(n++ < 10) {
+		status = ioread8(ap->ioaddr.status_addr);
+		if (status & ATA_ERR) {
+			kfree(buf);
+			printk(KERN_ERR "it821x_firmware_command: rejected\n");
+			return NULL;
+		}
+		if (status & ATA_DRQ) {
+			ioread16_rep(ap->ioaddr.data_addr, buf, len/2);
+			return (u8 *)buf;
+		}
+		mdelay(1);
+	}
+	kfree(buf);
+	printk(KERN_ERR "it821x_firmware_command: timeout\n");
+	return NULL;
+}
+
+/**
+ *	it821x_probe_firmware - firmware reporting/setup
+ *	@ap: IT821x port being probed
+ *
+ *	Probe the firmware of the controller by issuing firmware command
+ *	0xFA and analysing the returned data.
+ */
+
+static void it821x_probe_firmware(struct ata_port *ap)
+{
+	u8 *buf;
+	int i;
+
+	/* This is a bit ugly as we can't just issue a task file to a device
+	   as this is controller magic */
+
+	buf = it821x_firmware_command(ap, 0xFA, 512);
+
+	if (buf != NULL) {
+		printk(KERN_INFO "pata_it821x: Firmware %02X/%02X/%02X%02X\n",
+				buf[505],
+				buf[506],
+				buf[507],
+				buf[508]);
+		for (i = 0; i < 4; i++)
+			it821x_display_disk(i, buf + 128 * i);
+		kfree(buf);
+	}
+}
+
+
 
 /**
  *	it821x_port_start - port setup
@@ -610,6 +756,8 @@ static int it821x_port_start(struct ata_port *ap)
 		/* Long I/O's although allowed in LBA48 space cause the
 		   onboard firmware to enter the twighlight zone */
 		/* No ATAPI DMA in this mode either */
+		if (ap->port_no == 0)
+			it821x_probe_firmware(ap);
 	}
 	/* Pull the current clocks from 0x50 */
 	if (conf & (1 << (1 + ap->port_no)))
@@ -631,6 +779,25 @@ static int it821x_port_start(struct ata_port *ap)
 	return 0;
 }
 
+/**
+ *	it821x_rdc_cable - Cable detect for RDC1010
+ *	@ap: port we are checking
+ *
+ *	Return the RDC1010 cable type. Unlike the IT821x we know how to do
+ *	this and can do host side cable detect
+ */
+
+static int it821x_rdc_cable(struct ata_port *ap)
+{
+	u16 r40;
+	struct pci_dev *pdev = to_pci_dev(ap->host->dev);
+
+	pci_read_config_word(pdev, 0x40, &r40);
+	if (r40 & (1 << (2 + ap->port_no)))
+		return ATA_CBL_PATA40;
+	return ATA_CBL_PATA80;
+}
+
 static struct scsi_host_template it821x_sht = {
 	ATA_BMDMA_SHT(DRV_NAME),
 };
@@ -641,9 +808,10 @@ static struct ata_port_operations it821x_smart_port_ops = {
 	.check_atapi_dma= it821x_check_atapi_dma,
 	.qc_issue	= it821x_smart_qc_issue,
 
-	.cable_detect	= it821x_ident_hack,
+	.cable_detect	= ata_cable_80wire,
 	.set_mode	= it821x_smart_set_mode,
 	.dev_config	= it821x_dev_config,
+	.read_id	= it821x_read_id,
 
 	.port_start	= it821x_port_start,
 };
@@ -664,8 +832,29 @@ static struct ata_port_operations it821x_passthru_port_ops = {
 	.port_start	= it821x_port_start,
 };
 
+static struct ata_port_operations it821x_rdc_port_ops = {
+	.inherits	= &ata_bmdma_port_ops,
+
+	.check_atapi_dma= it821x_check_atapi_dma,
+	.sff_dev_select	= it821x_passthru_dev_select,
+	.bmdma_start	= it821x_passthru_bmdma_start,
+	.bmdma_stop	= it821x_passthru_bmdma_stop,
+	.qc_issue	= it821x_passthru_qc_issue,
+
+	.cable_detect	= it821x_rdc_cable,
+	.set_piomode	= it821x_passthru_set_piomode,
+	.set_dmamode	= it821x_passthru_set_dmamode,
+
+	.port_start	= it821x_port_start,
+};
+
 static void it821x_disable_raid(struct pci_dev *pdev)
 {
+	/* Neither the RDC nor the IT8211 */
+	if (pdev->vendor != PCI_VENDOR_ID_ITE ||
+	    pdev->device != PCI_DEVICE_ID_ITE_8212)
+		return;
+
 	/* Reset local CPU, and set BIOS not ready */
 	pci_write_config_byte(pdev, 0x5E, 0x01);
 
@@ -690,6 +879,7 @@ static int it821x_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
 		.flags = ATA_FLAG_SLAVE_POSS,
 		.pio_mask = 0x1f,
 		.mwdma_mask = 0x07,
+		.udma_mask = ATA_UDMA6,
 		.port_ops = &it821x_smart_port_ops
 	};
 	static const struct ata_port_info info_passthru = {
@@ -699,6 +889,13 @@ static int it821x_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
699 .udma_mask = ATA_UDMA6, 889 .udma_mask = ATA_UDMA6,
700 .port_ops = &it821x_passthru_port_ops 890 .port_ops = &it821x_passthru_port_ops
701 }; 891 };
892 static const struct ata_port_info info_rdc = {
893 .flags = ATA_FLAG_SLAVE_POSS,
894 .pio_mask = 0x1f,
895 .mwdma_mask = 0x07,
896 /* No UDMA */
897 .port_ops = &it821x_rdc_port_ops
898 };
702 899
703 const struct ata_port_info *ppi[] = { NULL, NULL }; 900 const struct ata_port_info *ppi[] = { NULL, NULL };
704 static char *mode[2] = { "pass through", "smart" }; 901 static char *mode[2] = { "pass through", "smart" };
@@ -707,21 +904,25 @@ static int it821x_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
707 rc = pcim_enable_device(pdev); 904 rc = pcim_enable_device(pdev);
708 if (rc) 905 if (rc)
709 return rc; 906 return rc;
907
908 if (pdev->vendor == PCI_VENDOR_ID_RDC) {
909 ppi[0] = &info_rdc;
910 } else {
911 /* Force the card into bypass mode if so requested */
912 if (it8212_noraid) {
913 printk(KERN_INFO DRV_NAME ": forcing bypass mode.\n");
914 it821x_disable_raid(pdev);
915 }
916 pci_read_config_byte(pdev, 0x50, &conf);
917 conf &= 1;
710 918
711 /* Force the card into bypass mode if so requested */ 919 printk(KERN_INFO DRV_NAME": controller in %s mode.\n",
712 if (it8212_noraid) { 920 mode[conf]);
713 printk(KERN_INFO DRV_NAME ": forcing bypass mode.\n"); 921 if (conf == 0)
714 it821x_disable_raid(pdev); 922 ppi[0] = &info_passthru;
923 else
924 ppi[0] = &info_smart;
715 } 925 }
716 pci_read_config_byte(pdev, 0x50, &conf);
717 conf &= 1;
718
719 printk(KERN_INFO DRV_NAME ": controller in %s mode.\n", mode[conf]);
720 if (conf == 0)
721 ppi[0] = &info_passthru;
722 else
723 ppi[0] = &info_smart;
724
725 return ata_pci_sff_init_one(pdev, ppi, &it821x_sht, NULL); 926 return ata_pci_sff_init_one(pdev, ppi, &it821x_sht, NULL);
726} 927}
727 928
@@ -745,6 +946,7 @@ static int it821x_reinit_one(struct pci_dev *pdev)
745static const struct pci_device_id it821x[] = { 946static const struct pci_device_id it821x[] = {
746 { PCI_VDEVICE(ITE, PCI_DEVICE_ID_ITE_8211), }, 947 { PCI_VDEVICE(ITE, PCI_DEVICE_ID_ITE_8211), },
747 { PCI_VDEVICE(ITE, PCI_DEVICE_ID_ITE_8212), }, 948 { PCI_VDEVICE(ITE, PCI_DEVICE_ID_ITE_8212), },
949 { PCI_VDEVICE(RDC, 0x1010), },
748 950
749 { }, 951 { },
750}; 952};
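
The it821x_rdc_cable() helper added above decodes host-side cable detection from PCI configuration word 0x40: bit 2 reports a 40-wire cable on port 0 and bit 3 reports one on port 1. A minimal sketch of the same decode, assuming the register value has already been read (the helper name and standalone form are illustrative, not part of the patch):

/* Sketch only: decode the RDC1010 cable-detect bits from config word 0x40.
 * The bit layout follows it821x_rdc_cable() above. */
static int rdc_cable_is_40wire(unsigned short r40, int port_no)
{
	/* bit 2 => port 0 has a 40-wire cable, bit 3 => port 1 */
	return (r40 & (1 << (2 + port_no))) != 0;
}
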
diff --git a/drivers/ata/pata_via.c b/drivers/ata/pata_via.c
index 708ed144ede9..57d951b11f2d 100644
--- a/drivers/ata/pata_via.c
+++ b/drivers/ata/pata_via.c
@@ -98,7 +98,8 @@ static const struct via_isa_bridge {
98 u8 rev_max; 98 u8 rev_max;
99 u16 flags; 99 u16 flags;
100} via_isa_bridges[] = { 100} via_isa_bridges[] = {
101 { "vx800", PCI_DEVICE_ID_VIA_VX800, 0x00, 0x2f, VIA_UDMA_133 | VIA_BAD_AST }, 101 { "vx800", PCI_DEVICE_ID_VIA_VX800, 0x00, 0x2f, VIA_UDMA_133 |
102 VIA_BAD_AST | VIA_SATA_PATA },
102 { "vt8237s", PCI_DEVICE_ID_VIA_8237S, 0x00, 0x2f, VIA_UDMA_133 | VIA_BAD_AST }, 103 { "vt8237s", PCI_DEVICE_ID_VIA_8237S, 0x00, 0x2f, VIA_UDMA_133 | VIA_BAD_AST },
103 { "vt8251", PCI_DEVICE_ID_VIA_8251, 0x00, 0x2f, VIA_UDMA_133 | VIA_BAD_AST }, 104 { "vt8251", PCI_DEVICE_ID_VIA_8251, 0x00, 0x2f, VIA_UDMA_133 | VIA_BAD_AST },
104 { "cx700", PCI_DEVICE_ID_VIA_CX700, 0x00, 0x2f, VIA_UDMA_133 | VIA_BAD_AST | VIA_SATA_PATA }, 105 { "cx700", PCI_DEVICE_ID_VIA_CX700, 0x00, 0x2f, VIA_UDMA_133 | VIA_BAD_AST | VIA_SATA_PATA },
@@ -322,6 +323,65 @@ static void via_set_dmamode(struct ata_port *ap, struct ata_device *adev)
322 via_do_set_mode(ap, adev, adev->dma_mode, tclock[mode], set_ast, udma[mode]); 323 via_do_set_mode(ap, adev, adev->dma_mode, tclock[mode], set_ast, udma[mode]);
323} 324}
324 325
326/**
327 * via_ata_tf_load - send taskfile registers to host controller
328 * @ap: Port to which output is sent
329 * @tf: ATA taskfile register set
330 *
331 * Outputs ATA taskfile to standard ATA host controller.
332 *
333 * Note: This works around an internal bug of VIA chipsets, which
334 * reset the device register after the IEN bit in the ctl register
335 * is changed.
336 */
337static void via_ata_tf_load(struct ata_port *ap, const struct ata_taskfile *tf)
338{
339 struct ata_ioports *ioaddr = &ap->ioaddr;
340 unsigned int is_addr = tf->flags & ATA_TFLAG_ISADDR;
341
342 if (tf->ctl != ap->last_ctl) {
343 iowrite8(tf->ctl, ioaddr->ctl_addr);
344 iowrite8(tf->device, ioaddr->device_addr);
345 ap->last_ctl = tf->ctl;
346 ata_wait_idle(ap);
347 }
348
349 if (is_addr && (tf->flags & ATA_TFLAG_LBA48)) {
350 iowrite8(tf->hob_feature, ioaddr->feature_addr);
351 iowrite8(tf->hob_nsect, ioaddr->nsect_addr);
352 iowrite8(tf->hob_lbal, ioaddr->lbal_addr);
353 iowrite8(tf->hob_lbam, ioaddr->lbam_addr);
354 iowrite8(tf->hob_lbah, ioaddr->lbah_addr);
355 VPRINTK("hob: feat 0x%X nsect 0x%X, lba 0x%X 0x%X 0x%X\n",
356 tf->hob_feature,
357 tf->hob_nsect,
358 tf->hob_lbal,
359 tf->hob_lbam,
360 tf->hob_lbah);
361 }
362
363 if (is_addr) {
364 iowrite8(tf->feature, ioaddr->feature_addr);
365 iowrite8(tf->nsect, ioaddr->nsect_addr);
366 iowrite8(tf->lbal, ioaddr->lbal_addr);
367 iowrite8(tf->lbam, ioaddr->lbam_addr);
368 iowrite8(tf->lbah, ioaddr->lbah_addr);
369 VPRINTK("feat 0x%X nsect 0x%X lba 0x%X 0x%X 0x%X\n",
370 tf->feature,
371 tf->nsect,
372 tf->lbal,
373 tf->lbam,
374 tf->lbah);
375 }
376
377 if (tf->flags & ATA_TFLAG_DEVICE) {
378 iowrite8(tf->device, ioaddr->device_addr);
379 VPRINTK("device 0x%X\n", tf->device);
380 }
381
382 ata_wait_idle(ap);
383}
384
325static struct scsi_host_template via_sht = { 385static struct scsi_host_template via_sht = {
326 ATA_BMDMA_SHT(DRV_NAME), 386 ATA_BMDMA_SHT(DRV_NAME),
327}; 387};
@@ -332,11 +392,13 @@ static struct ata_port_operations via_port_ops = {
332 .set_piomode = via_set_piomode, 392 .set_piomode = via_set_piomode,
333 .set_dmamode = via_set_dmamode, 393 .set_dmamode = via_set_dmamode,
334 .prereset = via_pre_reset, 394 .prereset = via_pre_reset,
395 .sff_tf_load = via_ata_tf_load,
335}; 396};
336 397
337static struct ata_port_operations via_port_ops_noirq = { 398static struct ata_port_operations via_port_ops_noirq = {
338 .inherits = &via_port_ops, 399 .inherits = &via_port_ops,
339 .sff_data_xfer = ata_sff_data_xfer_noirq, 400 .sff_data_xfer = ata_sff_data_xfer_noirq,
401 .sff_tf_load = via_ata_tf_load,
340}; 402};
341 403
342/** 404/**
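
The essential part of the via_ata_tf_load() change is its first block: whenever the ctl value changes (typically to toggle IEN), the device register is rewritten immediately afterwards, because the affected VIA chipsets clobber it. A condensed sketch of just that step, reusing the patch's own field names and libata calls:

/* Condensed sketch of the workaround step only; the full function above
 * also loads the LBA48, address and device fields. */
static void via_write_ctl_and_restore_device(struct ata_port *ap,
					     const struct ata_taskfile *tf)
{
	struct ata_ioports *ioaddr = &ap->ioaddr;

	if (tf->ctl != ap->last_ctl) {
		iowrite8(tf->ctl, ioaddr->ctl_addr);       /* may clobber the device reg */
		iowrite8(tf->device, ioaddr->device_addr); /* so rewrite it straight away */
		ap->last_ctl = tf->ctl;
		ata_wait_idle(ap);
	}
}
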
diff --git a/drivers/atm/iphase.c b/drivers/atm/iphase.c
index 24df73ad326d..088885ed51b9 100644
--- a/drivers/atm/iphase.c
+++ b/drivers/atm/iphase.c
@@ -156,8 +156,8 @@ static void ia_hack_tcq(IADEV *dev) {
156 } 156 }
157 iavcc_r->vc_desc_cnt--; 157 iavcc_r->vc_desc_cnt--;
158 dev->desc_tbl[desc1 -1].timestamp = 0; 158 dev->desc_tbl[desc1 -1].timestamp = 0;
159 IF_EVENT(printk("ia_hack: return_q skb = 0x%x desc = %d\n", 159 IF_EVENT(printk("ia_hack: return_q skb = 0x%p desc = %d\n",
160 (u32)dev->desc_tbl[desc1 -1].txskb, desc1);) 160 dev->desc_tbl[desc1 -1].txskb, desc1);)
161 if (iavcc_r->pcr < dev->rate_limit) { 161 if (iavcc_r->pcr < dev->rate_limit) {
162 IA_SKB_STATE (dev->desc_tbl[desc1-1].txskb) |= IA_TX_DONE; 162 IA_SKB_STATE (dev->desc_tbl[desc1-1].txskb) |= IA_TX_DONE;
163 if (ia_enque_rtn_q(&dev->tx_return_q, dev->desc_tbl[desc1 -1]) < 0) 163 if (ia_enque_rtn_q(&dev->tx_return_q, dev->desc_tbl[desc1 -1]) < 0)
@@ -527,8 +527,8 @@ static int ia_cbr_setup (IADEV *dev, struct atm_vcc *vcc) {
527 inc = 0; 527 inc = 0;
528 testSlot = idealSlot; 528 testSlot = idealSlot;
529 TstSchedTbl = (u16*)(SchedTbl+testSlot); //set index and read in value 529 TstSchedTbl = (u16*)(SchedTbl+testSlot); //set index and read in value
530 IF_CBR(printk("CBR Testslot 0x%x AT Location 0x%x, NumToAssign=%d\n", 530 IF_CBR(printk("CBR Testslot 0x%x AT Location 0x%p, NumToAssign=%d\n",
531 testSlot, (u32)TstSchedTbl,toBeAssigned);) 531 testSlot, TstSchedTbl,toBeAssigned);)
532 memcpy((caddr_t)&cbrVC,(caddr_t)TstSchedTbl,sizeof(cbrVC)); 532 memcpy((caddr_t)&cbrVC,(caddr_t)TstSchedTbl,sizeof(cbrVC));
533 while (cbrVC) // If another VC at this location, we have to keep looking 533 while (cbrVC) // If another VC at this location, we have to keep looking
534 { 534 {
@@ -536,8 +536,8 @@ static int ia_cbr_setup (IADEV *dev, struct atm_vcc *vcc) {
536 testSlot = idealSlot - inc; 536 testSlot = idealSlot - inc;
537 if (testSlot < 0) { // Wrap if necessary 537 if (testSlot < 0) { // Wrap if necessary
538 testSlot += dev->CbrTotEntries; 538 testSlot += dev->CbrTotEntries;
539 IF_CBR(printk("Testslot Wrap. STable Start=0x%x,Testslot=%d\n", 539 IF_CBR(printk("Testslot Wrap. STable Start=0x%p,Testslot=%d\n",
540 (u32)SchedTbl,testSlot);) 540 SchedTbl,testSlot);)
541 } 541 }
542 TstSchedTbl = (u16 *)(SchedTbl + testSlot); // set table index 542 TstSchedTbl = (u16 *)(SchedTbl + testSlot); // set table index
543 memcpy((caddr_t)&cbrVC,(caddr_t)TstSchedTbl,sizeof(cbrVC)); 543 memcpy((caddr_t)&cbrVC,(caddr_t)TstSchedTbl,sizeof(cbrVC));
@@ -552,8 +552,8 @@ static int ia_cbr_setup (IADEV *dev, struct atm_vcc *vcc) {
552 } 552 }
553 // set table index and read in value 553 // set table index and read in value
554 TstSchedTbl = (u16*)(SchedTbl + testSlot); 554 TstSchedTbl = (u16*)(SchedTbl + testSlot);
555 IF_CBR(printk("Reading CBR Tbl from 0x%x, CbrVal=0x%x Iteration %d\n", 555 IF_CBR(printk("Reading CBR Tbl from 0x%p, CbrVal=0x%x Iteration %d\n",
556 (u32)TstSchedTbl,cbrVC,inc);) 556 TstSchedTbl,cbrVC,inc);)
557 memcpy((caddr_t)&cbrVC,(caddr_t)TstSchedTbl,sizeof(cbrVC)); 557 memcpy((caddr_t)&cbrVC,(caddr_t)TstSchedTbl,sizeof(cbrVC));
558 } /* while */ 558 } /* while */
559 // Move this VCI number into this location of the CBR Sched table. 559 // Move this VCI number into this location of the CBR Sched table.
@@ -1427,11 +1427,11 @@ static int rx_init(struct atm_dev *dev)
1427 /* We know this is 32bit bus addressed so the following is safe */ 1427 /* We know this is 32bit bus addressed so the following is safe */
1428 writel(iadev->rx_dle_dma & 0xfffff000, 1428 writel(iadev->rx_dle_dma & 0xfffff000,
1429 iadev->dma + IPHASE5575_RX_LIST_ADDR); 1429 iadev->dma + IPHASE5575_RX_LIST_ADDR);
1430 IF_INIT(printk("Tx Dle list addr: 0x%08x value: 0x%0x\n", 1430 IF_INIT(printk("Tx Dle list addr: 0x%p value: 0x%0x\n",
1431 (u32)(iadev->dma+IPHASE5575_TX_LIST_ADDR), 1431 iadev->dma+IPHASE5575_TX_LIST_ADDR,
1432 *(u32*)(iadev->dma+IPHASE5575_TX_LIST_ADDR)); 1432 *(u32*)(iadev->dma+IPHASE5575_TX_LIST_ADDR));
1433 printk("Rx Dle list addr: 0x%08x value: 0x%0x\n", 1433 printk("Rx Dle list addr: 0x%p value: 0x%0x\n",
1434 (u32)(iadev->dma+IPHASE5575_RX_LIST_ADDR), 1434 iadev->dma+IPHASE5575_RX_LIST_ADDR,
1435 *(u32*)(iadev->dma+IPHASE5575_RX_LIST_ADDR));) 1435 *(u32*)(iadev->dma+IPHASE5575_RX_LIST_ADDR));)
1436 1436
1437 writew(0xffff, iadev->reass_reg+REASS_MASK_REG); 1437 writew(0xffff, iadev->reass_reg+REASS_MASK_REG);
@@ -1470,7 +1470,7 @@ static int rx_init(struct atm_dev *dev)
1470 buf_desc_ptr++; 1470 buf_desc_ptr++;
1471 rx_pkt_start += iadev->rx_buf_sz; 1471 rx_pkt_start += iadev->rx_buf_sz;
1472 } 1472 }
1473 IF_INIT(printk("Rx Buffer desc ptr: 0x%0x\n", (u32)(buf_desc_ptr));) 1473 IF_INIT(printk("Rx Buffer desc ptr: 0x%p\n", buf_desc_ptr);)
1474 i = FREE_BUF_DESC_Q*iadev->memSize; 1474 i = FREE_BUF_DESC_Q*iadev->memSize;
1475 writew(i >> 16, iadev->reass_reg+REASS_QUEUE_BASE); 1475 writew(i >> 16, iadev->reass_reg+REASS_QUEUE_BASE);
1476 writew(i, iadev->reass_reg+FREEQ_ST_ADR); 1476 writew(i, iadev->reass_reg+FREEQ_ST_ADR);
@@ -1487,7 +1487,7 @@ static int rx_init(struct atm_dev *dev)
1487 *freeq_start = (u_short)i; 1487 *freeq_start = (u_short)i;
1488 freeq_start++; 1488 freeq_start++;
1489 } 1489 }
1490 IF_INIT(printk("freeq_start: 0x%0x\n", (u32)freeq_start);) 1490 IF_INIT(printk("freeq_start: 0x%p\n", freeq_start);)
1491 /* Packet Complete Queue */ 1491 /* Packet Complete Queue */
1492 i = (PKT_COMP_Q * iadev->memSize) & 0xffff; 1492 i = (PKT_COMP_Q * iadev->memSize) & 0xffff;
1493 writew(i, iadev->reass_reg+PCQ_ST_ADR); 1493 writew(i, iadev->reass_reg+PCQ_ST_ADR);
@@ -1713,7 +1713,7 @@ static void tx_dle_intr(struct atm_dev *dev)
1713 IA_SKB_STATE(skb) |= IA_DLED; 1713 IA_SKB_STATE(skb) |= IA_DLED;
1714 skb_queue_tail(&iavcc->txing_skb, skb); 1714 skb_queue_tail(&iavcc->txing_skb, skb);
1715 } 1715 }
1716 IF_EVENT(printk("tx_dle_intr: enque skb = 0x%x \n", (u32)skb);) 1716 IF_EVENT(printk("tx_dle_intr: enque skb = 0x%p \n", skb);)
1717 if (++dle == iadev->tx_dle_q.end) 1717 if (++dle == iadev->tx_dle_q.end)
1718 dle = iadev->tx_dle_q.start; 1718 dle = iadev->tx_dle_q.start;
1719 } 1719 }
@@ -2044,8 +2044,8 @@ static int tx_init(struct atm_dev *dev)
2044 writew(tmp16, iadev->seg_reg+CBR_TAB_END+1); // CBR_PTR; 2044 writew(tmp16, iadev->seg_reg+CBR_TAB_END+1); // CBR_PTR;
2045 tmp16 = (CBR_SCHED_TABLE*iadev->memSize + iadev->num_vc*6 - 2) >> 1; 2045 tmp16 = (CBR_SCHED_TABLE*iadev->memSize + iadev->num_vc*6 - 2) >> 1;
2046 writew(tmp16, iadev->seg_reg+CBR_TAB_END); 2046 writew(tmp16, iadev->seg_reg+CBR_TAB_END);
2047 IF_INIT(printk("iadev->seg_reg = 0x%x CBR_PTR_BASE = 0x%x\n", 2047 IF_INIT(printk("iadev->seg_reg = 0x%p CBR_PTR_BASE = 0x%x\n",
2048 (u32)iadev->seg_reg, readw(iadev->seg_reg+CBR_PTR_BASE));) 2048 iadev->seg_reg, readw(iadev->seg_reg+CBR_PTR_BASE));)
2049 IF_INIT(printk("CBR_TAB_BEG = 0x%x, CBR_TAB_END = 0x%x, CBR_PTR = 0x%x\n", 2049 IF_INIT(printk("CBR_TAB_BEG = 0x%x, CBR_TAB_END = 0x%x, CBR_PTR = 0x%x\n",
2050 readw(iadev->seg_reg+CBR_TAB_BEG), readw(iadev->seg_reg+CBR_TAB_END), 2050 readw(iadev->seg_reg+CBR_TAB_BEG), readw(iadev->seg_reg+CBR_TAB_END),
2051 readw(iadev->seg_reg+CBR_TAB_END+1));) 2051 readw(iadev->seg_reg+CBR_TAB_END+1));)
@@ -2963,8 +2963,8 @@ static int ia_pkt_tx (struct atm_vcc *vcc, struct sk_buff *skb) {
2963 2963
2964 /* Put the packet in a tx buffer */ 2964 /* Put the packet in a tx buffer */
2965 trailer = iadev->tx_buf[desc-1].cpcs; 2965 trailer = iadev->tx_buf[desc-1].cpcs;
2966 IF_TX(printk("Sent: skb = 0x%x skb->data: 0x%x len: %d, desc: %d\n", 2966 IF_TX(printk("Sent: skb = 0x%p skb->data: 0x%p len: %d, desc: %d\n",
2967 (u32)skb, (u32)skb->data, skb->len, desc);) 2967 skb, skb->data, skb->len, desc);)
2968 trailer->control = 0; 2968 trailer->control = 0;
2969 /*big endian*/ 2969 /*big endian*/
2970 trailer->length = ((skb->len & 0xff) << 8) | ((skb->len & 0xff00) >> 8); 2970 trailer->length = ((skb->len & 0xff) << 8) | ((skb->len & 0xff00) >> 8);
@@ -3181,7 +3181,7 @@ static int __devinit ia_init_one(struct pci_dev *pdev,
3181 } 3181 }
3182 dev->dev_data = iadev; 3182 dev->dev_data = iadev;
3183 IF_INIT(printk(DEV_LABEL "registered at (itf :%d)\n", dev->number);) 3183 IF_INIT(printk(DEV_LABEL "registered at (itf :%d)\n", dev->number);)
3184 IF_INIT(printk("dev_id = 0x%x iadev->LineRate = %d \n", (u32)dev, 3184 IF_INIT(printk("dev_id = 0x%p iadev->LineRate = %d \n", dev,
3185 iadev->LineRate);) 3185 iadev->LineRate);)
3186 3186
3187 pci_set_drvdata(pdev, dev); 3187 pci_set_drvdata(pdev, dev);
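
All of the iphase.c hunks make the same fix: pointers were being cast to u32 and printed with %x, which truncates addresses on 64-bit kernels and triggers printk format warnings. The portable form passes the pointer unchanged and uses %p, as in this minimal sketch (the function and pointer name are hypothetical):

static void print_ptr_example(void *skb_ptr)
{
	/* old, truncating form: printk("skb = 0x%x\n", (u32)skb_ptr); */
	printk(KERN_DEBUG "skb = 0x%p\n", skb_ptr);  /* correct on 32- and 64-bit */
}
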
diff --git a/drivers/base/class.c b/drivers/base/class.c
index 839d27cecb36..5667c2f02c51 100644
--- a/drivers/base/class.c
+++ b/drivers/base/class.c
@@ -198,6 +198,7 @@ static void class_create_release(struct class *cls)
198 * class_create - create a struct class structure 198 * class_create - create a struct class structure
199 * @owner: pointer to the module that is to "own" this struct class 199 * @owner: pointer to the module that is to "own" this struct class
200 * @name: pointer to a string for the name of this class. 200 * @name: pointer to a string for the name of this class.
201 * @key: the lock_class_key for this class; used by mutex lock debugging
201 * 202 *
202 * This is used to create a struct class pointer that can then be used 203 * This is used to create a struct class pointer that can then be used
203 * in calls to device_create(). 204 * in calls to device_create().
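
The new @key line documents the lock_class_key argument used by lockdep; callers normally never pass it by hand because the class_create() wrapper macro supplies a static key. Typical usage is unchanged, roughly as in this illustrative sketch (the class and function names are made up):

#include <linux/device.h>
#include <linux/err.h>
#include <linux/module.h>

static struct class *example_class;

static int __init example_init(void)
{
	/* the macro passes a static lock_class_key through to __class_create(),
	 * so @key never appears at the call site */
	example_class = class_create(THIS_MODULE, "example");
	if (IS_ERR(example_class))
		return PTR_ERR(example_class);
	return 0;
}
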
diff --git a/drivers/block/aoe/aoenet.c b/drivers/block/aoe/aoenet.c
index d625169c8e48..0c81ca731287 100644
--- a/drivers/block/aoe/aoenet.c
+++ b/drivers/block/aoe/aoenet.c
@@ -30,7 +30,7 @@ enum {
30 30
31static char aoe_iflist[IFLISTSZ]; 31static char aoe_iflist[IFLISTSZ];
32module_param_string(aoe_iflist, aoe_iflist, IFLISTSZ, 0600); 32module_param_string(aoe_iflist, aoe_iflist, IFLISTSZ, 0600);
33MODULE_PARM_DESC(aoe_iflist, "aoe_iflist=\"dev1 [dev2 ...]\"\n"); 33MODULE_PARM_DESC(aoe_iflist, "aoe_iflist=\"dev1 [dev2 ...]\"");
34 34
35#ifndef MODULE 35#ifndef MODULE
36static int __init aoe_iflist_setup(char *str) 36static int __init aoe_iflist_setup(char *str)
diff --git a/drivers/block/cciss.c b/drivers/block/cciss.c
index d81632cd7d06..b73116ef9236 100644
--- a/drivers/block/cciss.c
+++ b/drivers/block/cciss.c
@@ -159,7 +159,7 @@ static int cciss_ioctl(struct inode *inode, struct file *filep,
159static int cciss_getgeo(struct block_device *bdev, struct hd_geometry *geo); 159static int cciss_getgeo(struct block_device *bdev, struct hd_geometry *geo);
160 160
161static int cciss_revalidate(struct gendisk *disk); 161static int cciss_revalidate(struct gendisk *disk);
162static int rebuild_lun_table(ctlr_info_t *h, struct gendisk *del_disk); 162static int rebuild_lun_table(ctlr_info_t *h, int first_time);
163static int deregister_disk(struct gendisk *disk, drive_info_struct *drv, 163static int deregister_disk(struct gendisk *disk, drive_info_struct *drv,
164 int clear_all); 164 int clear_all);
165 165
@@ -171,7 +171,6 @@ static void cciss_geometry_inquiry(int ctlr, int logvol,
171 int withirq, sector_t total_size, 171 int withirq, sector_t total_size,
172 unsigned int block_size, InquiryData_struct *inq_buff, 172 unsigned int block_size, InquiryData_struct *inq_buff,
173 drive_info_struct *drv); 173 drive_info_struct *drv);
174static void cciss_getgeometry(int cntl_num);
175static void __devinit cciss_interrupt_mode(ctlr_info_t *, struct pci_dev *, 174static void __devinit cciss_interrupt_mode(ctlr_info_t *, struct pci_dev *,
176 __u32); 175 __u32);
177static void start_io(ctlr_info_t *h); 176static void start_io(ctlr_info_t *h);
@@ -929,8 +928,10 @@ static int cciss_ioctl(struct inode *inode, struct file *filep,
929 return 0; 928 return 0;
930 } 929 }
931 930
931 case CCISS_DEREGDISK:
932 case CCISS_REGNEWD:
932 case CCISS_REVALIDVOLS: 933 case CCISS_REVALIDVOLS:
933 return rebuild_lun_table(host, NULL); 934 return rebuild_lun_table(host, 0);
934 935
935 case CCISS_GETLUNINFO:{ 936 case CCISS_GETLUNINFO:{
936 LogvolInfo_struct luninfo; 937 LogvolInfo_struct luninfo;
@@ -943,12 +944,6 @@ static int cciss_ioctl(struct inode *inode, struct file *filep,
943 return -EFAULT; 944 return -EFAULT;
944 return 0; 945 return 0;
945 } 946 }
946 case CCISS_DEREGDISK:
947 return rebuild_lun_table(host, disk);
948
949 case CCISS_REGNEWD:
950 return rebuild_lun_table(host, NULL);
951
952 case CCISS_PASSTHRU: 947 case CCISS_PASSTHRU:
953 { 948 {
954 IOCTL_Command_struct iocommand; 949 IOCTL_Command_struct iocommand;
@@ -1134,7 +1129,7 @@ static int cciss_ioctl(struct inode *inode, struct file *filep,
1134 if (ioc->Request.Type.Direction == XFER_WRITE) { 1129 if (ioc->Request.Type.Direction == XFER_WRITE) {
1135 if (copy_from_user 1130 if (copy_from_user
1136 (buff[sg_used], data_ptr, sz)) { 1131 (buff[sg_used], data_ptr, sz)) {
1137 status = -ENOMEM; 1132 status = -EFAULT;
1138 goto cleanup1; 1133 goto cleanup1;
1139 } 1134 }
1140 } else { 1135 } else {
@@ -1292,8 +1287,6 @@ static void cciss_check_queues(ctlr_info_t *h)
1292 h->next_to_run = curr_queue; 1287 h->next_to_run = curr_queue;
1293 break; 1288 break;
1294 } 1289 }
1295 } else {
1296 curr_queue = (curr_queue + 1) % (h->highest_lun + 1);
1297 } 1290 }
1298 } 1291 }
1299} 1292}
@@ -1332,15 +1325,84 @@ static void cciss_softirq_done(struct request *rq)
1332 spin_unlock_irqrestore(&h->lock, flags); 1325 spin_unlock_irqrestore(&h->lock, flags);
1333} 1326}
1334 1327
1328/* This function gets the serial number of a logical drive via
1329 * inquiry page 0x83. Serial no. is 16 bytes. If the serial
1330 * number cannot be had, for whatever reason, 16 bytes of 0xff
1331 * are returned instead.
1332 */
1333static void cciss_get_serial_no(int ctlr, int logvol, int withirq,
1334 unsigned char *serial_no, int buflen)
1335{
1336#define PAGE_83_INQ_BYTES 64
1337 int rc;
1338 unsigned char *buf;
1339
1340 if (buflen > 16)
1341 buflen = 16;
1342 memset(serial_no, 0xff, buflen);
1343 buf = kzalloc(PAGE_83_INQ_BYTES, GFP_KERNEL);
1344 if (!buf)
1345 return;
1346 memset(serial_no, 0, buflen);
1347 if (withirq)
1348 rc = sendcmd_withirq(CISS_INQUIRY, ctlr, buf,
1349 PAGE_83_INQ_BYTES, 1, logvol, 0x83, TYPE_CMD);
1350 else
1351 rc = sendcmd(CISS_INQUIRY, ctlr, buf,
1352 PAGE_83_INQ_BYTES, 1, logvol, 0x83, NULL, TYPE_CMD);
1353 if (rc == IO_OK)
1354 memcpy(serial_no, &buf[8], buflen);
1355 kfree(buf);
1356 return;
1357}
1358
1359static void cciss_add_disk(ctlr_info_t *h, struct gendisk *disk,
1360 int drv_index)
1361{
1362 disk->queue = blk_init_queue(do_cciss_request, &h->lock);
1363 sprintf(disk->disk_name, "cciss/c%dd%d", h->ctlr, drv_index);
1364 disk->major = h->major;
1365 disk->first_minor = drv_index << NWD_SHIFT;
1366 disk->fops = &cciss_fops;
1367 disk->private_data = &h->drv[drv_index];
1368
1369 /* Set up queue information */
1370 blk_queue_bounce_limit(disk->queue, h->pdev->dma_mask);
1371
1372 /* This is a hardware imposed limit. */
1373 blk_queue_max_hw_segments(disk->queue, MAXSGENTRIES);
1374
1375 /* This is a limit in the driver and could be eliminated. */
1376 blk_queue_max_phys_segments(disk->queue, MAXSGENTRIES);
1377
1378 blk_queue_max_sectors(disk->queue, h->cciss_max_sectors);
1379
1380 blk_queue_softirq_done(disk->queue, cciss_softirq_done);
1381
1382 disk->queue->queuedata = h;
1383
1384 blk_queue_hardsect_size(disk->queue,
1385 h->drv[drv_index].block_size);
1386
1387 /* Make sure all queue data is written out before */
1388 /* setting h->drv[drv_index].queue, as setting this */
1389 /* allows the interrupt handler to start the queue */
1390 wmb();
1391 h->drv[drv_index].queue = disk->queue;
1392 add_disk(disk);
1393}
1394
1335/* This function will check the usage_count of the drive to be updated/added. 1395/* This function will check the usage_count of the drive to be updated/added.
1336 * If the usage_count is zero then the drive information will be updated and 1396 * If the usage_count is zero and it is a heretofore unknown drive, or,
1337 * the disk will be re-registered with the kernel. If not then it will be 1397 * the drive's capacity, geometry, or serial number has changed,
1338 * left alone for the next reboot. The exception to this is disk 0 which 1398 * then the drive information will be updated and the disk will be
1339 * will always be left registered with the kernel since it is also the 1399 * re-registered with the kernel. If these conditions don't hold,
1340 * controller node. Any changes to disk 0 will show up on the next 1400 * then it will be left alone for the next reboot. The exception to this
1341 * reboot. 1401 * is disk 0 which will always be left registered with the kernel since it
1402 * is also the controller node. Any changes to disk 0 will show up on
1403 * the next reboot.
1342 */ 1404 */
1343static void cciss_update_drive_info(int ctlr, int drv_index) 1405static void cciss_update_drive_info(int ctlr, int drv_index, int first_time)
1344{ 1406{
1345 ctlr_info_t *h = hba[ctlr]; 1407 ctlr_info_t *h = hba[ctlr];
1346 struct gendisk *disk; 1408 struct gendisk *disk;
@@ -1349,16 +1411,81 @@ static void cciss_update_drive_info(int ctlr, int drv_index)
1349 sector_t total_size; 1411 sector_t total_size;
1350 unsigned long flags = 0; 1412 unsigned long flags = 0;
1351 int ret = 0; 1413 int ret = 0;
1414 drive_info_struct *drvinfo;
1415 int was_only_controller_node;
1416
1417 /* Get information about the disk and modify the driver structure */
1418 inq_buff = kmalloc(sizeof(InquiryData_struct), GFP_KERNEL);
1419 drvinfo = kmalloc(sizeof(*drvinfo), GFP_KERNEL);
1420 if (inq_buff == NULL || drvinfo == NULL)
1421 goto mem_msg;
1422
1423 /* See if we're trying to update the "controller node"
1424 * this will happen the when the first logical drive gets
1425 * created by ACU.
1426 */
1427 was_only_controller_node = (drv_index == 0 &&
1428 h->drv[0].raid_level == -1);
1352 1429
1353 /* if the disk already exists then deregister it before proceeding */ 1430 /* testing to see if 16-byte CDBs are already being used */
1354 if (h->drv[drv_index].raid_level != -1) { 1431 if (h->cciss_read == CCISS_READ_16) {
1432 cciss_read_capacity_16(h->ctlr, drv_index, 1,
1433 &total_size, &block_size);
1434
1435 } else {
1436 cciss_read_capacity(ctlr, drv_index, 1,
1437 &total_size, &block_size);
1438
1439 /* if read_capacity returns all F's this volume is >2TB */
1440 /* in size so we switch to 16-byte CDB's for all */
1441 /* read/write ops */
1442 if (total_size == 0xFFFFFFFFULL) {
1443 cciss_read_capacity_16(ctlr, drv_index, 1,
1444 &total_size, &block_size);
1445 h->cciss_read = CCISS_READ_16;
1446 h->cciss_write = CCISS_WRITE_16;
1447 } else {
1448 h->cciss_read = CCISS_READ_10;
1449 h->cciss_write = CCISS_WRITE_10;
1450 }
1451 }
1452
1453 cciss_geometry_inquiry(ctlr, drv_index, 1, total_size, block_size,
1454 inq_buff, drvinfo);
1455 drvinfo->block_size = block_size;
1456 drvinfo->nr_blocks = total_size + 1;
1457
1458 cciss_get_serial_no(ctlr, drv_index, 1, drvinfo->serial_no,
1459 sizeof(drvinfo->serial_no));
1460
1461 /* Is it the same disk we already know, and nothing's changed? */
1462 if (h->drv[drv_index].raid_level != -1 &&
1463 ((memcmp(drvinfo->serial_no,
1464 h->drv[drv_index].serial_no, 16) == 0) &&
1465 drvinfo->block_size == h->drv[drv_index].block_size &&
1466 drvinfo->nr_blocks == h->drv[drv_index].nr_blocks &&
1467 drvinfo->heads == h->drv[drv_index].heads &&
1468 drvinfo->sectors == h->drv[drv_index].sectors &&
1469 drvinfo->cylinders == h->drv[drv_index].cylinders))
1470 /* The disk is unchanged, nothing to update */
1471 goto freeret;
1472
 1473	/* If we get here it's not the same disk, or something's changed,
 1474	 * so we need to deregister it and re-register it, if it's not
 1475	 * in use.
 1476	 * If the disk already exists then deregister it before proceeding
 1477	 * (unless it's the first disk, i.e. the controller node).
1478 */
1479 if (h->drv[drv_index].raid_level != -1 && drv_index != 0) {
1480 printk(KERN_WARNING "disk %d has changed.\n", drv_index);
1355 spin_lock_irqsave(CCISS_LOCK(h->ctlr), flags); 1481 spin_lock_irqsave(CCISS_LOCK(h->ctlr), flags);
1356 h->drv[drv_index].busy_configuring = 1; 1482 h->drv[drv_index].busy_configuring = 1;
1357 spin_unlock_irqrestore(CCISS_LOCK(h->ctlr), flags); 1483 spin_unlock_irqrestore(CCISS_LOCK(h->ctlr), flags);
1358 1484
1359 /* deregister_disk sets h->drv[drv_index].queue = NULL */ 1485 /* deregister_disk sets h->drv[drv_index].queue = NULL
1360 /* which keeps the interrupt handler from starting */ 1486 * which keeps the interrupt handler from starting
1361 /* the queue. */ 1487 * the queue.
1488 */
1362 ret = deregister_disk(h->gendisk[drv_index], 1489 ret = deregister_disk(h->gendisk[drv_index],
1363 &h->drv[drv_index], 0); 1490 &h->drv[drv_index], 0);
1364 h->drv[drv_index].busy_configuring = 0; 1491 h->drv[drv_index].busy_configuring = 0;
@@ -1366,81 +1493,37 @@ static void cciss_update_drive_info(int ctlr, int drv_index)
1366 1493
1367 /* If the disk is in use return */ 1494 /* If the disk is in use return */
1368 if (ret) 1495 if (ret)
1369 return; 1496 goto freeret;
1370 1497
1371 /* Get information about the disk and modify the driver structure */ 1498 /* Save the new information from cciss_geometry_inquiry
1372 inq_buff = kmalloc(sizeof(InquiryData_struct), GFP_KERNEL); 1499 * and serial number inquiry.
1373 if (inq_buff == NULL) 1500 */
1374 goto mem_msg; 1501 h->drv[drv_index].block_size = drvinfo->block_size;
1375 1502 h->drv[drv_index].nr_blocks = drvinfo->nr_blocks;
1376 /* testing to see if 16-byte CDBs are already being used */ 1503 h->drv[drv_index].heads = drvinfo->heads;
1377 if (h->cciss_read == CCISS_READ_16) { 1504 h->drv[drv_index].sectors = drvinfo->sectors;
1378 cciss_read_capacity_16(h->ctlr, drv_index, 1, 1505 h->drv[drv_index].cylinders = drvinfo->cylinders;
1379 &total_size, &block_size); 1506 h->drv[drv_index].raid_level = drvinfo->raid_level;
1380 goto geo_inq; 1507 memcpy(h->drv[drv_index].serial_no, drvinfo->serial_no, 16);
1381 }
1382
1383 cciss_read_capacity(ctlr, drv_index, 1,
1384 &total_size, &block_size);
1385
1386 /* if read_capacity returns all F's this volume is >2TB in size */
1387 /* so we switch to 16-byte CDB's for all read/write ops */
1388 if (total_size == 0xFFFFFFFFULL) {
1389 cciss_read_capacity_16(ctlr, drv_index, 1,
1390 &total_size, &block_size);
1391 h->cciss_read = CCISS_READ_16;
1392 h->cciss_write = CCISS_WRITE_16;
1393 } else {
1394 h->cciss_read = CCISS_READ_10;
1395 h->cciss_write = CCISS_WRITE_10;
1396 }
1397geo_inq:
1398 cciss_geometry_inquiry(ctlr, drv_index, 1, total_size, block_size,
1399 inq_buff, &h->drv[drv_index]);
1400 1508
1401 ++h->num_luns; 1509 ++h->num_luns;
1402 disk = h->gendisk[drv_index]; 1510 disk = h->gendisk[drv_index];
1403 set_capacity(disk, h->drv[drv_index].nr_blocks); 1511 set_capacity(disk, h->drv[drv_index].nr_blocks);
1404 1512
1405 /* if it's the controller it's already added */ 1513 /* If it's not disk 0 (drv_index != 0)
1406 if (drv_index) { 1514 * or if it was disk 0, but there was previously
1407 disk->queue = blk_init_queue(do_cciss_request, &h->lock); 1515 * no actual corresponding configured logical drive
1408 sprintf(disk->disk_name, "cciss/c%dd%d", ctlr, drv_index); 1516 * (raid_leve == -1) then we want to update the
1409 disk->major = h->major; 1517 * logical drive's information.
1410 disk->first_minor = drv_index << NWD_SHIFT; 1518 */
1411 disk->fops = &cciss_fops; 1519 if (drv_index || first_time)
1412 disk->private_data = &h->drv[drv_index]; 1520 cciss_add_disk(h, disk, drv_index);
1413
1414 /* Set up queue information */
1415 blk_queue_bounce_limit(disk->queue, hba[ctlr]->pdev->dma_mask);
1416
1417 /* This is a hardware imposed limit. */
1418 blk_queue_max_hw_segments(disk->queue, MAXSGENTRIES);
1419
1420 /* This is a limit in the driver and could be eliminated. */
1421 blk_queue_max_phys_segments(disk->queue, MAXSGENTRIES);
1422
1423 blk_queue_max_sectors(disk->queue, h->cciss_max_sectors);
1424
1425 blk_queue_softirq_done(disk->queue, cciss_softirq_done);
1426
1427 disk->queue->queuedata = hba[ctlr];
1428
1429 blk_queue_hardsect_size(disk->queue,
1430 hba[ctlr]->drv[drv_index].block_size);
1431
1432 /* Make sure all queue data is written out before */
1433 /* setting h->drv[drv_index].queue, as setting this */
1434 /* allows the interrupt handler to start the queue */
1435 wmb();
1436 h->drv[drv_index].queue = disk->queue;
1437 add_disk(disk);
1438 }
1439 1521
1440 freeret: 1522freeret:
1441 kfree(inq_buff); 1523 kfree(inq_buff);
1524 kfree(drvinfo);
1442 return; 1525 return;
1443 mem_msg: 1526mem_msg:
1444 printk(KERN_ERR "cciss: out of memory\n"); 1527 printk(KERN_ERR "cciss: out of memory\n");
1445 goto freeret; 1528 goto freeret;
1446} 1529}
@@ -1450,21 +1533,91 @@ geo_inq:
1450 * where new drives will be added. If the index to be returned is greater 1533 * where new drives will be added. If the index to be returned is greater
1451 * than the highest_lun index for the controller then highest_lun is set 1534 * than the highest_lun index for the controller then highest_lun is set
1452 * to this new index. If there are no available indexes then -1 is returned. 1535 * to this new index. If there are no available indexes then -1 is returned.
1536 * "controller_node" is used to know if this is a real logical drive, or just
1537 * the controller node, which determines if this counts towards highest_lun.
1453 */ 1538 */
1454static int cciss_find_free_drive_index(int ctlr) 1539static int cciss_find_free_drive_index(int ctlr, int controller_node)
1455{ 1540{
1456 int i; 1541 int i;
1457 1542
1458 for (i = 0; i < CISS_MAX_LUN; i++) { 1543 for (i = 0; i < CISS_MAX_LUN; i++) {
1459 if (hba[ctlr]->drv[i].raid_level == -1) { 1544 if (hba[ctlr]->drv[i].raid_level == -1) {
1460 if (i > hba[ctlr]->highest_lun) 1545 if (i > hba[ctlr]->highest_lun)
1461 hba[ctlr]->highest_lun = i; 1546 if (!controller_node)
1547 hba[ctlr]->highest_lun = i;
1462 return i; 1548 return i;
1463 } 1549 }
1464 } 1550 }
1465 return -1; 1551 return -1;
1466} 1552}
1467 1553
1554/* cciss_add_gendisk finds a free hba[]->drv structure
1555 * and allocates a gendisk if needed, and sets the lunid
1556 * in the drvinfo structure. It returns the index into
1557 * the ->drv[] array, or -1 if none are free.
 1558 * controller_node indicates whether highest_lun should
1559 * count this disk, or if it's only being added to provide
1560 * a means to talk to the controller in case no logical
1561 * drives have yet been configured.
1562 */
1563static int cciss_add_gendisk(ctlr_info_t *h, __u32 lunid, int controller_node)
1564{
1565 int drv_index;
1566
1567 drv_index = cciss_find_free_drive_index(h->ctlr, controller_node);
1568 if (drv_index == -1)
1569 return -1;
1570 /*Check if the gendisk needs to be allocated */
1571 if (!h->gendisk[drv_index]) {
1572 h->gendisk[drv_index] =
1573 alloc_disk(1 << NWD_SHIFT);
1574 if (!h->gendisk[drv_index]) {
1575 printk(KERN_ERR "cciss%d: could not "
1576 "allocate a new disk %d\n",
1577 h->ctlr, drv_index);
1578 return -1;
1579 }
1580 }
1581 h->drv[drv_index].LunID = lunid;
1582
1583 /* Don't need to mark this busy because nobody */
1584 /* else knows about this disk yet to contend */
1585 /* for access to it. */
1586 h->drv[drv_index].busy_configuring = 0;
1587 wmb();
1588 return drv_index;
1589}
1590
1591/* This is for the special case of a controller which
1592 * has no logical drives. In this case, we still need
1593 * to register a disk so the controller can be accessed
1594 * by the Array Config Utility.
1595 */
1596static void cciss_add_controller_node(ctlr_info_t *h)
1597{
1598 struct gendisk *disk;
1599 int drv_index;
1600
1601 if (h->gendisk[0] != NULL) /* already did this? Then bail. */
1602 return;
1603
1604 drv_index = cciss_add_gendisk(h, 0, 1);
1605 if (drv_index == -1) {
1606 printk(KERN_WARNING "cciss%d: could not "
1607 "add disk 0.\n", h->ctlr);
1608 return;
1609 }
1610 h->drv[drv_index].block_size = 512;
1611 h->drv[drv_index].nr_blocks = 0;
1612 h->drv[drv_index].heads = 0;
1613 h->drv[drv_index].sectors = 0;
1614 h->drv[drv_index].cylinders = 0;
1615 h->drv[drv_index].raid_level = -1;
1616 memset(h->drv[drv_index].serial_no, 0, 16);
1617 disk = h->gendisk[drv_index];
1618 cciss_add_disk(h, disk, drv_index);
1619}
1620
1468/* This function will add and remove logical drives from the Logical 1621/* This function will add and remove logical drives from the Logical
1469 * drive array of the controller and maintain persistency of ordering 1622 * drive array of the controller and maintain persistency of ordering
1470 * so that mount points are preserved until the next reboot. This allows 1623 * so that mount points are preserved until the next reboot. This allows
@@ -1472,15 +1625,12 @@ static int cciss_find_free_drive_index(int ctlr)
1472 * without a re-ordering of those drives. 1625 * without a re-ordering of those drives.
1473 * INPUT 1626 * INPUT
1474 * h = The controller to perform the operations on 1627 * h = The controller to perform the operations on
1475 * del_disk = The disk to remove if specified. If the value given
1476 * is NULL then no disk is removed.
1477 */ 1628 */
1478static int rebuild_lun_table(ctlr_info_t *h, struct gendisk *del_disk) 1629static int rebuild_lun_table(ctlr_info_t *h, int first_time)
1479{ 1630{
1480 int ctlr = h->ctlr; 1631 int ctlr = h->ctlr;
1481 int num_luns; 1632 int num_luns;
1482 ReportLunData_struct *ld_buff = NULL; 1633 ReportLunData_struct *ld_buff = NULL;
1483 drive_info_struct *drv = NULL;
1484 int return_code; 1634 int return_code;
1485 int listlength = 0; 1635 int listlength = 0;
1486 int i; 1636 int i;
@@ -1489,6 +1639,9 @@ static int rebuild_lun_table(ctlr_info_t *h, struct gendisk *del_disk)
1489 __u32 lunid = 0; 1639 __u32 lunid = 0;
1490 unsigned long flags; 1640 unsigned long flags;
1491 1641
1642 if (!capable(CAP_SYS_RAWIO))
1643 return -EPERM;
1644
1492 /* Set busy_configuring flag for this operation */ 1645 /* Set busy_configuring flag for this operation */
1493 spin_lock_irqsave(CCISS_LOCK(h->ctlr), flags); 1646 spin_lock_irqsave(CCISS_LOCK(h->ctlr), flags);
1494 if (h->busy_configuring) { 1647 if (h->busy_configuring) {
@@ -1496,100 +1649,100 @@ static int rebuild_lun_table(ctlr_info_t *h, struct gendisk *del_disk)
1496 return -EBUSY; 1649 return -EBUSY;
1497 } 1650 }
1498 h->busy_configuring = 1; 1651 h->busy_configuring = 1;
1652 spin_unlock_irqrestore(CCISS_LOCK(h->ctlr), flags);
1499 1653
1500 /* if del_disk is NULL then we are being called to add a new disk 1654 ld_buff = kzalloc(sizeof(ReportLunData_struct), GFP_KERNEL);
1501 * and update the logical drive table. If it is not NULL then 1655 if (ld_buff == NULL)
1502 * we will check if the disk is in use or not. 1656 goto mem_msg;
1503 */
1504 if (del_disk != NULL) {
1505 drv = get_drv(del_disk);
1506 drv->busy_configuring = 1;
1507 spin_unlock_irqrestore(CCISS_LOCK(h->ctlr), flags);
1508 return_code = deregister_disk(del_disk, drv, 1);
1509 drv->busy_configuring = 0;
1510 h->busy_configuring = 0;
1511 return return_code;
1512 } else {
1513 spin_unlock_irqrestore(CCISS_LOCK(h->ctlr), flags);
1514 if (!capable(CAP_SYS_RAWIO))
1515 return -EPERM;
1516 1657
1517 ld_buff = kzalloc(sizeof(ReportLunData_struct), GFP_KERNEL); 1658 return_code = sendcmd_withirq(CISS_REPORT_LOG, ctlr, ld_buff,
1518 if (ld_buff == NULL) 1659 sizeof(ReportLunData_struct), 0,
1519 goto mem_msg; 1660 0, 0, TYPE_CMD);
1520 1661
1521 return_code = sendcmd_withirq(CISS_REPORT_LOG, ctlr, ld_buff, 1662 if (return_code == IO_OK)
1522 sizeof(ReportLunData_struct), 0, 1663 listlength = be32_to_cpu(*(__be32 *) ld_buff->LUNListLength);
1523 0, 0, TYPE_CMD); 1664 else { /* reading number of logical volumes failed */
1524 1665 printk(KERN_WARNING "cciss: report logical volume"
1525 if (return_code == IO_OK) { 1666 " command failed\n");
1526 listlength = 1667 listlength = 0;
1527 be32_to_cpu(*(__be32 *) ld_buff->LUNListLength); 1668 goto freeret;
1528 } else { /* reading number of logical volumes failed */ 1669 }
1529 printk(KERN_WARNING "cciss: report logical volume" 1670
1530 " command failed\n"); 1671 num_luns = listlength / 8; /* 8 bytes per entry */
1531 listlength = 0; 1672 if (num_luns > CISS_MAX_LUN) {
1532 goto freeret; 1673 num_luns = CISS_MAX_LUN;
1533 } 1674 printk(KERN_WARNING "cciss: more luns configured"
1675 " on controller than can be handled by"
1676 " this driver.\n");
1677 }
1534 1678
1535 num_luns = listlength / 8; /* 8 bytes per entry */ 1679 if (num_luns == 0)
1536 if (num_luns > CISS_MAX_LUN) { 1680 cciss_add_controller_node(h);
1537 num_luns = CISS_MAX_LUN; 1681
1538 printk(KERN_WARNING "cciss: more luns configured" 1682 /* Compare controller drive array to driver's drive array
1539 " on controller than can be handled by" 1683 * to see if any drives are missing on the controller due
1540 " this driver.\n"); 1684 * to action of Array Config Utility (user deletes drive)
1685 * and deregister logical drives which have disappeared.
1686 */
1687 for (i = 0; i <= h->highest_lun; i++) {
1688 int j;
1689 drv_found = 0;
1690 for (j = 0; j < num_luns; j++) {
1691 memcpy(&lunid, &ld_buff->LUN[j][0], 4);
1692 lunid = le32_to_cpu(lunid);
1693 if (h->drv[i].LunID == lunid) {
1694 drv_found = 1;
1695 break;
1696 }
1697 }
1698 if (!drv_found) {
1699 /* Deregister it from the OS, it's gone. */
1700 spin_lock_irqsave(CCISS_LOCK(h->ctlr), flags);
1701 h->drv[i].busy_configuring = 1;
1702 spin_unlock_irqrestore(CCISS_LOCK(h->ctlr), flags);
1703 return_code = deregister_disk(h->gendisk[i],
1704 &h->drv[i], 1);
1705 h->drv[i].busy_configuring = 0;
1541 } 1706 }
1707 }
1708
1709 /* Compare controller drive array to driver's drive array.
1710 * Check for updates in the drive information and any new drives
1711 * on the controller due to ACU adding logical drives, or changing
1712 * a logical drive's size, etc. Reregister any new/changed drives
1713 */
1714 for (i = 0; i < num_luns; i++) {
1715 int j;
1716
1717 drv_found = 0;
1718
1719 memcpy(&lunid, &ld_buff->LUN[i][0], 4);
1720 lunid = le32_to_cpu(lunid);
1542 1721
1543 /* Compare controller drive array to drivers drive array. 1722 /* Find if the LUN is already in the drive array
1544 * Check for updates in the drive information and any new drives 1723 * of the driver. If so then update its info
1545 * on the controller. 1724 * if not in use. If it does not exist then find
1725 * the first free index and add it.
1546 */ 1726 */
1547 for (i = 0; i < num_luns; i++) { 1727 for (j = 0; j <= h->highest_lun; j++) {
1548 int j; 1728 if (h->drv[j].raid_level != -1 &&
1549 1729 h->drv[j].LunID == lunid) {
1550 drv_found = 0; 1730 drv_index = j;
1551 1731 drv_found = 1;
1552 lunid = (0xff & 1732 break;
1553 (unsigned int)(ld_buff->LUN[i][3])) << 24;
1554 lunid |= (0xff &
1555 (unsigned int)(ld_buff->LUN[i][2])) << 16;
1556 lunid |= (0xff &
1557 (unsigned int)(ld_buff->LUN[i][1])) << 8;
1558 lunid |= 0xff & (unsigned int)(ld_buff->LUN[i][0]);
1559
1560 /* Find if the LUN is already in the drive array
1561 * of the controller. If so then update its info
1562 * if not is use. If it does not exist then find
1563 * the first free index and add it.
1564 */
1565 for (j = 0; j <= h->highest_lun; j++) {
1566 if (h->drv[j].LunID == lunid) {
1567 drv_index = j;
1568 drv_found = 1;
1569 }
1570 } 1733 }
1734 }
1571 1735
1572 /* check if the drive was found already in the array */ 1736 /* check if the drive was found already in the array */
1573 if (!drv_found) { 1737 if (!drv_found) {
1574 drv_index = cciss_find_free_drive_index(ctlr); 1738 drv_index = cciss_add_gendisk(h, lunid, 0);
1575 if (drv_index == -1) 1739 if (drv_index == -1)
1576 goto freeret; 1740 goto freeret;
1577 1741 }
1578 /*Check if the gendisk needs to be allocated */ 1742 cciss_update_drive_info(ctlr, drv_index, first_time);
1579 if (!h->gendisk[drv_index]){ 1743 } /* end for */
1580 h->gendisk[drv_index] = alloc_disk(1 << NWD_SHIFT);
1581 if (!h->gendisk[drv_index]){
1582 printk(KERN_ERR "cciss: could not allocate new disk %d\n", drv_index);
1583 goto mem_msg;
1584 }
1585 }
1586 }
1587 h->drv[drv_index].LunID = lunid;
1588 cciss_update_drive_info(ctlr, drv_index);
1589 } /* end for */
1590 } /* end else */
1591 1744
1592 freeret: 1745freeret:
1593 kfree(ld_buff); 1746 kfree(ld_buff);
1594 h->busy_configuring = 0; 1747 h->busy_configuring = 0;
1595 /* We return -1 here to tell the ACU that we have registered/updated 1748 /* We return -1 here to tell the ACU that we have registered/updated
@@ -1597,8 +1750,9 @@ static int rebuild_lun_table(ctlr_info_t *h, struct gendisk *del_disk)
1597 * additional times. 1750 * additional times.
1598 */ 1751 */
1599 return -1; 1752 return -1;
1600 mem_msg: 1753mem_msg:
1601 printk(KERN_ERR "cciss: out of memory\n"); 1754 printk(KERN_ERR "cciss: out of memory\n");
1755 h->busy_configuring = 0;
1602 goto freeret; 1756 goto freeret;
1603} 1757}
1604 1758
@@ -1654,15 +1808,15 @@ static int deregister_disk(struct gendisk *disk, drive_info_struct *drv,
1654 * other than disk 0 we will call put_disk. We do not 1808 * other than disk 0 we will call put_disk. We do not
1655 * do this for disk 0 as we need it to be able to 1809 * do this for disk 0 as we need it to be able to
1656 * configure the controller. 1810 * configure the controller.
1657 */ 1811 */
1658 if (clear_all){ 1812 if (clear_all){
1659 /* This isn't pretty, but we need to find the 1813 /* This isn't pretty, but we need to find the
1660 		 * disk in our array and NULL out the pointer. 1814		 * disk in our array and NULL out the pointer.
1661 * This is so that we will call alloc_disk if 1815 * This is so that we will call alloc_disk if
1662 * this index is used again later. 1816 * this index is used again later.
1663 */ 1817 */
1664 for (i=0; i < CISS_MAX_LUN; i++){ 1818 for (i=0; i < CISS_MAX_LUN; i++){
1665 if(h->gendisk[i] == disk){ 1819 if (h->gendisk[i] == disk) {
1666 h->gendisk[i] = NULL; 1820 h->gendisk[i] = NULL;
1667 break; 1821 break;
1668 } 1822 }
@@ -1690,7 +1844,7 @@ static int deregister_disk(struct gendisk *disk, drive_info_struct *drv,
1690 if (drv == h->drv + h->highest_lun) { 1844 if (drv == h->drv + h->highest_lun) {
1691 /* if so, find the new hightest lun */ 1845 /* if so, find the new hightest lun */
1692 int i, newhighest = -1; 1846 int i, newhighest = -1;
1693 for (i = 0; i < h->highest_lun; i++) { 1847 for (i = 0; i <= h->highest_lun; i++) {
1694 /* if the disk has size > 0, it is available */ 1848 /* if the disk has size > 0, it is available */
1695 if (h->drv[i].heads) 1849 if (h->drv[i].heads)
1696 newhighest = i; 1850 newhighest = i;
@@ -3201,136 +3355,9 @@ err_out_free_res:
3201 return err; 3355 return err;
3202} 3356}
3203 3357
3204/* 3358/* Function to find the first free pointer into our hba[] array
3205 * Gets information about the local volumes attached to the controller. 3359 * Returns -1 if no free entries are left.
3206 */ 3360 */
3207static void cciss_getgeometry(int cntl_num)
3208{
3209 ReportLunData_struct *ld_buff;
3210 InquiryData_struct *inq_buff;
3211 int return_code;
3212 int i;
3213 int listlength = 0;
3214 __u32 lunid = 0;
3215 unsigned block_size;
3216 sector_t total_size;
3217
3218 ld_buff = kzalloc(sizeof(ReportLunData_struct), GFP_KERNEL);
3219 if (ld_buff == NULL) {
3220 printk(KERN_ERR "cciss: out of memory\n");
3221 return;
3222 }
3223 inq_buff = kmalloc(sizeof(InquiryData_struct), GFP_KERNEL);
3224 if (inq_buff == NULL) {
3225 printk(KERN_ERR "cciss: out of memory\n");
3226 kfree(ld_buff);
3227 return;
3228 }
3229 /* Get the firmware version */
3230 return_code = sendcmd(CISS_INQUIRY, cntl_num, inq_buff,
3231 sizeof(InquiryData_struct), 0, 0, 0, NULL,
3232 TYPE_CMD);
3233 if (return_code == IO_OK) {
3234 hba[cntl_num]->firm_ver[0] = inq_buff->data_byte[32];
3235 hba[cntl_num]->firm_ver[1] = inq_buff->data_byte[33];
3236 hba[cntl_num]->firm_ver[2] = inq_buff->data_byte[34];
3237 hba[cntl_num]->firm_ver[3] = inq_buff->data_byte[35];
3238 } else { /* send command failed */
3239
3240 printk(KERN_WARNING "cciss: unable to determine firmware"
3241 " version of controller\n");
3242 }
3243 /* Get the number of logical volumes */
3244 return_code = sendcmd(CISS_REPORT_LOG, cntl_num, ld_buff,
3245 sizeof(ReportLunData_struct), 0, 0, 0, NULL,
3246 TYPE_CMD);
3247
3248 if (return_code == IO_OK) {
3249#ifdef CCISS_DEBUG
3250 printk("LUN Data\n--------------------------\n");
3251#endif /* CCISS_DEBUG */
3252
3253 listlength |=
3254 (0xff & (unsigned int)(ld_buff->LUNListLength[0])) << 24;
3255 listlength |=
3256 (0xff & (unsigned int)(ld_buff->LUNListLength[1])) << 16;
3257 listlength |=
3258 (0xff & (unsigned int)(ld_buff->LUNListLength[2])) << 8;
3259 listlength |= 0xff & (unsigned int)(ld_buff->LUNListLength[3]);
3260 } else { /* reading number of logical volumes failed */
3261
3262 printk(KERN_WARNING "cciss: report logical volume"
3263 " command failed\n");
3264 listlength = 0;
3265 }
3266 hba[cntl_num]->num_luns = listlength / 8; // 8 bytes pre entry
3267 if (hba[cntl_num]->num_luns > CISS_MAX_LUN) {
3268 printk(KERN_ERR
3269 "ciss: only %d number of logical volumes supported\n",
3270 CISS_MAX_LUN);
3271 hba[cntl_num]->num_luns = CISS_MAX_LUN;
3272 }
3273#ifdef CCISS_DEBUG
3274 printk(KERN_DEBUG "Length = %x %x %x %x = %d\n",
3275 ld_buff->LUNListLength[0], ld_buff->LUNListLength[1],
3276 ld_buff->LUNListLength[2], ld_buff->LUNListLength[3],
3277 hba[cntl_num]->num_luns);
3278#endif /* CCISS_DEBUG */
3279
3280 hba[cntl_num]->highest_lun = hba[cntl_num]->num_luns - 1;
3281 for (i = 0; i < CISS_MAX_LUN; i++) {
3282 if (i < hba[cntl_num]->num_luns) {
3283 lunid = (0xff & (unsigned int)(ld_buff->LUN[i][3]))
3284 << 24;
3285 lunid |= (0xff & (unsigned int)(ld_buff->LUN[i][2]))
3286 << 16;
3287 lunid |= (0xff & (unsigned int)(ld_buff->LUN[i][1]))
3288 << 8;
3289 lunid |= 0xff & (unsigned int)(ld_buff->LUN[i][0]);
3290
3291 hba[cntl_num]->drv[i].LunID = lunid;
3292
3293#ifdef CCISS_DEBUG
3294 printk(KERN_DEBUG "LUN[%d]: %x %x %x %x = %x\n", i,
3295 ld_buff->LUN[i][0], ld_buff->LUN[i][1],
3296 ld_buff->LUN[i][2], ld_buff->LUN[i][3],
3297 hba[cntl_num]->drv[i].LunID);
3298#endif /* CCISS_DEBUG */
3299
3300 /* testing to see if 16-byte CDBs are already being used */
3301 if(hba[cntl_num]->cciss_read == CCISS_READ_16) {
3302 cciss_read_capacity_16(cntl_num, i, 0,
3303 &total_size, &block_size);
3304 goto geo_inq;
3305 }
3306 cciss_read_capacity(cntl_num, i, 0, &total_size, &block_size);
3307
3308 /* If read_capacity returns all F's the logical is >2TB */
3309 /* so we switch to 16-byte CDBs for all read/write ops */
3310 if(total_size == 0xFFFFFFFFULL) {
3311 cciss_read_capacity_16(cntl_num, i, 0,
3312 &total_size, &block_size);
3313 hba[cntl_num]->cciss_read = CCISS_READ_16;
3314 hba[cntl_num]->cciss_write = CCISS_WRITE_16;
3315 } else {
3316 hba[cntl_num]->cciss_read = CCISS_READ_10;
3317 hba[cntl_num]->cciss_write = CCISS_WRITE_10;
3318 }
3319geo_inq:
3320 cciss_geometry_inquiry(cntl_num, i, 0, total_size,
3321 block_size, inq_buff,
3322 &hba[cntl_num]->drv[i]);
3323 } else {
3324 /* initialize raid_level to indicate a free space */
3325 hba[cntl_num]->drv[i].raid_level = -1;
3326 }
3327 }
3328 kfree(ld_buff);
3329 kfree(inq_buff);
3330}
3331
3332/* Function to find the first free pointer into our hba[] array */
3333/* Returns -1 if no free entries are left. */
3334static int alloc_cciss_hba(void) 3361static int alloc_cciss_hba(void)
3335{ 3362{
3336 int i; 3363 int i;
@@ -3342,11 +3369,6 @@ static int alloc_cciss_hba(void)
3342 p = kzalloc(sizeof(ctlr_info_t), GFP_KERNEL); 3369 p = kzalloc(sizeof(ctlr_info_t), GFP_KERNEL);
3343 if (!p) 3370 if (!p)
3344 goto Enomem; 3371 goto Enomem;
3345 p->gendisk[0] = alloc_disk(1 << NWD_SHIFT);
3346 if (!p->gendisk[0]) {
3347 kfree(p);
3348 goto Enomem;
3349 }
3350 hba[i] = p; 3372 hba[i] = p;
3351 return i; 3373 return i;
3352 } 3374 }
@@ -3474,11 +3496,13 @@ static int __devinit cciss_init_one(struct pci_dev *pdev,
3474 ((hba[i]->nr_cmds + BITS_PER_LONG - 3496 ((hba[i]->nr_cmds + BITS_PER_LONG -
3475 1) / BITS_PER_LONG) * sizeof(unsigned long)); 3497 1) / BITS_PER_LONG) * sizeof(unsigned long));
3476 3498
3477#ifdef CCISS_DEBUG 3499 hba[i]->num_luns = 0;
3478 printk(KERN_DEBUG "Scanning for drives on controller cciss%d\n", i); 3500 hba[i]->highest_lun = -1;
3479#endif /* CCISS_DEBUG */ 3501 for (j = 0; j < CISS_MAX_LUN; j++) {
3480 3502 hba[i]->drv[j].raid_level = -1;
3481 cciss_getgeometry(i); 3503 hba[i]->drv[j].queue = NULL;
3504 hba[i]->gendisk[j] = NULL;
3505 }
3482 3506
3483 cciss_scsi_setup(i); 3507 cciss_scsi_setup(i);
3484 3508
@@ -3491,76 +3515,10 @@ static int __devinit cciss_init_one(struct pci_dev *pdev,
3491 3515
3492 hba[i]->busy_initializing = 0; 3516 hba[i]->busy_initializing = 0;
3493 3517
3494 do { 3518 rebuild_lun_table(hba[i], 1);
3495 drive_info_struct *drv = &(hba[i]->drv[j]);
3496 struct gendisk *disk = hba[i]->gendisk[j];
3497 struct request_queue *q;
3498
3499 /* Check if the disk was allocated already */
3500 if (!disk){
3501 hba[i]->gendisk[j] = alloc_disk(1 << NWD_SHIFT);
3502 disk = hba[i]->gendisk[j];
3503 }
3504
3505 /* Check that the disk was able to be allocated */
3506 if (!disk) {
3507 printk(KERN_ERR "cciss: unable to allocate memory for disk %d\n", j);
3508 goto clean4;
3509 }
3510
3511 q = blk_init_queue(do_cciss_request, &hba[i]->lock);
3512 if (!q) {
3513 printk(KERN_ERR
3514 "cciss: unable to allocate queue for disk %d\n",
3515 j);
3516 goto clean4;
3517 }
3518 drv->queue = q;
3519
3520 blk_queue_bounce_limit(q, hba[i]->pdev->dma_mask);
3521
3522 /* This is a hardware imposed limit. */
3523 blk_queue_max_hw_segments(q, MAXSGENTRIES);
3524
3525 /* This is a limit in the driver and could be eliminated. */
3526 blk_queue_max_phys_segments(q, MAXSGENTRIES);
3527
3528 blk_queue_max_sectors(q, hba[i]->cciss_max_sectors);
3529
3530 blk_queue_softirq_done(q, cciss_softirq_done);
3531
3532 q->queuedata = hba[i];
3533 sprintf(disk->disk_name, "cciss/c%dd%d", i, j);
3534 disk->major = hba[i]->major;
3535 disk->first_minor = j << NWD_SHIFT;
3536 disk->fops = &cciss_fops;
3537 disk->queue = q;
3538 disk->private_data = drv;
3539 disk->driverfs_dev = &pdev->dev;
3540 /* we must register the controller even if no disks exist */
3541 /* this is for the online array utilities */
3542 if (!drv->heads && j)
3543 continue;
3544 blk_queue_hardsect_size(q, drv->block_size);
3545 set_capacity(disk, drv->nr_blocks);
3546 j++;
3547 } while (j <= hba[i]->highest_lun);
3548
3549 /* Make sure all queue data is written out before */
3550 /* interrupt handler, triggered by add_disk, */
3551 /* is allowed to start them. */
3552 wmb();
3553
3554 for (j = 0; j <= hba[i]->highest_lun; j++)
3555 add_disk(hba[i]->gendisk[j]);
3556
3557 /* we must register the controller even if no disks exist */
3558 if (hba[i]->highest_lun == -1)
3559 add_disk(hba[i]->gendisk[0]);
3560
3561 return 1; 3519 return 1;
3562 3520
3563 clean4: 3521clean4:
3564#ifdef CONFIG_CISS_SCSI_TAPE 3522#ifdef CONFIG_CISS_SCSI_TAPE
3565 kfree(hba[i]->scsi_rejects.complete); 3523 kfree(hba[i]->scsi_rejects.complete);
3566#endif 3524#endif
@@ -3575,9 +3533,9 @@ static int __devinit cciss_init_one(struct pci_dev *pdev,
3575 hba[i]->errinfo_pool, 3533 hba[i]->errinfo_pool,
3576 hba[i]->errinfo_pool_dhandle); 3534 hba[i]->errinfo_pool_dhandle);
3577 free_irq(hba[i]->intr[SIMPLE_MODE_INT], hba[i]); 3535 free_irq(hba[i]->intr[SIMPLE_MODE_INT], hba[i]);
3578 clean2: 3536clean2:
3579 unregister_blkdev(hba[i]->major, hba[i]->devname); 3537 unregister_blkdev(hba[i]->major, hba[i]->devname);
3580 clean1: 3538clean1:
3581 hba[i]->busy_initializing = 0; 3539 hba[i]->busy_initializing = 0;
3582 /* cleanup any queues that may have been initialized */ 3540 /* cleanup any queues that may have been initialized */
3583 for (j=0; j <= hba[i]->highest_lun; j++){ 3541 for (j=0; j <= hba[i]->highest_lun; j++){
@@ -3656,7 +3614,9 @@ static void __devexit cciss_remove_one(struct pci_dev *pdev)
3656 } 3614 }
3657 } 3615 }
3658 3616
3617#ifdef CONFIG_CISS_SCSI_TAPE
3659 cciss_unregister_scsi(i); /* unhook from SCSI subsystem */ 3618 cciss_unregister_scsi(i); /* unhook from SCSI subsystem */
3619#endif
3660 3620
3661 cciss_shutdown(pdev); 3621 cciss_shutdown(pdev);
3662 3622
diff --git a/drivers/block/cciss.h b/drivers/block/cciss.h
index b70988dd33ec..24a7efa993ab 100644
--- a/drivers/block/cciss.h
+++ b/drivers/block/cciss.h
@@ -39,6 +39,8 @@ typedef struct _drive_info_struct
39 *to prevent it from being opened or it's queue 39 *to prevent it from being opened or it's queue
40 *from being started. 40 *from being started.
41 */ 41 */
42 __u8 serial_no[16]; /* from inquiry page 0x83, */
43 /* not necc. null terminated. */
42} drive_info_struct; 44} drive_info_struct;
43 45
44#ifdef CONFIG_CISS_SCSI_TAPE 46#ifdef CONFIG_CISS_SCSI_TAPE
diff --git a/drivers/block/cciss_scsi.c b/drivers/block/cciss_scsi.c
index e4bf9a11ca0d..e1233aabda77 100644
--- a/drivers/block/cciss_scsi.c
+++ b/drivers/block/cciss_scsi.c
@@ -358,23 +358,68 @@ find_bus_target_lun(int ctlr, int *bus, int *target, int *lun)
358 } 358 }
359 return (!found); 359 return (!found);
360} 360}
361struct scsi2map {
362 char scsi3addr[8];
363 int bus, target, lun;
364};
361 365
362static int 366static int
363cciss_scsi_add_entry(int ctlr, int hostno, 367cciss_scsi_add_entry(int ctlr, int hostno,
364 unsigned char *scsi3addr, int devtype) 368 unsigned char *scsi3addr, int devtype,
369 struct scsi2map *added, int *nadded)
365{ 370{
366 /* assumes hba[ctlr]->scsi_ctlr->lock is held */ 371 /* assumes hba[ctlr]->scsi_ctlr->lock is held */
367 int n = ccissscsi[ctlr].ndevices; 372 int n = ccissscsi[ctlr].ndevices;
368 struct cciss_scsi_dev_t *sd; 373 struct cciss_scsi_dev_t *sd;
374 int i, bus, target, lun;
375 unsigned char addr1[8], addr2[8];
369 376
370 if (n >= CCISS_MAX_SCSI_DEVS_PER_HBA) { 377 if (n >= CCISS_MAX_SCSI_DEVS_PER_HBA) {
371 printk("cciss%d: Too many devices, " 378 printk("cciss%d: Too many devices, "
372 "some will be inaccessible.\n", ctlr); 379 "some will be inaccessible.\n", ctlr);
373 return -1; 380 return -1;
374 } 381 }
382
383 bus = target = -1;
384 lun = 0;
385 /* Is this device a non-zero lun of a multi-lun device */
386 /* byte 4 of the 8-byte LUN addr will contain the logical unit no. */
387 if (scsi3addr[4] != 0) {
388 /* Search through our list and find the device which */
389 /* has the same 8 byte LUN address, excepting byte 4. */
390 /* Assign the same bus and target for this new LUN. */
391 /* Use the logical unit number from the firmware. */
392 memcpy(addr1, scsi3addr, 8);
393 addr1[4] = 0;
394 for (i = 0; i < n; i++) {
395 sd = &ccissscsi[ctlr].dev[i];
396 memcpy(addr2, sd->scsi3addr, 8);
397 addr2[4] = 0;
398 /* differ only in byte 4? */
399 if (memcmp(addr1, addr2, 8) == 0) {
400 bus = sd->bus;
401 target = sd->target;
402 lun = scsi3addr[4];
403 break;
404 }
405 }
406 }
407
375 sd = &ccissscsi[ctlr].dev[n]; 408 sd = &ccissscsi[ctlr].dev[n];
376 if (find_bus_target_lun(ctlr, &sd->bus, &sd->target, &sd->lun) != 0) 409 if (lun == 0) {
377 return -1; 410 if (find_bus_target_lun(ctlr,
411 &sd->bus, &sd->target, &sd->lun) != 0)
412 return -1;
413 } else {
414 sd->bus = bus;
415 sd->target = target;
416 sd->lun = lun;
417 }
418 added[*nadded].bus = sd->bus;
419 added[*nadded].target = sd->target;
420 added[*nadded].lun = sd->lun;
421 (*nadded)++;
422
378 memcpy(&sd->scsi3addr[0], scsi3addr, 8); 423 memcpy(&sd->scsi3addr[0], scsi3addr, 8);
379 sd->devtype = devtype; 424 sd->devtype = devtype;
380 ccissscsi[ctlr].ndevices++; 425 ccissscsi[ctlr].ndevices++;
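The hunk above reuses an existing bus/target pairing when a new 8-byte LUN address matches a known device everywhere except byte 4, which carries the logical unit number. A minimal user-space sketch of that comparison (the helper name is hypothetical, not part of the driver):

#include <string.h>

/* Sketch only: two cciss 8-byte LUN addresses belong to the same
 * multi-LUN device if they differ only in byte 4, the logical unit
 * number reported by the firmware. */
static int same_multilun_device(const unsigned char a[8], const unsigned char b[8])
{
        unsigned char ta[8], tb[8];

        memcpy(ta, a, 8);
        memcpy(tb, b, 8);
        ta[4] = 0;
        tb[4] = 0;
        return memcmp(ta, tb, 8) == 0;
}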
@@ -390,7 +435,8 @@ cciss_scsi_add_entry(int ctlr, int hostno,
390} 435}
391 436
392static void 437static void
393cciss_scsi_remove_entry(int ctlr, int hostno, int entry) 438cciss_scsi_remove_entry(int ctlr, int hostno, int entry,
439 struct scsi2map *removed, int *nremoved)
394{ 440{
395 /* assumes hba[ctlr]->scsi_ctlr->lock is held */ 441 /* assumes hba[ctlr]->scsi_ctlr->lock is held */
396 int i; 442 int i;
@@ -398,6 +444,10 @@ cciss_scsi_remove_entry(int ctlr, int hostno, int entry)
398 444
399 if (entry < 0 || entry >= CCISS_MAX_SCSI_DEVS_PER_HBA) return; 445 if (entry < 0 || entry >= CCISS_MAX_SCSI_DEVS_PER_HBA) return;
400 sd = ccissscsi[ctlr].dev[entry]; 446 sd = ccissscsi[ctlr].dev[entry];
447 removed[*nremoved].bus = sd.bus;
448 removed[*nremoved].target = sd.target;
449 removed[*nremoved].lun = sd.lun;
450 (*nremoved)++;
401 for (i=entry;i<ccissscsi[ctlr].ndevices-1;i++) 451 for (i=entry;i<ccissscsi[ctlr].ndevices-1;i++)
402 ccissscsi[ctlr].dev[i] = ccissscsi[ctlr].dev[i+1]; 452 ccissscsi[ctlr].dev[i] = ccissscsi[ctlr].dev[i+1];
403 ccissscsi[ctlr].ndevices--; 453 ccissscsi[ctlr].ndevices--;
@@ -417,6 +467,26 @@ cciss_scsi_remove_entry(int ctlr, int hostno, int entry)
417 (a)[1] == (b)[1] && \ 467 (a)[1] == (b)[1] && \
418 (a)[0] == (b)[0]) 468 (a)[0] == (b)[0])
419 469
470static void fixup_botched_add(int ctlr, char *scsi3addr)
471{
472 /* called when scsi_add_device fails in order to re-adjust */
473 /* ccissscsi[] to match the mid layer's view. */
474 unsigned long flags;
475 int i, j;
476 CPQ_TAPE_LOCK(ctlr, flags);
477 for (i = 0; i < ccissscsi[ctlr].ndevices; i++) {
478 if (memcmp(scsi3addr,
479 ccissscsi[ctlr].dev[i].scsi3addr, 8) == 0) {
480 for (j = i; j < ccissscsi[ctlr].ndevices-1; j++)
481 ccissscsi[ctlr].dev[j] =
482 ccissscsi[ctlr].dev[j+1];
483 ccissscsi[ctlr].ndevices--;
484 break;
485 }
486 }
487 CPQ_TAPE_UNLOCK(ctlr, flags);
488}
489
420static int 490static int
421adjust_cciss_scsi_table(int ctlr, int hostno, 491adjust_cciss_scsi_table(int ctlr, int hostno,
422 struct cciss_scsi_dev_t sd[], int nsds) 492 struct cciss_scsi_dev_t sd[], int nsds)
@@ -429,13 +499,33 @@ adjust_cciss_scsi_table(int ctlr, int hostno,
429 int i,j, found, changes=0; 499 int i,j, found, changes=0;
430 struct cciss_scsi_dev_t *csd; 500 struct cciss_scsi_dev_t *csd;
431 unsigned long flags; 501 unsigned long flags;
502 struct scsi2map *added, *removed;
503 int nadded, nremoved;
504 struct Scsi_Host *sh = NULL;
505
506 added = kzalloc(sizeof(*added) * CCISS_MAX_SCSI_DEVS_PER_HBA,
507 GFP_KERNEL);
508 removed = kzalloc(sizeof(*removed) * CCISS_MAX_SCSI_DEVS_PER_HBA,
509 GFP_KERNEL);
510
511 if (!added || !removed) {
512 printk(KERN_WARNING "cciss%d: Out of memory in "
513 "adjust_cciss_scsi_table\n", ctlr);
514 goto free_and_out;
515 }
432 516
433 CPQ_TAPE_LOCK(ctlr, flags); 517 CPQ_TAPE_LOCK(ctlr, flags);
434 518
519 if (hostno != -1) /* if it's not the first time... */
520 sh = ((struct cciss_scsi_adapter_data_t *)
521 hba[ctlr]->scsi_ctlr)->scsi_host;
522
435 /* find any devices in ccissscsi[] that are not in 523 /* find any devices in ccissscsi[] that are not in
436 sd[] and remove them from ccissscsi[] */ 524 sd[] and remove them from ccissscsi[] */
437 525
438 i = 0; 526 i = 0;
527 nremoved = 0;
528 nadded = 0;
439 while(i<ccissscsi[ctlr].ndevices) { 529 while(i<ccissscsi[ctlr].ndevices) {
440 csd = &ccissscsi[ctlr].dev[i]; 530 csd = &ccissscsi[ctlr].dev[i];
441 found=0; 531 found=0;
@@ -455,8 +545,9 @@ adjust_cciss_scsi_table(int ctlr, int hostno,
455 /* printk("cciss%d: %s device c%db%dt%dl%d removed.\n", 545 /* printk("cciss%d: %s device c%db%dt%dl%d removed.\n",
456 ctlr, scsi_device_type(csd->devtype), hostno, 546 ctlr, scsi_device_type(csd->devtype), hostno,
457 csd->bus, csd->target, csd->lun); */ 547 csd->bus, csd->target, csd->lun); */
458 cciss_scsi_remove_entry(ctlr, hostno, i); 548 cciss_scsi_remove_entry(ctlr, hostno, i,
459 /* note, i not incremented */ 549 removed, &nremoved);
550 /* remove ^^^, hence i not incremented */
460 } 551 }
461 else if (found == 1) { /* device is different kind */ 552 else if (found == 1) { /* device is different kind */
462 changes++; 553 changes++;
@@ -464,8 +555,15 @@ adjust_cciss_scsi_table(int ctlr, int hostno,
464 "(device type now %s).\n", 555 "(device type now %s).\n",
465 ctlr, hostno, csd->bus, csd->target, csd->lun, 556 ctlr, hostno, csd->bus, csd->target, csd->lun,
466 scsi_device_type(csd->devtype)); 557 scsi_device_type(csd->devtype));
558 cciss_scsi_remove_entry(ctlr, hostno, i,
559 removed, &nremoved);
560 /* remove ^^^, hence i not incremented */
561 if (cciss_scsi_add_entry(ctlr, hostno,
562 &sd[j].scsi3addr[0], sd[j].devtype,
563 added, &nadded) != 0)
564 /* we just removed one, so add can't fail. */
565 BUG();
467 csd->devtype = sd[j].devtype; 566 csd->devtype = sd[j].devtype;
468 i++; /* so just move along. */
469 } else /* device is same as it ever was, */ 567 } else /* device is same as it ever was, */
470 i++; /* so just move along. */ 568 i++; /* so just move along. */
471 } 569 }
@@ -489,7 +587,9 @@ adjust_cciss_scsi_table(int ctlr, int hostno,
489 if (!found) { 587 if (!found) {
490 changes++; 588 changes++;
491 if (cciss_scsi_add_entry(ctlr, hostno, 589 if (cciss_scsi_add_entry(ctlr, hostno,
492 &sd[i].scsi3addr[0], sd[i].devtype) != 0) 590
591 &sd[i].scsi3addr[0], sd[i].devtype,
592 added, &nadded) != 0)
493 break; 593 break;
494 } else if (found == 1) { 594 } else if (found == 1) {
495 /* should never happen... */ 595 /* should never happen... */
@@ -501,9 +601,50 @@ adjust_cciss_scsi_table(int ctlr, int hostno,
501 } 601 }
502 CPQ_TAPE_UNLOCK(ctlr, flags); 602 CPQ_TAPE_UNLOCK(ctlr, flags);
503 603
504 if (!changes) 604 /* Don't notify scsi mid layer of any changes the first time through */
505 printk("cciss%d: No device changes detected.\n", ctlr); 605 /* (or if there are no changes) scsi_scan_host will do it later the */
606 /* first time through. */
607 if (hostno == -1 || !changes)
608 goto free_and_out;
609
610 /* Notify scsi mid layer of any removed devices */
611 for (i = 0; i < nremoved; i++) {
612 struct scsi_device *sdev =
613 scsi_device_lookup(sh, removed[i].bus,
614 removed[i].target, removed[i].lun);
615 if (sdev != NULL) {
616 scsi_remove_device(sdev);
617 scsi_device_put(sdev);
618 } else {
619 /* We don't expect to get here. */
620 /* future cmds to this device will get selection */
621 /* timeout as if the device was gone. */
622 printk(KERN_WARNING "cciss%d: didn't find "
623 "c%db%dt%dl%d\n for removal.",
624 ctlr, hostno, removed[i].bus,
625 removed[i].target, removed[i].lun);
626 }
627 }
628
629 /* Notify scsi mid layer of any added devices */
630 for (i = 0; i < nadded; i++) {
631 int rc;
632 rc = scsi_add_device(sh, added[i].bus,
633 added[i].target, added[i].lun);
634 if (rc == 0)
635 continue;
636 printk(KERN_WARNING "cciss%d: scsi_add_device "
637 "c%db%dt%dl%d failed, device not added.\n",
638 ctlr, hostno,
639 added[i].bus, added[i].target, added[i].lun);
640 /* now we have to remove it from ccissscsi, */
641 /* since it didn't get added to scsi mid layer */
642 fixup_botched_add(ctlr, added[i].scsi3addr);
643 }
506 644
645free_and_out:
646 kfree(added);
647 kfree(removed);
507 return 0; 648 return 0;
508} 649}
509 650
@@ -1355,32 +1496,6 @@ cciss_unregister_scsi(int ctlr)
1355} 1496}
1356 1497
1357static int 1498static int
1358cciss_register_scsi(int ctlr)
1359{
1360 unsigned long flags;
1361
1362 CPQ_TAPE_LOCK(ctlr, flags);
1363
1364 /* Since this is really a block driver, the SCSI core may not be
1365 initialized at init time, in which case, calling scsi_register_host
1366 would hang. Instead, we do it later, via /proc filesystem
1367 and rc scripts, when we know SCSI core is good to go. */
1368
1369 /* Only register if SCSI devices are detected. */
1370 if (ccissscsi[ctlr].ndevices != 0) {
1371 ((struct cciss_scsi_adapter_data_t *)
1372 hba[ctlr]->scsi_ctlr)->registered = 1;
1373 CPQ_TAPE_UNLOCK(ctlr, flags);
1374 return cciss_scsi_detect(ctlr);
1375 }
1376 CPQ_TAPE_UNLOCK(ctlr, flags);
1377 printk(KERN_INFO
1378 "cciss%d: No appropriate SCSI device detected, "
1379 "SCSI subsystem not engaged.\n", ctlr);
1380 return 0;
1381}
1382
1383static int
1384cciss_engage_scsi(int ctlr) 1499cciss_engage_scsi(int ctlr)
1385{ 1500{
1386 struct cciss_scsi_adapter_data_t *sa; 1501 struct cciss_scsi_adapter_data_t *sa;
@@ -1391,15 +1506,15 @@ cciss_engage_scsi(int ctlr)
1391 sa = (struct cciss_scsi_adapter_data_t *) hba[ctlr]->scsi_ctlr; 1506 sa = (struct cciss_scsi_adapter_data_t *) hba[ctlr]->scsi_ctlr;
1392 stk = &sa->cmd_stack; 1507 stk = &sa->cmd_stack;
1393 1508
1394 if (((struct cciss_scsi_adapter_data_t *) 1509 if (sa->registered) {
1395 hba[ctlr]->scsi_ctlr)->registered) {
1396 printk("cciss%d: SCSI subsystem already engaged.\n", ctlr); 1510 printk("cciss%d: SCSI subsystem already engaged.\n", ctlr);
1397 spin_unlock_irqrestore(CCISS_LOCK(ctlr), flags); 1511 spin_unlock_irqrestore(CCISS_LOCK(ctlr), flags);
1398 return ENXIO; 1512 return ENXIO;
1399 } 1513 }
1514 sa->registered = 1;
1400 spin_unlock_irqrestore(CCISS_LOCK(ctlr), flags); 1515 spin_unlock_irqrestore(CCISS_LOCK(ctlr), flags);
1401 cciss_update_non_disk_devices(ctlr, -1); 1516 cciss_update_non_disk_devices(ctlr, -1);
1402 cciss_register_scsi(ctlr); 1517 cciss_scsi_detect(ctlr);
1403 return 0; 1518 return 0;
1404} 1519}
1405 1520
@@ -1493,7 +1608,5 @@ static int cciss_eh_abort_handler(struct scsi_cmnd *scsicmd)
1493/* If no tape support, then these become defined out of existence */ 1608/* If no tape support, then these become defined out of existence */
1494 1609
1495#define cciss_scsi_setup(cntl_num) 1610#define cciss_scsi_setup(cntl_num)
1496#define cciss_unregister_scsi(ctlr)
1497#define cciss_register_scsi(ctlr)
1498 1611
1499#endif /* CONFIG_CISS_SCSI_TAPE */ 1612#endif /* CONFIG_CISS_SCSI_TAPE */
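Taken together, the cciss_scsi.c hunks turn adjust_cciss_scsi_table() into a reconcile-then-notify scheme: entries added to or removed from ccissscsi[] are recorded in the scsi2map arrays while the lock is held, the SCSI mid layer is told about them afterwards via scsi_remove_device()/scsi_add_device(), and fixup_botched_add() backs out the bookkeeping if an add is refused. The following stand-alone user-space sketch (hypothetical names, not kernel APIs) shows the same pattern in isolation:

#include <stdio.h>
#include <string.h>

#define MAX_DEVS 8

struct dev { unsigned char addr[8]; };

static struct dev table[MAX_DEVS];
static int table_len;

static int same_addr(const struct dev *a, const struct dev *b)
{
        return memcmp(a->addr, b->addr, 8) == 0;
}

static void drop_entry(int idx)
{
        memmove(&table[idx], &table[idx + 1],
                (table_len - idx - 1) * sizeof(table[0]));
        table_len--;
}

/* Stand-ins for scsi_remove_device() / scsi_add_device(). */
static void report_removed(const struct dev *d) { printf("removed %02x\n", d->addr[0]); }
static int report_added(const struct dev *d) { printf("added %02x\n", d->addr[0]); return 0; }

static void adjust_table(const struct dev *found, int nfound)
{
        struct dev added[MAX_DEVS], removed[MAX_DEVS];
        int nadded = 0, nremoved = 0, i, j, present;

        /* Pass 1: remove stale entries, remembering each one. */
        i = 0;
        while (i < table_len) {
                present = 0;
                for (j = 0; j < nfound; j++)
                        present |= same_addr(&table[i], &found[j]);
                if (!present) {
                        removed[nremoved++] = table[i];
                        drop_entry(i);          /* note: i not incremented */
                } else {
                        i++;
                }
        }

        /* Pass 2: add newly found entries, remembering each one. */
        for (i = 0; i < nfound; i++) {
                present = 0;
                for (j = 0; j < table_len; j++)
                        present |= same_addr(&found[i], &table[j]);
                if (!present && table_len < MAX_DEVS) {
                        table[table_len++] = found[i];
                        added[nadded++] = found[i];
                }
        }

        /* Only now tell the outside world about the changes. */
        for (i = 0; i < nremoved; i++)
                report_removed(&removed[i]);
        for (i = 0; i < nadded; i++) {
                if (report_added(&added[i]) == 0)
                        continue;
                /* Report failed: undo our own bookkeeping, as fixup_botched_add() does. */
                for (j = 0; j < table_len; j++) {
                        if (same_addr(&added[i], &table[j])) {
                                drop_entry(j);
                                break;
                        }
                }
        }
}

int main(void)
{
        struct dev probe[2] = { { { 1 } }, { { 2 } } };

        adjust_table(probe, 2); /* first scan: both devices reported as added */
        adjust_table(probe, 1); /* second scan: device 2 reported as removed */
        return 0;
}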
diff --git a/drivers/block/xen-blkfront.c b/drivers/block/xen-blkfront.c
index 9ae05c584234..3ca643cafccd 100644
--- a/drivers/block/xen-blkfront.c
+++ b/drivers/block/xen-blkfront.c
@@ -154,8 +154,8 @@ static int blkif_getgeo(struct block_device *bd, struct hd_geometry *hg)
154 return 0; 154 return 0;
155} 155}
156 156
157int blkif_ioctl(struct inode *inode, struct file *filep, 157static int blkif_ioctl(struct inode *inode, struct file *filep,
158 unsigned command, unsigned long argument) 158 unsigned command, unsigned long argument)
159{ 159{
160 struct blkfront_info *info = 160 struct blkfront_info *info =
161 inode->i_bdev->bd_disk->private_data; 161 inode->i_bdev->bd_disk->private_data;
diff --git a/drivers/bluetooth/bcm203x.c b/drivers/bluetooth/bcm203x.c
index 8919ccf8274b..ee40201c7278 100644
--- a/drivers/bluetooth/bcm203x.c
+++ b/drivers/bluetooth/bcm203x.c
@@ -42,9 +42,7 @@
42#define BT_DBG(D...) 42#define BT_DBG(D...)
43#endif 43#endif
44 44
45#define VERSION "1.1" 45#define VERSION "1.2"
46
47static int ignore = 0;
48 46
49static struct usb_device_id bcm203x_table[] = { 47static struct usb_device_id bcm203x_table[] = {
50 /* Broadcom Blutonium (BCM2033) */ 48 /* Broadcom Blutonium (BCM2033) */
@@ -175,7 +173,7 @@ static int bcm203x_probe(struct usb_interface *intf, const struct usb_device_id
175 173
176 BT_DBG("intf %p id %p", intf, id); 174 BT_DBG("intf %p id %p", intf, id);
177 175
178 if (ignore || (intf->cur_altsetting->desc.bInterfaceNumber != 0)) 176 if (intf->cur_altsetting->desc.bInterfaceNumber != 0)
179 return -ENODEV; 177 return -ENODEV;
180 178
181 data = kzalloc(sizeof(*data), GFP_KERNEL); 179 data = kzalloc(sizeof(*data), GFP_KERNEL);
@@ -300,9 +298,6 @@ static void __exit bcm203x_exit(void)
300module_init(bcm203x_init); 298module_init(bcm203x_init);
301module_exit(bcm203x_exit); 299module_exit(bcm203x_exit);
302 300
303module_param(ignore, bool, 0644);
304MODULE_PARM_DESC(ignore, "Ignore devices from the matching table");
305
306MODULE_AUTHOR("Marcel Holtmann <marcel@holtmann.org>"); 301MODULE_AUTHOR("Marcel Holtmann <marcel@holtmann.org>");
307MODULE_DESCRIPTION("Broadcom Blutonium firmware driver ver " VERSION); 302MODULE_DESCRIPTION("Broadcom Blutonium firmware driver ver " VERSION);
308MODULE_VERSION(VERSION); 303MODULE_VERSION(VERSION);
diff --git a/drivers/bluetooth/bfusb.c b/drivers/bluetooth/bfusb.c
index 0c211adbc063..90a094634630 100644
--- a/drivers/bluetooth/bfusb.c
+++ b/drivers/bluetooth/bfusb.c
@@ -43,9 +43,7 @@
43#define BT_DBG(D...) 43#define BT_DBG(D...)
44#endif 44#endif
45 45
46#define VERSION "1.1" 46#define VERSION "1.2"
47
48static int ignore = 0;
49 47
50static struct usb_driver bfusb_driver; 48static struct usb_driver bfusb_driver;
51 49
@@ -656,9 +654,6 @@ static int bfusb_probe(struct usb_interface *intf, const struct usb_device_id *i
656 654
657 BT_DBG("intf %p id %p", intf, id); 655 BT_DBG("intf %p id %p", intf, id);
658 656
659 if (ignore)
660 return -ENODEV;
661
662 /* Check number of endpoints */ 657 /* Check number of endpoints */
663 if (intf->cur_altsetting->desc.bNumEndpoints < 2) 658 if (intf->cur_altsetting->desc.bNumEndpoints < 2)
664 return -EIO; 659 return -EIO;
@@ -795,9 +790,6 @@ static void __exit bfusb_exit(void)
795module_init(bfusb_init); 790module_init(bfusb_init);
796module_exit(bfusb_exit); 791module_exit(bfusb_exit);
797 792
798module_param(ignore, bool, 0644);
799MODULE_PARM_DESC(ignore, "Ignore devices from the matching table");
800
801MODULE_AUTHOR("Marcel Holtmann <marcel@holtmann.org>"); 793MODULE_AUTHOR("Marcel Holtmann <marcel@holtmann.org>");
802MODULE_DESCRIPTION("BlueFRITZ! USB driver ver " VERSION); 794MODULE_DESCRIPTION("BlueFRITZ! USB driver ver " VERSION);
803MODULE_VERSION(VERSION); 795MODULE_VERSION(VERSION);
diff --git a/drivers/bluetooth/bpa10x.c b/drivers/bluetooth/bpa10x.c
index 3b28658f5a1f..1e55a658e6ce 100644
--- a/drivers/bluetooth/bpa10x.c
+++ b/drivers/bluetooth/bpa10x.c
@@ -40,9 +40,7 @@
40#define BT_DBG(D...) 40#define BT_DBG(D...)
41#endif 41#endif
42 42
43#define VERSION "0.9" 43#define VERSION "0.10"
44
45static int ignore = 0;
46 44
47static struct usb_device_id bpa10x_table[] = { 45static struct usb_device_id bpa10x_table[] = {
48 /* Tektronix BPA 100/105 (Digianswer) */ 46 /* Tektronix BPA 100/105 (Digianswer) */
@@ -460,9 +458,6 @@ static int bpa10x_probe(struct usb_interface *intf, const struct usb_device_id *
460 458
461 BT_DBG("intf %p id %p", intf, id); 459 BT_DBG("intf %p id %p", intf, id);
462 460
463 if (ignore)
464 return -ENODEV;
465
466 if (intf->cur_altsetting->desc.bInterfaceNumber != 0) 461 if (intf->cur_altsetting->desc.bInterfaceNumber != 0)
467 return -ENODEV; 462 return -ENODEV;
468 463
@@ -546,9 +541,6 @@ static void __exit bpa10x_exit(void)
546module_init(bpa10x_init); 541module_init(bpa10x_init);
547module_exit(bpa10x_exit); 542module_exit(bpa10x_exit);
548 543
549module_param(ignore, bool, 0644);
550MODULE_PARM_DESC(ignore, "Ignore devices from the matching table");
551
552MODULE_AUTHOR("Marcel Holtmann <marcel@holtmann.org>"); 544MODULE_AUTHOR("Marcel Holtmann <marcel@holtmann.org>");
553MODULE_DESCRIPTION("Digianswer Bluetooth USB driver ver " VERSION); 545MODULE_DESCRIPTION("Digianswer Bluetooth USB driver ver " VERSION);
554MODULE_VERSION(VERSION); 546MODULE_VERSION(VERSION);
diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c
index 12e108914f19..95ae9ba5661e 100644
--- a/drivers/bluetooth/btusb.c
+++ b/drivers/bluetooth/btusb.c
@@ -41,18 +41,122 @@
41#define BT_DBG(D...) 41#define BT_DBG(D...)
42#endif 42#endif
43 43
44#define VERSION "0.1" 44#define VERSION "0.2"
45
46static int ignore_dga;
47static int ignore_csr;
48static int ignore_sniffer;
49static int disable_scofix;
50static int force_scofix;
51static int reset;
52
53static struct usb_driver btusb_driver;
54
55#define BTUSB_IGNORE 0x01
56#define BTUSB_RESET 0x02
57#define BTUSB_DIGIANSWER 0x04
58#define BTUSB_CSR 0x08
59#define BTUSB_SNIFFER 0x10
60#define BTUSB_BCM92035 0x20
61#define BTUSB_BROKEN_ISOC 0x40
62#define BTUSB_WRONG_SCO_MTU 0x80
45 63
46static struct usb_device_id btusb_table[] = { 64static struct usb_device_id btusb_table[] = {
47 /* Generic Bluetooth USB device */ 65 /* Generic Bluetooth USB device */
48 { USB_DEVICE_INFO(0xe0, 0x01, 0x01) }, 66 { USB_DEVICE_INFO(0xe0, 0x01, 0x01) },
49 67
68 /* AVM BlueFRITZ! USB v2.0 */
69 { USB_DEVICE(0x057c, 0x3800) },
70
71 /* Bluetooth Ultraport Module from IBM */
72 { USB_DEVICE(0x04bf, 0x030a) },
73
74 /* ALPS Modules with non-standard id */
75 { USB_DEVICE(0x044e, 0x3001) },
76 { USB_DEVICE(0x044e, 0x3002) },
77
78 /* Ericsson with non-standard id */
79 { USB_DEVICE(0x0bdb, 0x1002) },
80
81 /* Canyon CN-BTU1 with HID interfaces */
82 { USB_DEVICE(0x0c10, 0x0000), .driver_info = BTUSB_RESET },
83
50 { } /* Terminating entry */ 84 { } /* Terminating entry */
51}; 85};
52 86
53MODULE_DEVICE_TABLE(usb, btusb_table); 87MODULE_DEVICE_TABLE(usb, btusb_table);
54 88
55static struct usb_device_id blacklist_table[] = { 89static struct usb_device_id blacklist_table[] = {
90 /* CSR BlueCore devices */
91 { USB_DEVICE(0x0a12, 0x0001), .driver_info = BTUSB_CSR },
92
93 /* Broadcom BCM2033 without firmware */
94 { USB_DEVICE(0x0a5c, 0x2033), .driver_info = BTUSB_IGNORE },
95
96 /* Broadcom BCM2035 */
97 { USB_DEVICE(0x0a5c, 0x2035), .driver_info = BTUSB_RESET | BTUSB_WRONG_SCO_MTU },
98 { USB_DEVICE(0x0a5c, 0x200a), .driver_info = BTUSB_RESET | BTUSB_WRONG_SCO_MTU },
99
100 /* Broadcom BCM2045 */
101 { USB_DEVICE(0x0a5c, 0x2039), .driver_info = BTUSB_RESET | BTUSB_WRONG_SCO_MTU },
102 { USB_DEVICE(0x0a5c, 0x2101), .driver_info = BTUSB_RESET | BTUSB_WRONG_SCO_MTU },
103
104 /* Broadcom BCM2046 */
105 { USB_DEVICE(0x0a5c, 0x2151), .driver_info = BTUSB_RESET },
106
107 /* IBM/Lenovo ThinkPad with Broadcom chip */
108 { USB_DEVICE(0x0a5c, 0x201e), .driver_info = BTUSB_RESET | BTUSB_WRONG_SCO_MTU },
109 { USB_DEVICE(0x0a5c, 0x2110), .driver_info = BTUSB_RESET | BTUSB_WRONG_SCO_MTU },
110
111 /* Targus ACB10US */
112 { USB_DEVICE(0x0a5c, 0x2100), .driver_info = BTUSB_RESET },
113
114 /* ANYCOM Bluetooth USB-200 and USB-250 */
115 { USB_DEVICE(0x0a5c, 0x2111), .driver_info = BTUSB_RESET },
116
117 /* HP laptop with Broadcom chip */
118 { USB_DEVICE(0x03f0, 0x171d), .driver_info = BTUSB_RESET | BTUSB_WRONG_SCO_MTU },
119
120 /* Dell laptop with Broadcom chip */
121 { USB_DEVICE(0x413c, 0x8126), .driver_info = BTUSB_RESET | BTUSB_WRONG_SCO_MTU },
122
123 /* Dell Wireless 370 */
124 { USB_DEVICE(0x413c, 0x8156), .driver_info = BTUSB_RESET | BTUSB_WRONG_SCO_MTU },
125
126 /* Dell Wireless 410 */
127 { USB_DEVICE(0x413c, 0x8152), .driver_info = BTUSB_RESET | BTUSB_WRONG_SCO_MTU },
128
129 /* Microsoft Wireless Transceiver for Bluetooth 2.0 */
130 { USB_DEVICE(0x045e, 0x009c), .driver_info = BTUSB_RESET },
131
132 /* Kensington Bluetooth USB adapter */
133 { USB_DEVICE(0x047d, 0x105d), .driver_info = BTUSB_RESET },
134 { USB_DEVICE(0x047d, 0x105e), .driver_info = BTUSB_RESET | BTUSB_WRONG_SCO_MTU },
135
136 /* ISSC Bluetooth Adapter v3.1 */
137 { USB_DEVICE(0x1131, 0x1001), .driver_info = BTUSB_RESET },
138
139 /* RTX Telecom based adapters with buggy SCO support */
140 { USB_DEVICE(0x0400, 0x0807), .driver_info = BTUSB_BROKEN_ISOC },
141 { USB_DEVICE(0x0400, 0x080a), .driver_info = BTUSB_BROKEN_ISOC },
142
143 /* CONWISE Technology based adapters with buggy SCO support */
144 { USB_DEVICE(0x0e5e, 0x6622), .driver_info = BTUSB_BROKEN_ISOC },
145
146 /* Belkin F8T012 and F8T013 devices */
147 { USB_DEVICE(0x050d, 0x0012), .driver_info = BTUSB_RESET | BTUSB_WRONG_SCO_MTU },
148 { USB_DEVICE(0x050d, 0x0013), .driver_info = BTUSB_RESET | BTUSB_WRONG_SCO_MTU },
149
150 /* Digianswer devices */
151 { USB_DEVICE(0x08fd, 0x0001), .driver_info = BTUSB_DIGIANSWER },
152 { USB_DEVICE(0x08fd, 0x0002), .driver_info = BTUSB_IGNORE },
153
154 /* CSR BlueCore Bluetooth Sniffer */
155 { USB_DEVICE(0x0a12, 0x0002), .driver_info = BTUSB_SNIFFER },
156
157 /* Frontline ComProbe Bluetooth Sniffer */
158 { USB_DEVICE(0x16d3, 0x0002), .driver_info = BTUSB_SNIFFER },
159
56 { } /* Terminating entry */ 160 { } /* Terminating entry */
57}; 161};
58 162
@@ -433,6 +537,7 @@ static int btusb_probe(struct usb_interface *intf,
433 537
434 BT_DBG("intf %p id %p", intf, id); 538 BT_DBG("intf %p id %p", intf, id);
435 539
540 /* interface numbers are hardcoded in the spec */
436 if (intf->cur_altsetting->desc.bInterfaceNumber != 0) 541 if (intf->cur_altsetting->desc.bInterfaceNumber != 0)
437 return -ENODEV; 542 return -ENODEV;
438 543
@@ -443,6 +548,18 @@ static int btusb_probe(struct usb_interface *intf,
443 id = match; 548 id = match;
444 } 549 }
445 550
551 if (id->driver_info == BTUSB_IGNORE)
552 return -ENODEV;
553
554 if (ignore_dga && id->driver_info & BTUSB_DIGIANSWER)
555 return -ENODEV;
556
557 if (ignore_csr && id->driver_info & BTUSB_CSR)
558 return -ENODEV;
559
560 if (ignore_sniffer && id->driver_info & BTUSB_SNIFFER)
561 return -ENODEV;
562
446 data = kzalloc(sizeof(*data), GFP_KERNEL); 563 data = kzalloc(sizeof(*data), GFP_KERNEL);
447 if (!data) 564 if (!data)
448 return -ENOMEM; 565 return -ENOMEM;
@@ -503,7 +620,31 @@ static int btusb_probe(struct usb_interface *intf,
503 620
504 hdev->owner = THIS_MODULE; 621 hdev->owner = THIS_MODULE;
505 622
506 set_bit(HCI_QUIRK_RESET_ON_INIT, &hdev->quirks); 623 if (reset || id->driver_info & BTUSB_RESET)
624 set_bit(HCI_QUIRK_RESET_ON_INIT, &hdev->quirks);
625
626 if (force_scofix || id->driver_info & BTUSB_WRONG_SCO_MTU) {
627 if (!disable_scofix)
628 set_bit(HCI_QUIRK_FIXUP_BUFFER_SIZE, &hdev->quirks);
629 }
630
631 if (id->driver_info & BTUSB_SNIFFER) {
632 struct usb_device *udev = interface_to_usbdev(intf);
633
634 if (le16_to_cpu(udev->descriptor.bcdDevice) > 0x997)
635 set_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks);
636 }
637
638 if (id->driver_info & BTUSB_BCM92035) {
639 unsigned char cmd[] = { 0x3b, 0xfc, 0x01, 0x00 };
640 struct sk_buff *skb;
641
642 skb = bt_skb_alloc(sizeof(cmd), GFP_KERNEL);
643 if (skb) {
644 memcpy(skb_put(skb, sizeof(cmd)), cmd, sizeof(cmd));
645 skb_queue_tail(&hdev->driver_init, skb);
646 }
647 }
507 648
508 err = hci_register_dev(hdev); 649 err = hci_register_dev(hdev);
509 if (err < 0) { 650 if (err < 0) {
@@ -558,6 +699,24 @@ static void __exit btusb_exit(void)
558module_init(btusb_init); 699module_init(btusb_init);
559module_exit(btusb_exit); 700module_exit(btusb_exit);
560 701
702module_param(ignore_dga, bool, 0644);
703MODULE_PARM_DESC(ignore_dga, "Ignore devices with id 08fd:0001");
704
705module_param(ignore_csr, bool, 0644);
706MODULE_PARM_DESC(ignore_csr, "Ignore devices with id 0a12:0001");
707
708module_param(ignore_sniffer, bool, 0644);
709MODULE_PARM_DESC(ignore_sniffer, "Ignore devices with id 0a12:0002");
710
711module_param(disable_scofix, bool, 0644);
712MODULE_PARM_DESC(disable_scofix, "Disable fixup of wrong SCO buffer size");
713
714module_param(force_scofix, bool, 0644);
715MODULE_PARM_DESC(force_scofix, "Force fixup of wrong SCO buffer size");
716
717module_param(reset, bool, 0644);
718MODULE_PARM_DESC(reset, "Send HCI reset command on initialization");
719
561MODULE_AUTHOR("Marcel Holtmann <marcel@holtmann.org>"); 720MODULE_AUTHOR("Marcel Holtmann <marcel@holtmann.org>");
562MODULE_DESCRIPTION("Generic Bluetooth USB driver ver " VERSION); 721MODULE_DESCRIPTION("Generic Bluetooth USB driver ver " VERSION);
563MODULE_VERSION(VERSION); 722MODULE_VERSION(VERSION);
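The btusb.c changes move per-device behaviour out of module parameters and into blacklist_table: each VID:PID entry carries quirk bits in driver_info, and btusb_probe() tests those bits to refuse, reset, or fix up a device. A small stand-alone sketch of that lookup pattern (illustrative table contents and names, not the kernel's usb_device_id machinery):

#include <stdint.h>
#include <stdio.h>

#define QUIRK_IGNORE        0x01
#define QUIRK_RESET         0x02
#define QUIRK_WRONG_SCO_MTU 0x80

struct quirk_entry {
        uint16_t vid, pid;
        unsigned long flags;
};

static const struct quirk_entry quirks[] = {
        { 0x0a5c, 0x2033, QUIRK_IGNORE },                       /* BCM2033 without firmware */
        { 0x0a5c, 0x2035, QUIRK_RESET | QUIRK_WRONG_SCO_MTU },  /* BCM2035 */
        { 0, 0, 0 }                                             /* terminator */
};

static unsigned long lookup_quirks(uint16_t vid, uint16_t pid)
{
        const struct quirk_entry *q;

        for (q = quirks; q->vid || q->pid; q++)
                if (q->vid == vid && q->pid == pid)
                        return q->flags;
        return 0;
}

int main(void)
{
        unsigned long flags = lookup_quirks(0x0a5c, 0x2035);

        if (flags & QUIRK_IGNORE)
                return 0;       /* refuse to bind, like returning -ENODEV */
        if (flags & QUIRK_RESET)
                printf("would set HCI_QUIRK_RESET_ON_INIT\n");
        if (flags & QUIRK_WRONG_SCO_MTU)
                printf("would set HCI_QUIRK_FIXUP_BUFFER_SIZE\n");
        return 0;
}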
diff --git a/drivers/bluetooth/hci_usb.c b/drivers/bluetooth/hci_usb.c
index 192522ebb771..e397572bf574 100644
--- a/drivers/bluetooth/hci_usb.c
+++ b/drivers/bluetooth/hci_usb.c
@@ -62,7 +62,6 @@
62#define URB_ZERO_PACKET 0 62#define URB_ZERO_PACKET 0
63#endif 63#endif
64 64
65static int ignore;
66static int ignore_dga; 65static int ignore_dga;
67static int ignore_csr; 66static int ignore_csr;
68static int ignore_sniffer; 67static int ignore_sniffer;
@@ -74,7 +73,7 @@ static int reset;
74static int isoc = 2; 73static int isoc = 2;
75#endif 74#endif
76 75
77#define VERSION "2.9" 76#define VERSION "2.10"
78 77
79static struct usb_driver hci_usb_driver; 78static struct usb_driver hci_usb_driver;
80 79
@@ -134,6 +133,13 @@ static struct usb_device_id blacklist_ids[] = {
134 133
135 /* Dell laptop with Broadcom chip */ 134 /* Dell laptop with Broadcom chip */
136 { USB_DEVICE(0x413c, 0x8126), .driver_info = HCI_RESET | HCI_WRONG_SCO_MTU }, 135 { USB_DEVICE(0x413c, 0x8126), .driver_info = HCI_RESET | HCI_WRONG_SCO_MTU },
136 /* Dell Wireless 370 */
137 { USB_DEVICE(0x413c, 0x8156), .driver_info = HCI_RESET | HCI_WRONG_SCO_MTU },
138 /* Dell Wireless 410 */
139 { USB_DEVICE(0x413c, 0x8152), .driver_info = HCI_RESET | HCI_WRONG_SCO_MTU },
140
141 /* Broadcom 2046 */
142 { USB_DEVICE(0x0a5c, 0x2151), .driver_info = HCI_RESET },
137 143
138 /* Microsoft Wireless Transceiver for Bluetooth 2.0 */ 144 /* Microsoft Wireless Transceiver for Bluetooth 2.0 */
139 { USB_DEVICE(0x045e, 0x009c), .driver_info = HCI_RESET }, 145 { USB_DEVICE(0x045e, 0x009c), .driver_info = HCI_RESET },
@@ -794,7 +800,7 @@ static int hci_usb_probe(struct usb_interface *intf, const struct usb_device_id
794 id = match; 800 id = match;
795 } 801 }
796 802
797 if (ignore || id->driver_info & HCI_IGNORE) 803 if (id->driver_info & HCI_IGNORE)
798 return -ENODEV; 804 return -ENODEV;
799 805
800 if (ignore_dga && id->driver_info & HCI_DIGIANSWER) 806 if (ignore_dga && id->driver_info & HCI_DIGIANSWER)
@@ -1101,9 +1107,6 @@ static void __exit hci_usb_exit(void)
1101module_init(hci_usb_init); 1107module_init(hci_usb_init);
1102module_exit(hci_usb_exit); 1108module_exit(hci_usb_exit);
1103 1109
1104module_param(ignore, bool, 0644);
1105MODULE_PARM_DESC(ignore, "Ignore devices from the matching table");
1106
1107module_param(ignore_dga, bool, 0644); 1110module_param(ignore_dga, bool, 0644);
1108MODULE_PARM_DESC(ignore_dga, "Ignore devices with id 08fd:0001"); 1111MODULE_PARM_DESC(ignore_dga, "Ignore devices with id 08fd:0001");
1109 1112
diff --git a/drivers/cdrom/cdrom.c b/drivers/cdrom/cdrom.c
index a5da35632651..d9d1b65d206c 100644
--- a/drivers/cdrom/cdrom.c
+++ b/drivers/cdrom/cdrom.c
@@ -1436,10 +1436,6 @@ static void cdrom_count_tracks(struct cdrom_device_info *cdi, tracktype* tracks)
1436 tracks->xa=0; 1436 tracks->xa=0;
1437 tracks->error=0; 1437 tracks->error=0;
1438 cdinfo(CD_COUNT_TRACKS, "entering cdrom_count_tracks\n"); 1438 cdinfo(CD_COUNT_TRACKS, "entering cdrom_count_tracks\n");
1439 if (!CDROM_CAN(CDC_PLAY_AUDIO)) {
1440 tracks->error=CDS_NO_INFO;
1441 return;
1442 }
1443 /* Grab the TOC header so we can see how many tracks there are */ 1439 /* Grab the TOC header so we can see how many tracks there are */
1444 if ((ret = cdi->ops->audio_ioctl(cdi, CDROMREADTOCHDR, &header))) { 1440 if ((ret = cdi->ops->audio_ioctl(cdi, CDROMREADTOCHDR, &header))) {
1445 if (ret == -ENOMEDIUM) 1441 if (ret == -ENOMEDIUM)
diff --git a/drivers/cdrom/gdrom.c b/drivers/cdrom/gdrom.c
index 71ec426ecffc..1e0455bd6df9 100644
--- a/drivers/cdrom/gdrom.c
+++ b/drivers/cdrom/gdrom.c
@@ -39,8 +39,8 @@
39#include <asm/io.h> 39#include <asm/io.h>
40#include <asm/dma.h> 40#include <asm/dma.h>
41#include <asm/delay.h> 41#include <asm/delay.h>
42#include <asm/mach/dma.h> 42#include <mach/dma.h>
43#include <asm/mach/sysasic.h> 43#include <mach/sysasic.h>
44 44
45#define GDROM_DEV_NAME "gdrom" 45#define GDROM_DEV_NAME "gdrom"
46#define GD_SESSION_OFFSET 150 46#define GD_SESSION_OFFSET 150
diff --git a/drivers/char/Kconfig b/drivers/char/Kconfig
index d0ac944e1696..caff85149b9d 100644
--- a/drivers/char/Kconfig
+++ b/drivers/char/Kconfig
@@ -8,7 +8,7 @@ config VT
8 bool "Virtual terminal" if EMBEDDED 8 bool "Virtual terminal" if EMBEDDED
9 depends on !S390 9 depends on !S390
10 select INPUT 10 select INPUT
11 default y if !VIOCONS 11 default y
12 ---help--- 12 ---help---
13 If you say Y here, you will get support for terminal devices with 13 If you say Y here, you will get support for terminal devices with
14 display and keyboard devices. These are called "virtual" because you 14 display and keyboard devices. These are called "virtual" because you
diff --git a/drivers/char/Makefile b/drivers/char/Makefile
index 8a161c30e1dc..6850f6da7576 100644
--- a/drivers/char/Makefile
+++ b/drivers/char/Makefile
@@ -55,7 +55,6 @@ obj-$(CONFIG_RAW_DRIVER) += raw.o
55obj-$(CONFIG_SGI_SNSC) += snsc.o snsc_event.o 55obj-$(CONFIG_SGI_SNSC) += snsc.o snsc_event.o
56obj-$(CONFIG_MSPEC) += mspec.o 56obj-$(CONFIG_MSPEC) += mspec.o
57obj-$(CONFIG_MMTIMER) += mmtimer.o 57obj-$(CONFIG_MMTIMER) += mmtimer.o
58obj-$(CONFIG_VIOCONS) += viocons.o
59obj-$(CONFIG_VIOTAPE) += viotape.o 58obj-$(CONFIG_VIOTAPE) += viotape.o
60obj-$(CONFIG_HVCS) += hvcs.o 59obj-$(CONFIG_HVCS) += hvcs.o
61obj-$(CONFIG_IBM_BSR) += bsr.o 60obj-$(CONFIG_IBM_BSR) += bsr.o
diff --git a/drivers/char/amiserial.c b/drivers/char/amiserial.c
index 3530ff417a51..6e763e3f5a81 100644
--- a/drivers/char/amiserial.c
+++ b/drivers/char/amiserial.c
@@ -1254,7 +1254,7 @@ static int rs_break(struct tty_struct *tty, int break_state)
1254 unsigned long flags; 1254 unsigned long flags;
1255 1255
1256 if (serial_paranoia_check(info, tty->name, "rs_break")) 1256 if (serial_paranoia_check(info, tty->name, "rs_break"))
1257 return; 1257 return -EINVAL;
1258 1258
1259 local_irq_save(flags); 1259 local_irq_save(flags);
1260 if (break_state == -1) 1260 if (break_state == -1)
diff --git a/drivers/char/ds1620.c b/drivers/char/ds1620.c
index 34275c6f1da2..74e9cd81b5b2 100644
--- a/drivers/char/ds1620.c
+++ b/drivers/char/ds1620.c
@@ -10,7 +10,7 @@
10#include <linux/init.h> 10#include <linux/init.h>
11#include <linux/smp_lock.h> 11#include <linux/smp_lock.h>
12 12
13#include <asm/hardware.h> 13#include <mach/hardware.h>
14#include <asm/mach-types.h> 14#include <asm/mach-types.h>
15#include <asm/uaccess.h> 15#include <asm/uaccess.h>
16#include <asm/therm.h> 16#include <asm/therm.h>
diff --git a/drivers/char/efirtc.c b/drivers/char/efirtc.c
index 67fbd7aab5db..34d15d548236 100644
--- a/drivers/char/efirtc.c
+++ b/drivers/char/efirtc.c
@@ -37,7 +37,6 @@
37#include <linux/rtc.h> 37#include <linux/rtc.h>
38#include <linux/proc_fs.h> 38#include <linux/proc_fs.h>
39#include <linux/efi.h> 39#include <linux/efi.h>
40#include <linux/smp_lock.h>
41#include <linux/uaccess.h> 40#include <linux/uaccess.h>
42 41
43#include <asm/system.h> 42#include <asm/system.h>
diff --git a/drivers/char/hvc_console.h b/drivers/char/hvc_console.h
index d9ce10915625..9790201718ae 100644
--- a/drivers/char/hvc_console.h
+++ b/drivers/char/hvc_console.h
@@ -6,7 +6,7 @@
6 * Ryan S. Arnold <rsa@us.ibm.com> 6 * Ryan S. Arnold <rsa@us.ibm.com>
7 * 7 *
8 * hvc_console header information: 8 * hvc_console header information:
9 * moved here from include/asm-powerpc/hvconsole.h 9 * moved here from arch/powerpc/include/asm/hvconsole.h
10 * and drivers/char/hvc_console.c 10 * and drivers/char/hvc_console.c
11 * 11 *
12 * This program is free software; you can redistribute it and/or modify 12 * This program is free software; you can redistribute it and/or modify
diff --git a/drivers/char/hvcs.c b/drivers/char/hvcs.c
index 786d518e9477..473d9b14439a 100644
--- a/drivers/char/hvcs.c
+++ b/drivers/char/hvcs.c
@@ -114,7 +114,7 @@
114 * the hvcs_final_close() function in order to get it out of the spinlock. 114 * the hvcs_final_close() function in order to get it out of the spinlock.
115 * Rearranged hvcs_close(). Cleaned up some printks and did some housekeeping 115 * Rearranged hvcs_close(). Cleaned up some printks and did some housekeeping
116 * on the changelog. Removed local CLC_LENGTH and used HVCS_CLC_LENGTH from 116 * on the changelog. Removed local CLC_LENGTH and used HVCS_CLC_LENGTH from
117 * include/asm-powerpc/hvcserver.h 117 * arch/powerpc/include/asm/hvcserver.h
118 * 118 *
119 * 1.3.2 -> 1.3.3 Replaced yield() in hvcs_close() with tty_wait_until_sent() to 119 * 1.3.2 -> 1.3.3 Replaced yield() in hvcs_close() with tty_wait_until_sent() to
120 * prevent possible lockup with realtime scheduling as similarly pointed out by 120 * prevent possible lockup with realtime scheduling as similarly pointed out by
diff --git a/drivers/char/hw_random/ixp4xx-rng.c b/drivers/char/hw_random/ixp4xx-rng.c
index bab43ca32ac1..263567f5f392 100644
--- a/drivers/char/hw_random/ixp4xx-rng.c
+++ b/drivers/char/hw_random/ixp4xx-rng.c
@@ -23,7 +23,7 @@
23#include <linux/hw_random.h> 23#include <linux/hw_random.h>
24 24
25#include <asm/io.h> 25#include <asm/io.h>
26#include <asm/hardware.h> 26#include <mach/hardware.h>
27 27
28 28
29static int ixp4xx_rng_data_read(struct hwrng *rng, u32 *buffer) 29static int ixp4xx_rng_data_read(struct hwrng *rng, u32 *buffer)
diff --git a/drivers/char/ipmi/ipmi_si_intf.c b/drivers/char/ipmi/ipmi_si_intf.c
index 192688344ed2..f52931e1c16e 100644
--- a/drivers/char/ipmi/ipmi_si_intf.c
+++ b/drivers/char/ipmi/ipmi_si_intf.c
@@ -66,8 +66,8 @@
66#include <linux/ctype.h> 66#include <linux/ctype.h>
67 67
68#ifdef CONFIG_PPC_OF 68#ifdef CONFIG_PPC_OF
69#include <asm/of_device.h> 69#include <linux/of_device.h>
70#include <asm/of_platform.h> 70#include <linux/of_platform.h>
71#endif 71#endif
72 72
73#define PFX "ipmi_si: " 73#define PFX "ipmi_si: "
diff --git a/drivers/char/mxser.c b/drivers/char/mxser.c
index e30575e87648..b638403e8e9c 100644
--- a/drivers/char/mxser.c
+++ b/drivers/char/mxser.c
@@ -1612,8 +1612,10 @@ static int mxser_ioctl_special(unsigned int cmd, void __user *argp)
1612 1612
1613 switch (cmd) { 1613 switch (cmd) {
1614 case MOXA_GET_MAJOR: 1614 case MOXA_GET_MAJOR:
1615 printk(KERN_WARNING "mxser: '%s' uses deprecated ioctl %x, fix " 1615 if (printk_ratelimit())
1616 "your userspace\n", current->comm, cmd); 1616 printk(KERN_WARNING "mxser: '%s' uses deprecated ioctl "
1617 "%x (GET_MAJOR), fix your userspace\n",
1618 current->comm, cmd);
1617 return put_user(ttymajor, (int __user *)argp); 1619 return put_user(ttymajor, (int __user *)argp);
1618 1620
1619 case MOXA_CHKPORTENABLE: 1621 case MOXA_CHKPORTENABLE:
diff --git a/drivers/char/pcmcia/synclink_cs.c b/drivers/char/pcmcia/synclink_cs.c
index d1fceabe3aef..c240562c218b 100644
--- a/drivers/char/pcmcia/synclink_cs.c
+++ b/drivers/char/pcmcia/synclink_cs.c
@@ -232,7 +232,6 @@ typedef struct _mgslpc_info {
232 232
233 /* SPPP/Cisco HDLC device parts */ 233 /* SPPP/Cisco HDLC device parts */
234 int netcount; 234 int netcount;
235 int dosyncppp;
236 spinlock_t netlock; 235 spinlock_t netlock;
237 236
238#if SYNCLINK_GENERIC_HDLC 237#if SYNCLINK_GENERIC_HDLC
@@ -459,13 +458,11 @@ static int ttymajor=0;
459 458
460static int debug_level = 0; 459static int debug_level = 0;
461static int maxframe[MAX_DEVICE_COUNT] = {0,}; 460static int maxframe[MAX_DEVICE_COUNT] = {0,};
462static int dosyncppp[MAX_DEVICE_COUNT] = {1,1,1,1};
463 461
464module_param(break_on_load, bool, 0); 462module_param(break_on_load, bool, 0);
465module_param(ttymajor, int, 0); 463module_param(ttymajor, int, 0);
466module_param(debug_level, int, 0); 464module_param(debug_level, int, 0);
467module_param_array(maxframe, int, NULL, 0); 465module_param_array(maxframe, int, NULL, 0);
468module_param_array(dosyncppp, int, NULL, 0);
469 466
470MODULE_LICENSE("GPL"); 467MODULE_LICENSE("GPL");
471 468
@@ -2915,7 +2912,6 @@ static void mgslpc_add_device(MGSLPC_INFO *info)
2915 if (info->line < MAX_DEVICE_COUNT) { 2912 if (info->line < MAX_DEVICE_COUNT) {
2916 if (maxframe[info->line]) 2913 if (maxframe[info->line])
2917 info->max_frame_size = maxframe[info->line]; 2914 info->max_frame_size = maxframe[info->line];
2918 info->dosyncppp = dosyncppp[info->line];
2919 } 2915 }
2920 2916
2921 mgslpc_device_count++; 2917 mgslpc_device_count++;
diff --git a/drivers/char/synclink.c b/drivers/char/synclink.c
index ef6706f09061..500f5176b6ba 100644
--- a/drivers/char/synclink.c
+++ b/drivers/char/synclink.c
@@ -304,7 +304,6 @@ struct mgsl_struct {
304 304
305 /* generic HDLC device parts */ 305 /* generic HDLC device parts */
306 int netcount; 306 int netcount;
307 int dosyncppp;
308 spinlock_t netlock; 307 spinlock_t netlock;
309 308
310#if SYNCLINK_GENERIC_HDLC 309#if SYNCLINK_GENERIC_HDLC
@@ -868,7 +867,6 @@ static int irq[MAX_ISA_DEVICES];
868static int dma[MAX_ISA_DEVICES]; 867static int dma[MAX_ISA_DEVICES];
869static int debug_level; 868static int debug_level;
870static int maxframe[MAX_TOTAL_DEVICES]; 869static int maxframe[MAX_TOTAL_DEVICES];
871static int dosyncppp[MAX_TOTAL_DEVICES];
872static int txdmabufs[MAX_TOTAL_DEVICES]; 870static int txdmabufs[MAX_TOTAL_DEVICES];
873static int txholdbufs[MAX_TOTAL_DEVICES]; 871static int txholdbufs[MAX_TOTAL_DEVICES];
874 872
@@ -879,7 +877,6 @@ module_param_array(irq, int, NULL, 0);
879module_param_array(dma, int, NULL, 0); 877module_param_array(dma, int, NULL, 0);
880module_param(debug_level, int, 0); 878module_param(debug_level, int, 0);
881module_param_array(maxframe, int, NULL, 0); 879module_param_array(maxframe, int, NULL, 0);
882module_param_array(dosyncppp, int, NULL, 0);
883module_param_array(txdmabufs, int, NULL, 0); 880module_param_array(txdmabufs, int, NULL, 0);
884module_param_array(txholdbufs, int, NULL, 0); 881module_param_array(txholdbufs, int, NULL, 0);
885 882
@@ -4258,7 +4255,6 @@ static void mgsl_add_device( struct mgsl_struct *info )
4258 if (info->line < MAX_TOTAL_DEVICES) { 4255 if (info->line < MAX_TOTAL_DEVICES) {
4259 if (maxframe[info->line]) 4256 if (maxframe[info->line])
4260 info->max_frame_size = maxframe[info->line]; 4257 info->max_frame_size = maxframe[info->line];
4261 info->dosyncppp = dosyncppp[info->line];
4262 4258
4263 if (txdmabufs[info->line]) { 4259 if (txdmabufs[info->line]) {
4264 info->num_tx_dma_buffers = txdmabufs[info->line]; 4260 info->num_tx_dma_buffers = txdmabufs[info->line];
diff --git a/drivers/char/synclink_gt.c b/drivers/char/synclink_gt.c
index 3e9058993e41..509c89ac5bd3 100644
--- a/drivers/char/synclink_gt.c
+++ b/drivers/char/synclink_gt.c
@@ -128,17 +128,14 @@ static int slgt_device_count;
128static int ttymajor; 128static int ttymajor;
129static int debug_level; 129static int debug_level;
130static int maxframe[MAX_DEVICES]; 130static int maxframe[MAX_DEVICES];
131static int dosyncppp[MAX_DEVICES];
132 131
133module_param(ttymajor, int, 0); 132module_param(ttymajor, int, 0);
134module_param(debug_level, int, 0); 133module_param(debug_level, int, 0);
135module_param_array(maxframe, int, NULL, 0); 134module_param_array(maxframe, int, NULL, 0);
136module_param_array(dosyncppp, int, NULL, 0);
137 135
138MODULE_PARM_DESC(ttymajor, "TTY major device number override: 0=auto assigned"); 136MODULE_PARM_DESC(ttymajor, "TTY major device number override: 0=auto assigned");
139MODULE_PARM_DESC(debug_level, "Debug syslog output: 0=disabled, 1 to 5=increasing detail"); 137MODULE_PARM_DESC(debug_level, "Debug syslog output: 0=disabled, 1 to 5=increasing detail");
140MODULE_PARM_DESC(maxframe, "Maximum frame size used by device (4096 to 65535)"); 138MODULE_PARM_DESC(maxframe, "Maximum frame size used by device (4096 to 65535)");
141MODULE_PARM_DESC(dosyncppp, "Enable synchronous net device, 0=disable 1=enable");
142 139
143/* 140/*
144 * tty support and callbacks 141 * tty support and callbacks
@@ -349,7 +346,6 @@ struct slgt_info {
349 /* SPPP/Cisco HDLC device parts */ 346 /* SPPP/Cisco HDLC device parts */
350 347
351 int netcount; 348 int netcount;
352 int dosyncppp;
353 spinlock_t netlock; 349 spinlock_t netlock;
354#if SYNCLINK_GENERIC_HDLC 350#if SYNCLINK_GENERIC_HDLC
355 struct net_device *netdev; 351 struct net_device *netdev;
@@ -3405,7 +3401,6 @@ static void add_device(struct slgt_info *info)
3405 if (info->line < MAX_DEVICES) { 3401 if (info->line < MAX_DEVICES) {
3406 if (maxframe[info->line]) 3402 if (maxframe[info->line])
3407 info->max_frame_size = maxframe[info->line]; 3403 info->max_frame_size = maxframe[info->line];
3408 info->dosyncppp = dosyncppp[info->line];
3409 } 3404 }
3410 3405
3411 slgt_device_count++; 3406 slgt_device_count++;
diff --git a/drivers/char/synclinkmp.c b/drivers/char/synclinkmp.c
index c0490cbd0db2..6bdb44f7bec2 100644
--- a/drivers/char/synclinkmp.c
+++ b/drivers/char/synclinkmp.c
@@ -270,7 +270,6 @@ typedef struct _synclinkmp_info {
270 270
271 /* SPPP/Cisco HDLC device parts */ 271 /* SPPP/Cisco HDLC device parts */
272 int netcount; 272 int netcount;
273 int dosyncppp;
274 spinlock_t netlock; 273 spinlock_t netlock;
275 274
276#if SYNCLINK_GENERIC_HDLC 275#if SYNCLINK_GENERIC_HDLC
@@ -469,13 +468,11 @@ static int ttymajor = 0;
469 */ 468 */
470static int debug_level = 0; 469static int debug_level = 0;
471static int maxframe[MAX_DEVICES] = {0,}; 470static int maxframe[MAX_DEVICES] = {0,};
472static int dosyncppp[MAX_DEVICES] = {0,};
473 471
474module_param(break_on_load, bool, 0); 472module_param(break_on_load, bool, 0);
475module_param(ttymajor, int, 0); 473module_param(ttymajor, int, 0);
476module_param(debug_level, int, 0); 474module_param(debug_level, int, 0);
477module_param_array(maxframe, int, NULL, 0); 475module_param_array(maxframe, int, NULL, 0);
478module_param_array(dosyncppp, int, NULL, 0);
479 476
480static char *driver_name = "SyncLink MultiPort driver"; 477static char *driver_name = "SyncLink MultiPort driver";
481static char *driver_version = "$Revision: 4.38 $"; 478static char *driver_version = "$Revision: 4.38 $";
@@ -3752,7 +3749,6 @@ static void add_device(SLMP_INFO *info)
3752 if (info->line < MAX_DEVICES) { 3749 if (info->line < MAX_DEVICES) {
3753 if (maxframe[info->line]) 3750 if (maxframe[info->line])
3754 info->max_frame_size = maxframe[info->line]; 3751 info->max_frame_size = maxframe[info->line];
3755 info->dosyncppp = dosyncppp[info->line];
3756 } 3752 }
3757 3753
3758 synclinkmp_device_count++; 3754 synclinkmp_device_count++;
diff --git a/drivers/char/tty_ldisc.c b/drivers/char/tty_ldisc.c
index 241cbdea65ab..f307f135cbfb 100644
--- a/drivers/char/tty_ldisc.c
+++ b/drivers/char/tty_ldisc.c
@@ -169,7 +169,7 @@ static int tty_ldisc_get(int disc, struct tty_ldisc *ld)
169 if (disc < N_TTY || disc >= NR_LDISCS) 169 if (disc < N_TTY || disc >= NR_LDISCS)
170 return -EINVAL; 170 return -EINVAL;
171 err = tty_ldisc_try_get(disc, ld); 171 err = tty_ldisc_try_get(disc, ld);
172 if (err == -EAGAIN) { 172 if (err < 0) {
173 request_module("tty-ldisc-%d", disc); 173 request_module("tty-ldisc-%d", disc);
174 err = tty_ldisc_try_get(disc, ld); 174 err = tty_ldisc_try_get(disc, ld);
175 } 175 }
diff --git a/drivers/char/viocons.c b/drivers/char/viocons.c
deleted file mode 100644
index 65fb848e1cce..000000000000
--- a/drivers/char/viocons.c
+++ /dev/null
@@ -1,1171 +0,0 @@
1/* -*- linux-c -*-
2 *
3 * drivers/char/viocons.c
4 *
5 * iSeries Virtual Terminal
6 *
7 * Authors: Dave Boutcher <boutcher@us.ibm.com>
8 * Ryan Arnold <ryanarn@us.ibm.com>
9 * Colin Devilbiss <devilbis@us.ibm.com>
10 * Stephen Rothwell
11 *
12 * (C) Copyright 2000, 2001, 2002, 2003, 2004 IBM Corporation
13 *
14 * This program is free software; you can redistribute it and/or
15 * modify it under the terms of the GNU General Public License as
16 * published by the Free Software Foundation; either version 2 of the
17 * License, or (at your option) any later version.
18 *
19 * This program is distributed in the hope that it will be useful, but
20 * WITHOUT ANY WARRANTY; without even the implied warranty of
21 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
22 * General Public License for more details.
23 *
24 * You should have received a copy of the GNU General Public License
25 * along with this program; if not, write to the Free Software Foundation,
26 * Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
27 */
28#include <linux/kernel.h>
29#include <linux/proc_fs.h>
30#include <linux/errno.h>
31#include <linux/vmalloc.h>
32#include <linux/mm.h>
33#include <linux/console.h>
34#include <linux/module.h>
35#include <asm/uaccess.h>
36#include <linux/init.h>
37#include <linux/wait.h>
38#include <linux/spinlock.h>
39#include <asm/ioctls.h>
40#include <linux/kd.h>
41#include <linux/tty.h>
42#include <linux/tty_flip.h>
43#include <linux/sysrq.h>
44
45#include <asm/firmware.h>
46#include <asm/iseries/vio.h>
47#include <asm/iseries/hv_lp_event.h>
48#include <asm/iseries/hv_call_event.h>
49#include <asm/iseries/hv_lp_config.h>
50#include <asm/iseries/hv_call.h>
51
52#ifdef CONFIG_VT
53#error You must turn off CONFIG_VT to use CONFIG_VIOCONS
54#endif
55
56#define VIOTTY_MAGIC (0x0DCB)
57#define VTTY_PORTS 10
58
59#define VIOCONS_KERN_WARN KERN_WARNING "viocons: "
60#define VIOCONS_KERN_INFO KERN_INFO "viocons: "
61
62static DEFINE_SPINLOCK(consolelock);
63static DEFINE_SPINLOCK(consoleloglock);
64
65static int vio_sysrq_pressed;
66
67#define VIOCHAR_NUM_BUF 16
68
69/*
70 * Our port information. We store a pointer to one entry in the
71 * tty_driver_data
72 */
73static struct port_info {
74 int magic;
75 struct tty_struct *tty;
76 HvLpIndex lp;
77 u8 vcons;
78 u64 seq; /* sequence number of last HV send */
79 u64 ack; /* last ack from HV */
80/*
81 * When we get writes faster than we can send it to the partition,
82 * buffer the data here. Note that used is a bit map of used buffers.
83 * It had better have enough bits to hold VIOCHAR_NUM_BUF; the bitops assume
84 * it is a multiple of unsigned long
85 */
86 unsigned long used;
87 u8 *buffer[VIOCHAR_NUM_BUF];
88 int bufferBytes[VIOCHAR_NUM_BUF];
89 int curbuf;
90 int bufferOverflow;
91 int overflowMessage;
92} port_info[VTTY_PORTS];
93
94#define viochar_is_console(pi) ((pi) == &port_info[0])
95#define viochar_port(pi) ((pi) - &port_info[0])
96
97static void initDataEvent(struct viocharlpevent *viochar, HvLpIndex lp);
98
99static struct tty_driver *viotty_driver;
100
101static void hvlog(char *fmt, ...)
102{
103 int i;
104 unsigned long flags;
105 va_list args;
106 static char buf[256];
107
108 spin_lock_irqsave(&consoleloglock, flags);
109 va_start(args, fmt);
110 i = vscnprintf(buf, sizeof(buf) - 1, fmt, args);
111 va_end(args);
112 buf[i++] = '\r';
113 HvCall_writeLogBuffer(buf, i);
114 spin_unlock_irqrestore(&consoleloglock, flags);
115}
116
117static void hvlogOutput(const char *buf, int count)
118{
119 unsigned long flags;
120 int begin;
121 int index;
122 static const char cr = '\r';
123
124 begin = 0;
125 spin_lock_irqsave(&consoleloglock, flags);
126 for (index = 0; index < count; index++) {
127 if (buf[index] == '\n') {
128 /*
129 * Start right after the last '\n' or at the zeroth
130 * array position and output the number of characters
131 * including the newline.
132 */
133 HvCall_writeLogBuffer(&buf[begin], index - begin + 1);
134 begin = index + 1;
135 HvCall_writeLogBuffer(&cr, 1);
136 }
137 }
138 if ((index - begin) > 0)
139 HvCall_writeLogBuffer(&buf[begin], index - begin);
140 spin_unlock_irqrestore(&consoleloglock, flags);
141}
142
143/*
144 * Make sure we're pointing to a valid port_info structure. Shamelessly
145 * plagiarized from serial.c
146 */
147static inline int viotty_paranoia_check(struct port_info *pi,
148 char *name, const char *routine)
149{
150 static const char *bad_pi_addr = VIOCONS_KERN_WARN
151 "warning: bad address for port_info struct (%s) in %s\n";
152 static const char *badmagic = VIOCONS_KERN_WARN
153 "warning: bad magic number for port_info struct (%s) in %s\n";
154
155 if ((pi < &port_info[0]) || (viochar_port(pi) > VTTY_PORTS)) {
156 printk(bad_pi_addr, name, routine);
157 return 1;
158 }
159 if (pi->magic != VIOTTY_MAGIC) {
160 printk(badmagic, name, routine);
161 return 1;
162 }
163 return 0;
164}
165
166/*
167 * Add data to our pending-send buffers.
168 *
169 * NOTE: Don't use printk in here because it gets nastily recursive.
170 * hvlog can be used to log to the hypervisor buffer
171 */
172static int buffer_add(struct port_info *pi, const char *buf, size_t len)
173{
174 size_t bleft;
175 size_t curlen;
176 const char *curbuf;
177 int nextbuf;
178
179 curbuf = buf;
180 bleft = len;
181 while (bleft > 0) {
182 /*
183 * If there is no space left in the current buffer, we have
184 * filled everything up, so return. If we filled the previous
185 * buffer we would already have moved to the next one.
186 */
187 if (pi->bufferBytes[pi->curbuf] == VIOCHAR_MAX_DATA) {
188 hvlog ("\n\rviocons: No overflow buffer available for memcpy().\n");
189 pi->bufferOverflow++;
190 pi->overflowMessage = 1;
191 break;
192 }
193
194 /*
195 * Turn on the "used" bit for this buffer. If it's already on,
196 * that's fine.
197 */
198 set_bit(pi->curbuf, &pi->used);
199
200 /*
201 * See if this buffer has been allocated. If not, allocate it.
202 */
203 if (pi->buffer[pi->curbuf] == NULL) {
204 pi->buffer[pi->curbuf] =
205 kmalloc(VIOCHAR_MAX_DATA, GFP_ATOMIC);
206 if (pi->buffer[pi->curbuf] == NULL) {
207 hvlog("\n\rviocons: kmalloc failed allocating spaces for buffer %d.",
208 pi->curbuf);
209 break;
210 }
211 }
212
213 /* Figure out how much we can copy into this buffer. */
214 if (bleft < (VIOCHAR_MAX_DATA - pi->bufferBytes[pi->curbuf]))
215 curlen = bleft;
216 else
217 curlen = VIOCHAR_MAX_DATA - pi->bufferBytes[pi->curbuf];
218
219 /* Copy the data into the buffer. */
220 memcpy(pi->buffer[pi->curbuf] + pi->bufferBytes[pi->curbuf],
221 curbuf, curlen);
222
223 pi->bufferBytes[pi->curbuf] += curlen;
224 curbuf += curlen;
225 bleft -= curlen;
226
227 /*
228 * Now see if we've filled this buffer. If not then
229 * we'll try to use it again later. If we've filled it
230 * up then we'll advance the curbuf to the next in the
231 * circular queue.
232 */
233 if (pi->bufferBytes[pi->curbuf] == VIOCHAR_MAX_DATA) {
234 nextbuf = (pi->curbuf + 1) % VIOCHAR_NUM_BUF;
235 /*
236 * Move to the next buffer if it hasn't been used yet
237 */
238 if (test_bit(nextbuf, &pi->used) == 0)
239 pi->curbuf = nextbuf;
240 }
241 }
242 return len - bleft;
243}
244
245/*
246 * Send pending data
247 *
248 * NOTE: Don't use printk in here because it gets nastily recursive.
249 * hvlog can be used to log to the hypervisor buffer
250 */
251static void send_buffers(struct port_info *pi)
252{
253 HvLpEvent_Rc hvrc;
254 int nextbuf;
255 struct viocharlpevent *viochar;
256 unsigned long flags;
257
258 spin_lock_irqsave(&consolelock, flags);
259
260 viochar = (struct viocharlpevent *)
261 vio_get_event_buffer(viomajorsubtype_chario);
262
263 /* Make sure we got a buffer */
264 if (viochar == NULL) {
265 hvlog("\n\rviocons: Can't get viochar buffer in sendBuffers().");
266 spin_unlock_irqrestore(&consolelock, flags);
267 return;
268 }
269
270 if (pi->used == 0) {
271 hvlog("\n\rviocons: in sendbuffers(), but no buffers used.\n");
272 vio_free_event_buffer(viomajorsubtype_chario, viochar);
273 spin_unlock_irqrestore(&consolelock, flags);
274 return;
275 }
276
277 /*
278 * curbuf points to the buffer we're filling. We want to
279 * start sending AFTER this one.
280 */
281 nextbuf = (pi->curbuf + 1) % VIOCHAR_NUM_BUF;
282
283 /*
284 * Loop until we find a buffer with the used bit on
285 */
286 while (test_bit(nextbuf, &pi->used) == 0)
287 nextbuf = (nextbuf + 1) % VIOCHAR_NUM_BUF;
288
289 initDataEvent(viochar, pi->lp);
290
291 /*
292 * While we have buffers with data, and our send window
293 * is open, send them
294 */
295 while ((test_bit(nextbuf, &pi->used)) &&
296 ((pi->seq - pi->ack) < VIOCHAR_WINDOW)) {
297 viochar->len = pi->bufferBytes[nextbuf];
298 viochar->event.xCorrelationToken = pi->seq++;
299 viochar->event.xSizeMinus1 =
300 offsetof(struct viocharlpevent, data) + viochar->len;
301
302 memcpy(viochar->data, pi->buffer[nextbuf], viochar->len);
303
304 hvrc = HvCallEvent_signalLpEvent(&viochar->event);
305 if (hvrc) {
306 /*
307 * MUST unlock the spinlock before doing a printk
308 */
309 vio_free_event_buffer(viomajorsubtype_chario, viochar);
310 spin_unlock_irqrestore(&consolelock, flags);
311
312 printk(VIOCONS_KERN_WARN
313 "error sending event! return code %d\n",
314 (int)hvrc);
315 return;
316 }
317
318 /*
319 * clear the used bit, zero the number of bytes in
320 * this buffer, and move to the next buffer
321 */
322 clear_bit(nextbuf, &pi->used);
323 pi->bufferBytes[nextbuf] = 0;
324 nextbuf = (nextbuf + 1) % VIOCHAR_NUM_BUF;
325 }
326
327 /*
328 * If we have emptied all the buffers, start at 0 again.
329 * this will re-use any allocated buffers
330 */
331 if (pi->used == 0) {
332 pi->curbuf = 0;
333
334 if (pi->overflowMessage)
335 pi->overflowMessage = 0;
336
337 if (pi->tty) {
338 tty_wakeup(pi->tty);
339 }
340 }
341
342 vio_free_event_buffer(viomajorsubtype_chario, viochar);
343 spin_unlock_irqrestore(&consolelock, flags);
344}
345
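For reference, buffer_add() and send_buffers() in the removed viocons.c implement a ring of fixed-size chunks whose occupancy is tracked in a per-port bitmap: writes spill into the ring while the hypervisor send window is closed, and the ring is drained once the window reopens. A compact user-space sketch of that scheme, with illustrative sizes and names:

#include <stdio.h>
#include <string.h>

#define NUM_BUF  4
#define BUF_SIZE 8

static char bufs[NUM_BUF][BUF_SIZE];
static int bytes[NUM_BUF];
static unsigned long used;      /* bit n set => bufs[n] holds pending data */
static int curbuf;

static size_t buffer_add(const char *data, size_t len)
{
        size_t done = 0, chunk;

        while (done < len) {
                if (bytes[curbuf] == BUF_SIZE)
                        break;                  /* ring is full: drop the rest */
                used |= 1UL << curbuf;
                chunk = len - done;
                if (chunk > (size_t)(BUF_SIZE - bytes[curbuf]))
                        chunk = BUF_SIZE - bytes[curbuf];
                memcpy(bufs[curbuf] + bytes[curbuf], data + done, chunk);
                bytes[curbuf] += chunk;
                done += chunk;
                /* Advance only if the next chunk is free, as the driver does. */
                if (bytes[curbuf] == BUF_SIZE &&
                    !(used & (1UL << ((curbuf + 1) % NUM_BUF))))
                        curbuf = (curbuf + 1) % NUM_BUF;
        }
        return done;
}

static void send_buffers(void)
{
        int n = (curbuf + 1) % NUM_BUF;
        int i;

        for (i = 0; i < NUM_BUF; i++, n = (n + 1) % NUM_BUF) {
                if (!(used & (1UL << n)))
                        continue;
                printf("sending %d bytes from chunk %d\n", bytes[n], n);
                used &= ~(1UL << n);
                bytes[n] = 0;
        }
        if (used == 0)
                curbuf = 0;     /* everything drained: start reusing chunk 0 */
}

int main(void)
{
        buffer_add("hello, circular world", 21);
        send_buffers();
        return 0;
}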
346/*
347 * Our internal writer. Gets called both from the console device and
348 * the tty device. the tty pointer will be NULL if called from the console.
349 * Return total number of bytes "written".
350 *
351 * NOTE: Don't use printk in here because it gets nastily recursive. hvlog
352 * can be used to log to the hypervisor buffer
353 */
354static int internal_write(struct port_info *pi, const char *buf, size_t len)
355{
356 HvLpEvent_Rc hvrc;
357 size_t bleft;
358 size_t curlen;
359 const char *curbuf;
360 unsigned long flags;
361 struct viocharlpevent *viochar;
362
363 /*
364 * Write to the hvlog of inbound data are now done prior to
365 * calling internal_write() since internal_write() is only called in
366 * the event that an lp event path is active, which isn't the case for
367 * logging attempts prior to console initialization.
368 *
369 * If there is already data queued for this port, send it prior to
370 * attempting to send any new data.
371 */
372 if (pi->used)
373 send_buffers(pi);
374
375 spin_lock_irqsave(&consolelock, flags);
376
377 viochar = vio_get_event_buffer(viomajorsubtype_chario);
378 if (viochar == NULL) {
379 spin_unlock_irqrestore(&consolelock, flags);
380 hvlog("\n\rviocons: Can't get vio buffer in internal_write().");
381 return -EAGAIN;
382 }
383 initDataEvent(viochar, pi->lp);
384
385 curbuf = buf;
386 bleft = len;
387
388 while ((bleft > 0) && (pi->used == 0) &&
389 ((pi->seq - pi->ack) < VIOCHAR_WINDOW)) {
390 if (bleft > VIOCHAR_MAX_DATA)
391 curlen = VIOCHAR_MAX_DATA;
392 else
393 curlen = bleft;
394
395 viochar->event.xCorrelationToken = pi->seq++;
396 memcpy(viochar->data, curbuf, curlen);
397 viochar->len = curlen;
398 viochar->event.xSizeMinus1 =
399 offsetof(struct viocharlpevent, data) + curlen;
400
401 hvrc = HvCallEvent_signalLpEvent(&viochar->event);
402 if (hvrc) {
403 hvlog("viocons: error sending event! %d\n", (int)hvrc);
404 goto out;
405 }
406 curbuf += curlen;
407 bleft -= curlen;
408 }
409
410 /* If we didn't send it all, buffer as much of it as we can. */
411 if (bleft > 0)
412 bleft -= buffer_add(pi, curbuf, bleft);
413out:
414 vio_free_event_buffer(viomajorsubtype_chario, viochar);
415 spin_unlock_irqrestore(&consolelock, flags);
416 return len - bleft;
417}
418
419static struct port_info *get_port_data(struct tty_struct *tty)
420{
421 unsigned long flags;
422 struct port_info *pi;
423
424 spin_lock_irqsave(&consolelock, flags);
425 if (tty) {
426 pi = (struct port_info *)tty->driver_data;
427 if (!pi || viotty_paranoia_check(pi, tty->name,
428 "get_port_data")) {
429 pi = NULL;
430 }
431 } else
432 /*
433 * If this is the console device, use the lp from
434 * the first port entry
435 */
436 pi = &port_info[0];
437 spin_unlock_irqrestore(&consolelock, flags);
438 return pi;
439}
440
441/*
442 * Initialize the common fields in a charLpEvent
443 */
444static void initDataEvent(struct viocharlpevent *viochar, HvLpIndex lp)
445{
446 struct HvLpEvent *hev = &viochar->event;
447
448 memset(viochar, 0, sizeof(struct viocharlpevent));
449
450 hev->flags = HV_LP_EVENT_VALID | HV_LP_EVENT_DEFERRED_ACK |
451 HV_LP_EVENT_INT;
452 hev->xType = HvLpEvent_Type_VirtualIo;
453 hev->xSubtype = viomajorsubtype_chario | viochardata;
454 hev->xSourceLp = HvLpConfig_getLpIndex();
455 hev->xTargetLp = lp;
456 hev->xSizeMinus1 = sizeof(struct viocharlpevent);
457 hev->xSourceInstanceId = viopath_sourceinst(lp);
458 hev->xTargetInstanceId = viopath_targetinst(lp);
459}
460
461/*
462 * early console device write
463 */
464static void viocons_write_early(struct console *co, const char *s, unsigned count)
465{
466 hvlogOutput(s, count);
467}
468
469/*
470 * console device write
471 */
472static void viocons_write(struct console *co, const char *s, unsigned count)
473{
474 int index;
475 int begin;
476 struct port_info *pi;
477
478 static const char cr = '\r';
479
480 /*
481 * Check port data first because the target LP might be valid but
482 * simply not active, in which case we want to hvlog the output.
483 */
484 pi = get_port_data(NULL);
485 if (pi == NULL) {
486 hvlog("\n\rviocons_write: unable to get port data.");
487 return;
488 }
489
490 hvlogOutput(s, count);
491
492 if (!viopath_isactive(pi->lp))
493 return;
494
495 /*
496 * Any newline character found will cause a
497 * carriage return character to be emitted as well.
498 */
499 begin = 0;
500 for (index = 0; index < count; index++) {
501 if (s[index] == '\n') {
502 /*
503 * Newline found. Print everything up to and
504 * including the newline
505 */
506 internal_write(pi, &s[begin], index - begin + 1);
507 begin = index + 1;
508 /* Emit a carriage return as well */
509 internal_write(pi, &cr, 1);
510 }
511 }
512
513 /* If any characters left to write, write them now */
514 if ((index - begin) > 0)
515 internal_write(pi, &s[begin], index - begin);
516}
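For illustration, the newline handling above can be sketched as a small stand-alone routine, where emit() is a hypothetical stand-in for internal_write(); feeding it "a\nb" emits 'a', '\n', '\r', 'b' in that order:

#include <stdio.h>

/* Hypothetical stand-in for internal_write(): print the chunk so the
 * expansion order is visible. */
static void emit(const char *s, size_t len)
{
	fwrite(s, 1, len, stdout);
}

/* Write 's', emitting a '\r' after every '\n', as viocons_write() does. */
static void write_with_cr(const char *s, size_t count)
{
	size_t begin = 0, index;

	for (index = 0; index < count; index++) {
		if (s[index] == '\n') {
			emit(&s[begin], index - begin + 1);	/* up to and including '\n' */
			emit("\r", 1);				/* follow with a carriage return */
			begin = index + 1;
		}
	}
	if (index > begin)
		emit(&s[begin], index - begin);			/* trailing partial line */
}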
517
518/*
519 * Work out the device associated with this console
520 */
521static struct tty_driver *viocons_device(struct console *c, int *index)
522{
523 *index = c->index;
524 return viotty_driver;
525}
526
527/*
528 * console device I/O methods
529 */
530static struct console viocons_early = {
531 .name = "viocons",
532 .write = viocons_write_early,
533 .flags = CON_PRINTBUFFER,
534 .index = -1,
535};
536
537static struct console viocons = {
538 .name = "viocons",
539 .write = viocons_write,
540 .device = viocons_device,
541 .flags = CON_PRINTBUFFER,
542 .index = -1,
543};
544
545/*
546 * TTY Open method
547 */
548static int viotty_open(struct tty_struct *tty, struct file *filp)
549{
550 int port;
551 unsigned long flags;
552 struct port_info *pi;
553
554 port = tty->index;
555
556 if ((port < 0) || (port >= VTTY_PORTS))
557 return -ENODEV;
558
559 spin_lock_irqsave(&consolelock, flags);
560
561 pi = &port_info[port];
562 /* If some other TTY is already connected here, reject the open */
563 if ((pi->tty) && (pi->tty != tty)) {
564 spin_unlock_irqrestore(&consolelock, flags);
565 printk(VIOCONS_KERN_WARN
566 "attempt to open device twice from different ttys\n");
567 return -EBUSY;
568 }
569 tty->driver_data = pi;
570 pi->tty = tty;
571 spin_unlock_irqrestore(&consolelock, flags);
572
573 return 0;
574}
575
576/*
577 * TTY Close method
578 */
579static void viotty_close(struct tty_struct *tty, struct file *filp)
580{
581 unsigned long flags;
582 struct port_info *pi;
583
584 spin_lock_irqsave(&consolelock, flags);
585 pi = (struct port_info *)tty->driver_data;
586
587 if (!pi || viotty_paranoia_check(pi, tty->name, "viotty_close")) {
588 spin_unlock_irqrestore(&consolelock, flags);
589 return;
590 }
591 if (tty->count == 1)
592 pi->tty = NULL;
593 spin_unlock_irqrestore(&consolelock, flags);
594}
595
596/*
597 * TTY Write method
598 */
599static int viotty_write(struct tty_struct *tty, const unsigned char *buf,
600 int count)
601{
602 struct port_info *pi;
603
604 pi = get_port_data(tty);
605 if (pi == NULL) {
606 hvlog("\n\rviotty_write: no port data.");
607 return -ENODEV;
608 }
609
610 if (viochar_is_console(pi))
611 hvlogOutput(buf, count);
612
613 /*
614 * If the path to this LP is closed, don't bother doing anything more;
615 * just dump the data on the floor and return count. For some reason
616 * some user-level programs will attempt to probe available ttys, and
617 * they'll attempt a viotty_write on an invalid port which maps to an
618 * invalid target lp. If this is the case then ignore the
619 * viotty_write call and, since the viopath isn't active to this
620 * partition, return count.
621 */
622 if (!viopath_isactive(pi->lp))
623 return count;
624
625 return internal_write(pi, buf, count);
626}
627
628/*
629 * TTY put_char method
630 */
631static int viotty_put_char(struct tty_struct *tty, unsigned char ch)
632{
633 struct port_info *pi;
634
635 pi = get_port_data(tty);
636 if (pi == NULL)
637 return 0;
638
639 /* This will append '\r' as well if the char is '\n' */
640 if (viochar_is_console(pi))
641 hvlogOutput(&ch, 1);
642
643 if (viopath_isactive(pi->lp))
644 internal_write(pi, &ch, 1);
645 return 1;
646}
647
648/*
649 * TTY write_room method
650 */
651static int viotty_write_room(struct tty_struct *tty)
652{
653 int i;
654 int room = 0;
655 struct port_info *pi;
656 unsigned long flags;
657
658 spin_lock_irqsave(&consolelock, flags);
659 pi = (struct port_info *)tty->driver_data;
660 if (!pi || viotty_paranoia_check(pi, tty->name, "viotty_write_room")) {
661 spin_unlock_irqrestore(&consolelock, flags);
662 return 0;
663 }
664
665 /* If no buffers are used, return the max size. */
666 if (pi->used == 0) {
667 spin_unlock_irqrestore(&consolelock, flags);
668 return VIOCHAR_MAX_DATA * VIOCHAR_NUM_BUF;
669 }
670
671 /*
672 * We retain the spinlock because we want to get an accurate
673 * count and it can change on us between each operation if we
674 * don't hold the spinlock.
675 */
676 for (i = 0; ((i < VIOCHAR_NUM_BUF) && (room < VIOCHAR_MAX_DATA)); i++)
677 room += (VIOCHAR_MAX_DATA - pi->bufferBytes[i]);
678 spin_unlock_irqrestore(&consolelock, flags);
679
680 if (room > VIOCHAR_MAX_DATA)
681 room = VIOCHAR_MAX_DATA;
682 return room;
683}
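For illustration, the clamped sum computed above can be reproduced with hypothetical values (MAX_DATA and NUM_BUF stand in for VIOCHAR_MAX_DATA and VIOCHAR_NUM_BUF): with 150 and 40 bytes already queued the free space sums to 210, but only 200 is reported, so at most one event's worth of room is ever advertised once buffering has started.

#include <stdio.h>

#define MAX_DATA 200	/* hypothetical stand-in for VIOCHAR_MAX_DATA */
#define NUM_BUF  2	/* hypothetical stand-in for VIOCHAR_NUM_BUF  */

int main(void)
{
	int buffer_bytes[NUM_BUF] = { 150, 40 };	/* bytes already queued */
	int room = 0, i;

	for (i = 0; i < NUM_BUF && room < MAX_DATA; i++)
		room += MAX_DATA - buffer_bytes[i];
	if (room > MAX_DATA)
		room = MAX_DATA;			/* clamp, as viotty_write_room() does */
	printf("%d\n", room);				/* prints 200, not 210 */
	return 0;
}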
684
685/*
686 * TTY chars_in_buffer method
687 */
688static int viotty_chars_in_buffer(struct tty_struct *tty)
689{
690 return 0;
691}
692
693static int viotty_ioctl(struct tty_struct *tty, struct file *file,
694 unsigned int cmd, unsigned long arg)
695{
	int ret;
696 switch (cmd) {
697 /*
698 * the ioctls below read/set the flags usually shown in the leds
699 * don't use them - they will go away without warning
700 */
701 case KDGETLED:
702 case KDGKBLED:
703 return put_user(0, (char *)arg);
704
705 case KDSKBLED:
706 return 0;
707 }
708 /* FIXME: WTF is this being called for ??? */
709 lock_kernel();
710 ret = n_tty_ioctl(tty, file, cmd, arg);
711 unlock_kernel();
712 return ret;
713}
714
715/*
716 * Handle an open charLpEvent. Could be either interrupt or ack
717 */
718static void vioHandleOpenEvent(struct HvLpEvent *event)
719{
720 unsigned long flags;
721 struct viocharlpevent *cevent = (struct viocharlpevent *)event;
722 u8 port = cevent->virtual_device;
723 struct port_info *pi;
724 int reject = 0;
725
726 if (hvlpevent_is_ack(event)) {
727 if (port >= VTTY_PORTS)
728 return;
729
730 spin_lock_irqsave(&consolelock, flags);
731 /* Got the lock, don't cause console output */
732
733 pi = &port_info[port];
734 if (event->xRc == HvLpEvent_Rc_Good) {
735 pi->seq = pi->ack = 0;
736 /*
737 * This line allows connections from the primary
738 * partition but once one is connected from the
739 * primary partition nothing short of a reboot
740 * of linux will allow access from the hosting
741 * partition again without a required iSeries fix.
742 */
743 pi->lp = event->xTargetLp;
744 }
745
746 spin_unlock_irqrestore(&consolelock, flags);
747 if (event->xRc != HvLpEvent_Rc_Good)
748 printk(VIOCONS_KERN_WARN
749 "handle_open_event: event->xRc == (%d).\n",
750 event->xRc);
751
752 if (event->xCorrelationToken != 0) {
753 atomic_t *aptr = (atomic_t *)event->xCorrelationToken;
754 atomic_set(aptr, 1);
755 } else
756 printk(VIOCONS_KERN_WARN
757 "weird...got open ack without atomic\n");
758 return;
759 }
760
761 /* This had better require an ack, otherwise complain */
762 if (!hvlpevent_need_ack(event)) {
763 printk(VIOCONS_KERN_WARN "viocharopen without ack bit!\n");
764 return;
765 }
766
767 spin_lock_irqsave(&consolelock, flags);
768 /* Got the lock, don't cause console output */
769
770 /* Make sure this is a good virtual tty */
771 if (port >= VTTY_PORTS) {
772 event->xRc = HvLpEvent_Rc_SubtypeError;
773 cevent->subtype_result_code = viorc_openRejected;
774 /*
775 * Flag state here since we can't printk while holding
776 * a spinlock.
777 */
778 reject = 1;
779 } else {
780 pi = &port_info[port];
781 if ((pi->lp != HvLpIndexInvalid) &&
782 (pi->lp != event->xSourceLp)) {
783 /*
784 * If this tty is already connected to a different
785 * partition, fail.
786 */
787 event->xRc = HvLpEvent_Rc_SubtypeError;
788 cevent->subtype_result_code = viorc_openRejected;
789 reject = 2;
790 } else {
791 pi->lp = event->xSourceLp;
792 event->xRc = HvLpEvent_Rc_Good;
793 cevent->subtype_result_code = viorc_good;
794 pi->seq = pi->ack = 0;
795 reject = 0;
796 }
797 }
798
799 spin_unlock_irqrestore(&consolelock, flags);
800
801 if (reject == 1)
802 printk(VIOCONS_KERN_WARN "open rejected: bad virtual tty.\n");
803 else if (reject == 2)
804 printk(VIOCONS_KERN_WARN
805 "open rejected: console in exclusive use by another partition.\n");
806
807 /* Return the acknowledgement */
808 HvCallEvent_ackLpEvent(event);
809}
810
811/*
812 * Handle a close charLpEvent. This should ONLY be an Interrupt because the
813 * virtual console should never actually issue a close event to the hypervisor
814 * because the virtual console never goes away. A close event coming from the
815 * hypervisor simply means that there are no client consoles connected to the
816 * virtual console.
817 *
818 * Regardless of the number of connections masqueraded on the other side of
819 * the hypervisor ONLY ONE close event should be called to accompany the ONE
820 * open event that is called. The close event should ONLY be called when NO
821 * MORE connections (masqueraded or not) exist on the other side of the
822 * hypervisor.
823 */
824static void vioHandleCloseEvent(struct HvLpEvent *event)
825{
826 unsigned long flags;
827 struct viocharlpevent *cevent = (struct viocharlpevent *)event;
828 u8 port = cevent->virtual_device;
829
830 if (hvlpevent_is_int(event)) {
831 if (port >= VTTY_PORTS) {
832 printk(VIOCONS_KERN_WARN
833 "close message from invalid virtual device.\n");
834 return;
835 }
836
837 /* For closes, just mark the console partition invalid */
838 spin_lock_irqsave(&consolelock, flags);
839 /* Got the lock, don't cause console output */
840
841 if (port_info[port].lp == event->xSourceLp)
842 port_info[port].lp = HvLpIndexInvalid;
843
844 spin_unlock_irqrestore(&consolelock, flags);
845 printk(VIOCONS_KERN_INFO "close from %d\n", event->xSourceLp);
846 } else
847 printk(VIOCONS_KERN_WARN
848 "got unexpected close acknowledgement\n");
849}
850
851/*
852 * Handle a config charLpEvent. Could be either interrupt or ack
853 */
854static void vioHandleConfig(struct HvLpEvent *event)
855{
856 struct viocharlpevent *cevent = (struct viocharlpevent *)event;
857
858 HvCall_writeLogBuffer(cevent->data, cevent->len);
859
860 if (cevent->data[0] == 0x01)
861 printk(VIOCONS_KERN_INFO "window resized to %d: %d: %d: %d\n",
862 cevent->data[1], cevent->data[2],
863 cevent->data[3], cevent->data[4]);
864 else
865 printk(VIOCONS_KERN_WARN "unknown config event\n");
866}
867
868/*
869 * Handle a data charLpEvent.
870 */
871static void vioHandleData(struct HvLpEvent *event)
872{
873 struct tty_struct *tty;
874 unsigned long flags;
875 struct viocharlpevent *cevent = (struct viocharlpevent *)event;
876 struct port_info *pi;
877 int index;
878 int num_pushed;
879 u8 port = cevent->virtual_device;
880
881 if (port >= VTTY_PORTS) {
882 printk(VIOCONS_KERN_WARN "data on invalid virtual device %d\n",
883 port);
884 return;
885 }
886
887 /*
888 * Hold the spinlock so that we don't take an interrupt that
889 * changes tty between the time we fetch the port_info
890 * pointer and the time we paranoia check.
891 */
892 spin_lock_irqsave(&consolelock, flags);
893 pi = &port_info[port];
894
895 /*
896 * Change 05/01/2003 - Ryan Arnold: If a partition other than
897 * the current exclusive partition tries to send us data
898 * events then just drop them on the floor because we don't
899 * want his stinking data. He isn't authorized to receive
900 * data because he wasn't the first one to get the console,
901 * therefore he shouldn't be allowed to send data either.
902 * This will work without an iSeries fix.
903 */
904 if (pi->lp != event->xSourceLp) {
905 spin_unlock_irqrestore(&consolelock, flags);
906 return;
907 }
908
909 tty = pi->tty;
910 if (tty == NULL) {
911 spin_unlock_irqrestore(&consolelock, flags);
912 printk(VIOCONS_KERN_WARN "no tty for virtual device %d\n",
913 port);
914 return;
915 }
916
917 if (tty->magic != TTY_MAGIC) {
918 spin_unlock_irqrestore(&consolelock, flags);
919 printk(VIOCONS_KERN_WARN "tty bad magic\n");
920 return;
921 }
922
923 /*
924 * Just to be paranoid, make sure the tty points back to this port
925 */
926 pi = (struct port_info *)tty->driver_data;
927 if (!pi || viotty_paranoia_check(pi, tty->name, "vioHandleData")) {
928 spin_unlock_irqrestore(&consolelock, flags);
929 return;
930 }
931 spin_unlock_irqrestore(&consolelock, flags);
932
933 /*
934 * Change 07/21/2003 - Ryan Arnold: functionality added to
935 * support sysrq utilizing ^O as the sysrq key. The sysrq
936 * functionality will only work if built into the kernel and
937 * then only if sysrq is enabled through the proc filesystem.
938 */
939 num_pushed = 0;
940 for (index = 0; index < cevent->len; index++) {
941 /*
942 * Will be optimized away if !CONFIG_MAGIC_SYSRQ:
943 */
944 if (sysrq_on()) {
945 /* 0x0f is the ascii character for ^O */
946 if (cevent->data[index] == '\x0f') {
947 vio_sysrq_pressed = 1;
948 /*
949 * continue because we don't want to add
950 * the sysrq key into the data string.
951 */
952 continue;
953 } else if (vio_sysrq_pressed) {
954 handle_sysrq(cevent->data[index], tty);
955 vio_sysrq_pressed = 0;
956 /*
957 * continue because we don't want to add
958 * the sysrq sequence into the data string.
959 */
960 continue;
961 }
962 }
963 /*
964 * The sysrq sequence isn't included in this check if
965 * sysrq is enabled and compiled into the kernel because
966 * the sequence will never get inserted into the buffer.
967 * Don't attempt to copy more data into the buffer than we
968 * have room for because it would fail without indication.
969 */
970 if (tty_insert_flip_char(tty, cevent->data[index], TTY_NORMAL) == 0) {
971 printk(VIOCONS_KERN_WARN "input buffer overflow!\n");
972 break;
973 }
974 num_pushed++;
975 }
976
977 if (num_pushed)
978 tty_flip_buffer_push(tty);
979}
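The ^O handling above is a two-step filter: the ^O byte arms sysrq and is swallowed, the next byte is treated as the sysrq key and is also swallowed, and everything else is pushed to the tty. A minimal user-space sketch of that state machine, where do_sysrq() is a hypothetical stand-in for handle_sysrq():

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical stand-in for handle_sysrq(). */
static void do_sysrq(char key)
{
	printf("sysrq '%c'\n", key);
}

/* Returns true if the byte should be pushed to the tty, false if it is
 * part of the sysrq sequence and must be swallowed. */
static bool filter_byte(char c, bool *armed)
{
	if (c == '\x0f') {		/* ^O: arm sysrq, swallow the byte */
		*armed = true;
		return false;
	}
	if (*armed) {			/* byte after ^O: treat it as the sysrq key */
		do_sysrq(c);
		*armed = false;
		return false;
	}
	return true;			/* ordinary data byte */
}

Feeding the two bytes "\x0fb" through filter_byte() calls do_sysrq('b') and pushes nothing, matching the two continue statements in the loop.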
980
981/*
982 * Handle an ack charLpEvent.
983 */
984static void vioHandleAck(struct HvLpEvent *event)
985{
986 struct viocharlpevent *cevent = (struct viocharlpevent *)event;
987 unsigned long flags;
988 u8 port = cevent->virtual_device;
989
990 if (port >= VTTY_PORTS) {
991 printk(VIOCONS_KERN_WARN "data on invalid virtual device\n");
992 return;
993 }
994
995 spin_lock_irqsave(&consolelock, flags);
996 port_info[port].ack = event->xCorrelationToken;
997 spin_unlock_irqrestore(&consolelock, flags);
998
999 if (port_info[port].used)
1000 send_buffers(&port_info[port]);
1001}
1002
1003/*
1004 * Handle charLpEvents and route to the appropriate routine
1005 */
1006static void vioHandleCharEvent(struct HvLpEvent *event)
1007{
1008 int charminor;
1009
1010 if (event == NULL)
1011 return;
1012
1013 charminor = event->xSubtype & VIOMINOR_SUBTYPE_MASK;
1014 switch (charminor) {
1015 case viocharopen:
1016 vioHandleOpenEvent(event);
1017 break;
1018 case viocharclose:
1019 vioHandleCloseEvent(event);
1020 break;
1021 case viochardata:
1022 vioHandleData(event);
1023 break;
1024 case viocharack:
1025 vioHandleAck(event);
1026 break;
1027 case viocharconfig:
1028 vioHandleConfig(event);
1029 break;
1030 default:
1031 if (hvlpevent_is_int(event) && hvlpevent_need_ack(event)) {
1032 event->xRc = HvLpEvent_Rc_InvalidSubtype;
1033 HvCallEvent_ackLpEvent(event);
1034 }
1035 }
1036}
1037
1038/*
1039 * Send an open event
1040 */
1041static int send_open(HvLpIndex remoteLp, void *sem)
1042{
1043 return HvCallEvent_signalLpEventFast(remoteLp,
1044 HvLpEvent_Type_VirtualIo,
1045 viomajorsubtype_chario | viocharopen,
1046 HvLpEvent_AckInd_DoAck, HvLpEvent_AckType_ImmediateAck,
1047 viopath_sourceinst(remoteLp),
1048 viopath_targetinst(remoteLp),
1049 (u64)(unsigned long)sem, VIOVERSION << 16,
1050 0, 0, 0, 0);
1051}
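send_open() passes the address of an atomic_t as the event's correlation token; when the acknowledgement comes back, vioHandleOpenEvent() stores 1 through that pointer and viocons_init2() stops spinning. A rough user-space sketch of the same handshake, using C11 atomics purely for illustration:

#include <stdatomic.h>
#include <stdio.h>

static atomic_int wait_flag;

/* Hypothetical ack handler: the token sent with the open event comes
 * back in the ack and is used to wake whoever sent the open. */
static void handle_open_ack(void *correlation_token)
{
	atomic_store((atomic_int *)correlation_token, 1);
}

int main(void)
{
	atomic_store(&wait_flag, 0);
	/* send_open(lp, &wait_flag) would go here; the ack normally
	 * arrives later on the event path. */
	handle_open_ack(&wait_flag);
	while (atomic_load(&wait_flag) == 0)
		;	/* viocons_init2() spins the same way, with mb() */
	puts("open acknowledged");
	return 0;
}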
1052
1053static const struct tty_operations serial_ops = {
1054 .open = viotty_open,
1055 .close = viotty_close,
1056 .write = viotty_write,
1057 .put_char = viotty_put_char,
1058 .write_room = viotty_write_room,
1059 .chars_in_buffer = viotty_chars_in_buffer,
1060 .ioctl = viotty_ioctl,
1061};
1062
1063static int __init viocons_init2(void)
1064{
1065 atomic_t wait_flag;
1066 int rc;
1067
1068 if (!firmware_has_feature(FW_FEATURE_ISERIES))
1069 return -ENODEV;
1070
1071 /* +2 for fudge */
1072 rc = viopath_open(HvLpConfig_getPrimaryLpIndex(),
1073 viomajorsubtype_chario, VIOCHAR_WINDOW + 2);
1074 if (rc)
1075 printk(VIOCONS_KERN_WARN "error opening to primary %d\n", rc);
1076
1077 if (viopath_hostLp == HvLpIndexInvalid)
1078 vio_set_hostlp();
1079
1080 /*
1081 * And if the primary is not the same as the hosting LP, open to the
1082 * hosting lp
1083 */
1084 if ((viopath_hostLp != HvLpIndexInvalid) &&
1085 (viopath_hostLp != HvLpConfig_getPrimaryLpIndex())) {
1086 printk(VIOCONS_KERN_INFO "open path to hosting (%d)\n",
1087 viopath_hostLp);
1088 rc = viopath_open(viopath_hostLp, viomajorsubtype_chario,
1089 VIOCHAR_WINDOW + 2); /* +2 for fudge */
1090 if (rc)
1091 printk(VIOCONS_KERN_WARN
1092 "error opening to partition %d: %d\n",
1093 viopath_hostLp, rc);
1094 }
1095
1096 if (vio_setHandler(viomajorsubtype_chario, vioHandleCharEvent) < 0)
1097 printk(VIOCONS_KERN_WARN
1098 "error setting handler for console events!\n");
1099
1100 /*
1101 * First, try to open the console to the hosting lp.
1102 * Wait on a semaphore for the response.
1103 */
1104 atomic_set(&wait_flag, 0);
1105 if ((viopath_isactive(viopath_hostLp)) &&
1106 (send_open(viopath_hostLp, (void *)&wait_flag) == 0)) {
1107 printk(VIOCONS_KERN_INFO "hosting partition %d\n",
1108 viopath_hostLp);
1109 while (atomic_read(&wait_flag) == 0)
1110 mb();
1111 atomic_set(&wait_flag, 0);
1112 }
1113
1114 /*
1115 * If we don't have an active console, try the primary
1116 */
1117 if ((!viopath_isactive(port_info[0].lp)) &&
1118 (viopath_isactive(HvLpConfig_getPrimaryLpIndex())) &&
1119 (send_open(HvLpConfig_getPrimaryLpIndex(), (void *)&wait_flag)
1120 == 0)) {
1121 printk(VIOCONS_KERN_INFO "opening console to primary partition\n");
1122 while (atomic_read(&wait_flag) == 0)
1123 mb();
1124 }
1125
1126 /* Initialize the tty_driver structure */
1127 viotty_driver = alloc_tty_driver(VTTY_PORTS);
1128 viotty_driver->owner = THIS_MODULE;
1129 viotty_driver->driver_name = "vioconsole";
1130 viotty_driver->name = "tty";
1131 viotty_driver->name_base = 1;
1132 viotty_driver->major = TTY_MAJOR;
1133 viotty_driver->minor_start = 1;
1134 viotty_driver->type = TTY_DRIVER_TYPE_CONSOLE;
1135 viotty_driver->subtype = 1;
1136 viotty_driver->init_termios = tty_std_termios;
1137 viotty_driver->flags = TTY_DRIVER_REAL_RAW | TTY_DRIVER_RESET_TERMIOS;
1138 tty_set_operations(viotty_driver, &serial_ops);
1139
1140 if (tty_register_driver(viotty_driver)) {
1141 printk(VIOCONS_KERN_WARN "couldn't register console driver\n");
1142 put_tty_driver(viotty_driver);
1143 viotty_driver = NULL;
1144 }
1145
1146 unregister_console(&viocons_early);
1147 register_console(&viocons);
1148
1149 return 0;
1150}
1151
1152static int __init viocons_init(void)
1153{
1154 int i;
1155
1156 if (!firmware_has_feature(FW_FEATURE_ISERIES))
1157 return -ENODEV;
1158
1159 printk(VIOCONS_KERN_INFO "registering console\n");
1160 for (i = 0; i < VTTY_PORTS; i++) {
1161 port_info[i].lp = HvLpIndexInvalid;
1162 port_info[i].magic = VIOTTY_MAGIC;
1163 }
1164 HvCall_setLogBufferFormatAndCodepage(HvCall_LogBuffer_ASCII, 437);
1165 add_preferred_console("viocons", 0, NULL);
1166 register_console(&viocons_early);
1167 return 0;
1168}
1169
1170console_initcall(viocons_init);
1171module_init(viocons_init2);
diff --git a/drivers/char/vt.c b/drivers/char/vt.c
index 82a51f38a546..1bc00c9d860d 100644
--- a/drivers/char/vt.c
+++ b/drivers/char/vt.c
@@ -916,7 +916,6 @@ int vc_resize(struct vc_data *vc, unsigned int cols, unsigned int lines)
916 ws.ws_col = vc->vc_cols; 916 ws.ws_col = vc->vc_cols;
917 ws.ws_ypixel = vc->vc_scan_lines; 917 ws.ws_ypixel = vc->vc_scan_lines;
918 918
919 mutex_lock(&vc->vc_tty->termios_mutex);
920 spin_lock_irq(&vc->vc_tty->ctrl_lock); 919 spin_lock_irq(&vc->vc_tty->ctrl_lock);
921 if ((ws.ws_row != cws->ws_row || ws.ws_col != cws->ws_col)) 920 if ((ws.ws_row != cws->ws_row || ws.ws_col != cws->ws_col))
922 pgrp = get_pid(vc->vc_tty->pgrp); 921 pgrp = get_pid(vc->vc_tty->pgrp);
@@ -926,7 +925,6 @@ int vc_resize(struct vc_data *vc, unsigned int cols, unsigned int lines)
926 put_pid(pgrp); 925 put_pid(pgrp);
927 } 926 }
928 *cws = ws; 927 *cws = ws;
929 mutex_unlock(&vc->vc_tty->termios_mutex);
930 } 928 }
931 929
932 if (CON_IS_VISIBLE(vc)) 930 if (CON_IS_VISIBLE(vc))
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
index 8d6a3ff02672..8a67f16987db 100644
--- a/drivers/cpufreq/cpufreq.c
+++ b/drivers/cpufreq/cpufreq.c
@@ -825,6 +825,9 @@ static int cpufreq_add_dev(struct sys_device *sys_dev)
825 policy->user_policy.min = policy->cpuinfo.min_freq; 825 policy->user_policy.min = policy->cpuinfo.min_freq;
826 policy->user_policy.max = policy->cpuinfo.max_freq; 826 policy->user_policy.max = policy->cpuinfo.max_freq;
827 827
828 blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
829 CPUFREQ_START, policy);
830
828#ifdef CONFIG_SMP 831#ifdef CONFIG_SMP
829 832
830#ifdef CONFIG_HOTPLUG_CPU 833#ifdef CONFIG_HOTPLUG_CPU
diff --git a/drivers/cpufreq/cpufreq_conservative.c b/drivers/cpufreq/cpufreq_conservative.c
index fe565ee43757..ac0bbf2d234f 100644
--- a/drivers/cpufreq/cpufreq_conservative.c
+++ b/drivers/cpufreq/cpufreq_conservative.c
@@ -333,7 +333,7 @@ static void dbs_check_cpu(int cpu)
333{ 333{
334 unsigned int idle_ticks, up_idle_ticks, down_idle_ticks; 334 unsigned int idle_ticks, up_idle_ticks, down_idle_ticks;
335 unsigned int tmp_idle_ticks, total_idle_ticks; 335 unsigned int tmp_idle_ticks, total_idle_ticks;
336 unsigned int freq_step; 336 unsigned int freq_target;
337 unsigned int freq_down_sampling_rate; 337 unsigned int freq_down_sampling_rate;
338 struct cpu_dbs_info_s *this_dbs_info = &per_cpu(cpu_dbs_info, cpu); 338 struct cpu_dbs_info_s *this_dbs_info = &per_cpu(cpu_dbs_info, cpu);
339 struct cpufreq_policy *policy; 339 struct cpufreq_policy *policy;
@@ -383,13 +383,13 @@ static void dbs_check_cpu(int cpu)
383 if (this_dbs_info->requested_freq == policy->max) 383 if (this_dbs_info->requested_freq == policy->max)
384 return; 384 return;
385 385
386 freq_step = (dbs_tuners_ins.freq_step * policy->max) / 100; 386 freq_target = (dbs_tuners_ins.freq_step * policy->max) / 100;
387 387
388 /* max freq cannot be less than 100. But who knows.... */ 388 /* max freq cannot be less than 100. But who knows.... */
389 if (unlikely(freq_step == 0)) 389 if (unlikely(freq_target == 0))
390 freq_step = 5; 390 freq_target = 5;
391 391
392 this_dbs_info->requested_freq += freq_step; 392 this_dbs_info->requested_freq += freq_target;
393 if (this_dbs_info->requested_freq > policy->max) 393 if (this_dbs_info->requested_freq > policy->max)
394 this_dbs_info->requested_freq = policy->max; 394 this_dbs_info->requested_freq = policy->max;
395 395
@@ -425,19 +425,19 @@ static void dbs_check_cpu(int cpu)
425 /* 425 /*
426 * if we are already at the lowest speed then break out early 426 * if we are already at the lowest speed then break out early
427 * or if we 'cannot' reduce the speed as the user might want 427 * or if we 'cannot' reduce the speed as the user might want
428 * freq_step to be zero 428 * freq_target to be zero
429 */ 429 */
430 if (this_dbs_info->requested_freq == policy->min 430 if (this_dbs_info->requested_freq == policy->min
431 || dbs_tuners_ins.freq_step == 0) 431 || dbs_tuners_ins.freq_step == 0)
432 return; 432 return;
433 433
434 freq_step = (dbs_tuners_ins.freq_step * policy->max) / 100; 434 freq_target = (dbs_tuners_ins.freq_step * policy->max) / 100;
435 435
436 /* max freq cannot be less than 100. But who knows.... */ 436 /* max freq cannot be less than 100. But who knows.... */
437 if (unlikely(freq_step == 0)) 437 if (unlikely(freq_target == 0))
438 freq_step = 5; 438 freq_target = 5;
439 439
440 this_dbs_info->requested_freq -= freq_step; 440 this_dbs_info->requested_freq -= freq_target;
441 if (this_dbs_info->requested_freq < policy->min) 441 if (this_dbs_info->requested_freq < policy->min)
442 this_dbs_info->requested_freq = policy->min; 442 this_dbs_info->requested_freq = policy->min;
443 443
diff --git a/drivers/crypto/ixp4xx_crypto.c b/drivers/crypto/ixp4xx_crypto.c
index 42a107fe9233..2d637e0fbc03 100644
--- a/drivers/crypto/ixp4xx_crypto.c
+++ b/drivers/crypto/ixp4xx_crypto.c
@@ -27,8 +27,8 @@
27#include <crypto/authenc.h> 27#include <crypto/authenc.h>
28#include <crypto/scatterwalk.h> 28#include <crypto/scatterwalk.h>
29 29
30#include <asm/arch/npe.h> 30#include <mach/npe.h>
31#include <asm/arch/qmgr.h> 31#include <mach/qmgr.h>
32 32
33#define MAX_KEYLEN 32 33#define MAX_KEYLEN 32
34 34
diff --git a/drivers/dma/ioat_dma.c b/drivers/dma/ioat_dma.c
index a52156e56886..bc8c6e3470ca 100644
--- a/drivers/dma/ioat_dma.c
+++ b/drivers/dma/ioat_dma.c
@@ -551,7 +551,7 @@ static dma_cookie_t ioat1_tx_submit(struct dma_async_tx_descriptor *tx)
551 /* write address into NextDescriptor field of last desc in chain */ 551 /* write address into NextDescriptor field of last desc in chain */
552 to_ioat_desc(ioat_chan->used_desc.prev)->hw->next = 552 to_ioat_desc(ioat_chan->used_desc.prev)->hw->next =
553 first->async_tx.phys; 553 first->async_tx.phys;
554 __list_splice(&new_chain, ioat_chan->used_desc.prev); 554 list_splice_tail(&new_chain, &ioat_chan->used_desc);
555 555
556 ioat_chan->dmacount += desc_count; 556 ioat_chan->dmacount += desc_count;
557 ioat_chan->pending += desc_count; 557 ioat_chan->pending += desc_count;
diff --git a/drivers/dma/iop-adma.c b/drivers/dma/iop-adma.c
index 85bfeba4d85e..71fba82462cb 100644
--- a/drivers/dma/iop-adma.c
+++ b/drivers/dma/iop-adma.c
@@ -33,7 +33,7 @@
33#include <linux/memory.h> 33#include <linux/memory.h>
34#include <linux/ioport.h> 34#include <linux/ioport.h>
35 35
36#include <asm/arch/adma.h> 36#include <mach/adma.h>
37 37
38#define to_iop_adma_chan(chan) container_of(chan, struct iop_adma_chan, common) 38#define to_iop_adma_chan(chan) container_of(chan, struct iop_adma_chan, common)
39#define to_iop_adma_device(dev) \ 39#define to_iop_adma_device(dev) \
diff --git a/drivers/firewire/fw-cdev.c b/drivers/firewire/fw-cdev.c
index bc81d6fcd2fd..2e6d5848d217 100644
--- a/drivers/firewire/fw-cdev.c
+++ b/drivers/firewire/fw-cdev.c
@@ -369,22 +369,33 @@ complete_transaction(struct fw_card *card, int rcode,
369 struct response *response = data; 369 struct response *response = data;
370 struct client *client = response->client; 370 struct client *client = response->client;
371 unsigned long flags; 371 unsigned long flags;
372 struct fw_cdev_event_response *r = &response->response;
372 373
373 if (length < response->response.length) 374 if (length < r->length)
374 response->response.length = length; 375 r->length = length;
375 if (rcode == RCODE_COMPLETE) 376 if (rcode == RCODE_COMPLETE)
376 memcpy(response->response.data, payload, 377 memcpy(r->data, payload, r->length);
377 response->response.length);
378 378
379 spin_lock_irqsave(&client->lock, flags); 379 spin_lock_irqsave(&client->lock, flags);
380 list_del(&response->resource.link); 380 list_del(&response->resource.link);
381 spin_unlock_irqrestore(&client->lock, flags); 381 spin_unlock_irqrestore(&client->lock, flags);
382 382
383 response->response.type = FW_CDEV_EVENT_RESPONSE; 383 r->type = FW_CDEV_EVENT_RESPONSE;
384 response->response.rcode = rcode; 384 r->rcode = rcode;
385 queue_event(client, &response->event, &response->response, 385
386 sizeof(response->response) + response->response.length, 386 /*
387 NULL, 0); 387 * In the case that sizeof(*r) doesn't align with the position of the
388 * data, and the read is short, preserve an extra copy of the data
389 * to stay compatible with a pre-2.6.27 bug. Since the bug is harmless
390 * for short reads and some apps depended on it, this is both safe
391 * and prudent for compatibility.
392 */
393 if (r->length <= sizeof(*r) - offsetof(typeof(*r), data))
394 queue_event(client, &response->event, r, sizeof(*r),
395 r->data, r->length);
396 else
397 queue_event(client, &response->event, r, sizeof(*r) + r->length,
398 NULL, 0);
388} 399}
389 400
390static int ioctl_send_request(struct client *client, void *buffer) 401static int ioctl_send_request(struct client *client, void *buffer)
diff --git a/drivers/firmware/iscsi_ibft_find.c b/drivers/firmware/iscsi_ibft_find.c
index 11f17440fea6..d53fbbfefa3e 100644
--- a/drivers/firmware/iscsi_ibft_find.c
+++ b/drivers/firmware/iscsi_ibft_find.c
@@ -81,4 +81,3 @@ void __init reserve_ibft_region(void)
81 if (ibft_addr) 81 if (ibft_addr)
82 reserve_bootmem(pos, PAGE_ALIGN(len), BOOTMEM_DEFAULT); 82 reserve_bootmem(pos, PAGE_ALIGN(len), BOOTMEM_DEFAULT);
83} 83}
84EXPORT_SYMBOL_GPL(reserve_ibft_region);
diff --git a/drivers/gpu/drm/radeon/radeon_drv.c b/drivers/gpu/drm/radeon/radeon_drv.c
index 349ac3d3b848..637bd7faf132 100644
--- a/drivers/gpu/drm/radeon/radeon_drv.c
+++ b/drivers/gpu/drm/radeon/radeon_drv.c
@@ -38,7 +38,7 @@
38 38
39int radeon_no_wb; 39int radeon_no_wb;
40 40
41MODULE_PARM_DESC(no_wb, "Disable AGP writeback for scratch registers\n"); 41MODULE_PARM_DESC(no_wb, "Disable AGP writeback for scratch registers");
42module_param_named(no_wb, radeon_no_wb, int, 0444); 42module_param_named(no_wb, radeon_no_wb, int, 0444);
43 43
44static int dri_library_name(struct drm_device *dev, char *buf) 44static int dri_library_name(struct drm_device *dev, char *buf)
diff --git a/drivers/hwmon/Kconfig b/drivers/hwmon/Kconfig
index 00ff53348491..bf4ebfb86fa5 100644
--- a/drivers/hwmon/Kconfig
+++ b/drivers/hwmon/Kconfig
@@ -57,6 +57,16 @@ config SENSORS_ABITUGURU3
57 This driver can also be built as a module. If so, the module 57 This driver can also be built as a module. If so, the module
58 will be called abituguru3. 58 will be called abituguru3.
59 59
60config SENSORS_AD7414
61 tristate "Analog Devices AD7414"
62 depends on I2C && EXPERIMENTAL
63 help
64 If you say yes here you get support for the Analog Devices
65 AD7414 temperature monitoring chip.
66
67 This driver can also be built as a module. If so, the module
68 will be called ad7414.
69
60config SENSORS_AD7418 70config SENSORS_AD7418
61 tristate "Analog Devices AD7416, AD7417 and AD7418" 71 tristate "Analog Devices AD7416, AD7417 and AD7418"
62 depends on I2C && EXPERIMENTAL 72 depends on I2C && EXPERIMENTAL
@@ -124,7 +134,7 @@ config SENSORS_ADM1031
124 134
125config SENSORS_ADM9240 135config SENSORS_ADM9240
126 tristate "Analog Devices ADM9240 and compatibles" 136 tristate "Analog Devices ADM9240 and compatibles"
127 depends on I2C && EXPERIMENTAL 137 depends on I2C
128 select HWMON_VID 138 select HWMON_VID
129 help 139 help
130 If you say yes here you get support for Analog Devices ADM9240, 140 If you say yes here you get support for Analog Devices ADM9240,
@@ -394,13 +404,24 @@ config SENSORS_LM75
394 tristate "National Semiconductor LM75 and compatibles" 404 tristate "National Semiconductor LM75 and compatibles"
395 depends on I2C 405 depends on I2C
396 help 406 help
397 If you say yes here you get support for National Semiconductor LM75 407 If you say yes here you get support for one common type of
398 sensor chips and clones: Dallas Semiconductor DS75 and DS1775 (in 408 temperature sensor chip, with models including:
399 9-bit precision mode), and TelCom (now Microchip) TCN75. 409
410 - Dallas Semiconductor DS75 and DS1775
411 - Maxim MAX6625 and MAX6626
412 - Microchip MCP980x
413 - National Semiconductor LM75
414 - NXP's LM75A
415 - ST Microelectronics STDS75
416 - TelCom (now Microchip) TCN75
417 - Texas Instruments TMP100, TMP101, TMP75, TMP175, TMP275
418
419 This driver supports driver model based binding through board
420 specific I2C device tables.
400 421
401 The DS75 and DS1775 in 10- to 12-bit precision modes will require 422 It also supports the "legacy" style of driver binding. To use
402 a force module parameter. The driver will not handle the extra 423 that with some chips which don't replicate LM75 quirks exactly,
403 precision anyhow. 424 you may need the "force" module parameter.
404 425
405 This driver can also be built as a module. If so, the module 426 This driver can also be built as a module. If so, the module
406 will be called lm75. 427 will be called lm75.
@@ -564,8 +585,8 @@ config SENSORS_DME1737
564 select HWMON_VID 585 select HWMON_VID
565 help 586 help
566 If you say yes here you get support for the hardware monitoring 587 If you say yes here you get support for the hardware monitoring
567 and fan control features of the SMSC DME1737 (and compatibles 588 and fan control features of the SMSC DME1737, SCH311x, SCH5027, and
568 like the Asus A8000) and SCH311x Super-I/O chips. 589 Asus A8000 Super-I/O chips.
569 590
570 This driver can also be built as a module. If so, the module 591 This driver can also be built as a module. If so, the module
571 will be called dme1737. 592 will be called dme1737.
diff --git a/drivers/hwmon/Makefile b/drivers/hwmon/Makefile
index d098677e08de..7943e5cefb06 100644
--- a/drivers/hwmon/Makefile
+++ b/drivers/hwmon/Makefile
@@ -15,6 +15,7 @@ obj-$(CONFIG_SENSORS_W83791D) += w83791d.o
15 15
16obj-$(CONFIG_SENSORS_ABITUGURU) += abituguru.o 16obj-$(CONFIG_SENSORS_ABITUGURU) += abituguru.o
17obj-$(CONFIG_SENSORS_ABITUGURU3)+= abituguru3.o 17obj-$(CONFIG_SENSORS_ABITUGURU3)+= abituguru3.o
18obj-$(CONFIG_SENSORS_AD7414) += ad7414.o
18obj-$(CONFIG_SENSORS_AD7418) += ad7418.o 19obj-$(CONFIG_SENSORS_AD7418) += ad7418.o
19obj-$(CONFIG_SENSORS_ADM1021) += adm1021.o 20obj-$(CONFIG_SENSORS_ADM1021) += adm1021.o
20obj-$(CONFIG_SENSORS_ADM1025) += adm1025.o 21obj-$(CONFIG_SENSORS_ADM1025) += adm1025.o
diff --git a/drivers/hwmon/ad7414.c b/drivers/hwmon/ad7414.c
new file mode 100644
index 000000000000..ce8d94fbfd7e
--- /dev/null
+++ b/drivers/hwmon/ad7414.c
@@ -0,0 +1,268 @@
1/*
2 * An hwmon driver for the Analog Devices AD7414
3 *
4 * Copyright 2006 Stefan Roese <sr at denx.de>, DENX Software Engineering
5 *
6 * Copyright (c) 2008 PIKA Technologies
7 * Sean MacLennan <smaclennan@pikatech.com>
8 *
9 * Copyright (c) 2008 Spansion Inc.
10 * Frank Edelhaeuser <frank.edelhaeuser at spansion.com>
11 * (converted to "new style" I2C driver model, removed checkpatch.pl warnings)
12 *
13 * Based on ad7418.c
14 * Copyright 2006 Tower Technologies, Alessandro Zummo <a.zummo at towertech.it>
15 *
16 * This program is free software; you can redistribute it and/or modify
17 * it under the terms of the GNU General Public License as published by
18 * the Free Software Foundation; either version 2 of the License, or
19 * (at your option) any later version.
20 */
21
22#include <linux/module.h>
23#include <linux/jiffies.h>
24#include <linux/i2c.h>
25#include <linux/hwmon.h>
26#include <linux/hwmon-sysfs.h>
27#include <linux/err.h>
28#include <linux/mutex.h>
29#include <linux/sysfs.h>
30
31
32/* AD7414 registers */
33#define AD7414_REG_TEMP 0x00
34#define AD7414_REG_CONF 0x01
35#define AD7414_REG_T_HIGH 0x02
36#define AD7414_REG_T_LOW 0x03
37
38static u8 AD7414_REG_LIMIT[] = { AD7414_REG_T_HIGH, AD7414_REG_T_LOW };
39
40struct ad7414_data {
41 struct device *hwmon_dev;
42 struct mutex lock; /* atomic read data updates */
43 char valid; /* !=0 if following fields are valid */
44 unsigned long next_update; /* In jiffies */
45 s16 temp_input; /* Register values */
46 s8 temps[ARRAY_SIZE(AD7414_REG_LIMIT)];
47};
48
49/* REG: (0.25C/bit, two's complement) << 6 */
50static inline int ad7414_temp_from_reg(s16 reg)
51{
52 /* use integer division instead of equivalent right shift to
53 * guarantee arithmetic shift and preserve the sign
54 */
55 return ((int)reg / 64) * 250;
56}
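The register holds a quarter-degree count in its top 10 bits, so dividing by 64 (rather than shifting) recovers that count while preserving the sign, and multiplying by 250 yields millidegrees. A few worked values as a stand-alone check; temp_from_reg() mirrors ad7414_temp_from_reg() and the sample codes are illustrative only:

#include <assert.h>

static int temp_from_reg(short reg)
{
	return ((int)reg / 64) * 250;	/* millidegrees Celsius */
}

int main(void)
{
	assert(temp_from_reg(0x1900) == 25000);			/* +25.00 C */
	assert(temp_from_reg((short)0xFFC0) == -250);		/* -0.25 C  */
	assert(temp_from_reg((short)0x8000) == -128000);	/* lowest code */
	return 0;
}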
57
58static inline int ad7414_read(struct i2c_client *client, u8 reg)
59{
60 if (reg == AD7414_REG_TEMP) {
61 int value = i2c_smbus_read_word_data(client, reg);
62 return (value < 0) ? value : swab16(value);
63 } else
64 return i2c_smbus_read_byte_data(client, reg);
65}
66
67static inline int ad7414_write(struct i2c_client *client, u8 reg, u8 value)
68{
69 return i2c_smbus_write_byte_data(client, reg, value);
70}
71
72struct ad7414_data *ad7414_update_device(struct device *dev)
73{
74 struct i2c_client *client = to_i2c_client(dev);
75 struct ad7414_data *data = i2c_get_clientdata(client);
76
77 mutex_lock(&data->lock);
78
79 if (time_after(jiffies, data->next_update) || !data->valid) {
80 int value, i;
81
82 dev_dbg(&client->dev, "starting ad7414 update\n");
83
84 value = ad7414_read(client, AD7414_REG_TEMP);
85 if (value < 0)
86 dev_dbg(&client->dev, "AD7414_REG_TEMP err %d\n",
87 value);
88 else
89 data->temp_input = value;
90
91 for (i = 0; i < ARRAY_SIZE(AD7414_REG_LIMIT); ++i) {
92 value = ad7414_read(client, AD7414_REG_LIMIT[i]);
93 if (value < 0)
94 dev_dbg(&client->dev, "AD7414 reg %d err %d\n",
95 AD7414_REG_LIMIT[i], value);
96 else
97 data->temps[i] = value;
98 }
99
100 data->next_update = jiffies + HZ + HZ / 2;
101 data->valid = 1;
102 }
103
104 mutex_unlock(&data->lock);
105
106 return data;
107}
108
109static ssize_t show_temp_input(struct device *dev,
110 struct device_attribute *attr, char *buf)
111{
112 struct ad7414_data *data = ad7414_update_device(dev);
113 return sprintf(buf, "%d\n", ad7414_temp_from_reg(data->temp_input));
114}
115static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, show_temp_input, NULL, 0);
116
117static ssize_t show_max_min(struct device *dev, struct device_attribute *attr,
118 char *buf)
119{
120 int index = to_sensor_dev_attr(attr)->index;
121 struct ad7414_data *data = ad7414_update_device(dev);
122 return sprintf(buf, "%d\n", data->temps[index] * 1000);
123}
124
125static ssize_t set_max_min(struct device *dev,
126 struct device_attribute *attr,
127 const char *buf, size_t count)
128{
129 struct i2c_client *client = to_i2c_client(dev);
130 struct ad7414_data *data = i2c_get_clientdata(client);
131 int index = to_sensor_dev_attr(attr)->index;
132 u8 reg = AD7414_REG_LIMIT[index];
133 long temp = simple_strtol(buf, NULL, 10);
134
135 temp = SENSORS_LIMIT(temp, -40000, 85000);
136 temp = (temp + (temp < 0 ? -500 : 500)) / 1000;
137
138 mutex_lock(&data->lock);
139 data->temps[index] = temp;
140 ad7414_write(client, reg, temp);
141 mutex_unlock(&data->lock);
142 return count;
143}
144
145static SENSOR_DEVICE_ATTR(temp1_max, S_IWUSR | S_IRUGO,
146 show_max_min, set_max_min, 0);
147static SENSOR_DEVICE_ATTR(temp1_min, S_IWUSR | S_IRUGO,
148 show_max_min, set_max_min, 1);
149
150static ssize_t show_alarm(struct device *dev, struct device_attribute *attr,
151 char *buf)
152{
153 int bitnr = to_sensor_dev_attr(attr)->index;
154 struct ad7414_data *data = ad7414_update_device(dev);
155 int value = (data->temp_input >> bitnr) & 1;
156 return sprintf(buf, "%d\n", value);
157}
158
159static SENSOR_DEVICE_ATTR(temp1_min_alarm, S_IRUGO, show_alarm, NULL, 3);
160static SENSOR_DEVICE_ATTR(temp1_max_alarm, S_IRUGO, show_alarm, NULL, 4);
161
162static struct attribute *ad7414_attributes[] = {
163 &sensor_dev_attr_temp1_input.dev_attr.attr,
164 &sensor_dev_attr_temp1_max.dev_attr.attr,
165 &sensor_dev_attr_temp1_min.dev_attr.attr,
166 &sensor_dev_attr_temp1_max_alarm.dev_attr.attr,
167 &sensor_dev_attr_temp1_min_alarm.dev_attr.attr,
168 NULL
169};
170
171static const struct attribute_group ad7414_group = {
172 .attrs = ad7414_attributes,
173};
174
175static int ad7414_probe(struct i2c_client *client,
176 const struct i2c_device_id *dev_id)
177{
178 struct ad7414_data *data;
179 int conf;
180 int err = 0;
181
182 if (!i2c_check_functionality(client->adapter, I2C_FUNC_SMBUS_BYTE_DATA |
183 I2C_FUNC_SMBUS_READ_WORD_DATA))
184 goto exit;
185
186 data = kzalloc(sizeof(struct ad7414_data), GFP_KERNEL);
187 if (!data) {
188 err = -ENOMEM;
189 goto exit;
190 }
191
192 i2c_set_clientdata(client, data);
193 mutex_init(&data->lock);
194
195 dev_info(&client->dev, "chip found\n");
196
197 /* Make sure the chip is powered up. */
198 conf = i2c_smbus_read_byte_data(client, AD7414_REG_CONF);
199 if (conf < 0)
200 dev_warn(&client->dev,
201 "ad7414_probe unable to read config register.\n");
202 else {
203 conf &= ~(1 << 7);
204 i2c_smbus_write_byte_data(client, AD7414_REG_CONF, conf);
205 }
206
207 /* Register sysfs hooks */
208 err = sysfs_create_group(&client->dev.kobj, &ad7414_group);
209 if (err)
210 goto exit_free;
211
212 data->hwmon_dev = hwmon_device_register(&client->dev);
213 if (IS_ERR(data->hwmon_dev)) {
214 err = PTR_ERR(data->hwmon_dev);
215 goto exit_remove;
216 }
217
218 return 0;
219
220exit_remove:
221 sysfs_remove_group(&client->dev.kobj, &ad7414_group);
222exit_free:
223 kfree(data);
224exit:
225 return err;
226}
227
228static int __devexit ad7414_remove(struct i2c_client *client)
229{
230 struct ad7414_data *data = i2c_get_clientdata(client);
231
232 hwmon_device_unregister(data->hwmon_dev);
233 sysfs_remove_group(&client->dev.kobj, &ad7414_group);
234 kfree(data);
235 return 0;
236}
237
238static const struct i2c_device_id ad7414_id[] = {
239 { "ad7414", 0 },
240 {}
241};
242
243static struct i2c_driver ad7414_driver = {
244 .driver = {
245 .name = "ad7414",
246 },
247 .probe = ad7414_probe,
248 .remove = __devexit_p(ad7414_remove),
249 .id_table = ad7414_id,
250};
251
252static int __init ad7414_init(void)
253{
254 return i2c_add_driver(&ad7414_driver);
255}
256module_init(ad7414_init);
257
258static void __exit ad7414_exit(void)
259{
260 i2c_del_driver(&ad7414_driver);
261}
262module_exit(ad7414_exit);
263
264MODULE_AUTHOR("Stefan Roese <sr at denx.de>, "
265 "Frank Edelhaeuser <frank.edelhaeuser at spansion.com>");
266
267MODULE_DESCRIPTION("AD7414 driver");
268MODULE_LICENSE("GPL");
diff --git a/drivers/hwmon/adt7473.c b/drivers/hwmon/adt7473.c
index ce4a7cb5a116..3a0b63136479 100644
--- a/drivers/hwmon/adt7473.c
+++ b/drivers/hwmon/adt7473.c
@@ -39,32 +39,20 @@ I2C_CLIENT_INSMOD_1(adt7473);
39#define ADT7473_REG_BASE_ADDR 0x20 39#define ADT7473_REG_BASE_ADDR 0x20
40 40
41#define ADT7473_REG_VOLT_BASE_ADDR 0x21 41#define ADT7473_REG_VOLT_BASE_ADDR 0x21
42#define ADT7473_REG_VOLT_MAX_ADDR 0x22
43#define ADT7473_REG_VOLT_MIN_BASE_ADDR 0x46 42#define ADT7473_REG_VOLT_MIN_BASE_ADDR 0x46
44#define ADT7473_REG_VOLT_MIN_MAX_ADDR 0x49
45 43
46#define ADT7473_REG_TEMP_BASE_ADDR 0x25 44#define ADT7473_REG_TEMP_BASE_ADDR 0x25
47#define ADT7473_REG_TEMP_MAX_ADDR 0x27
48#define ADT7473_REG_TEMP_LIMITS_BASE_ADDR 0x4E 45#define ADT7473_REG_TEMP_LIMITS_BASE_ADDR 0x4E
49#define ADT7473_REG_TEMP_LIMITS_MAX_ADDR 0x53
50#define ADT7473_REG_TEMP_TMIN_BASE_ADDR 0x67 46#define ADT7473_REG_TEMP_TMIN_BASE_ADDR 0x67
51#define ADT7473_REG_TEMP_TMIN_MAX_ADDR 0x69
52#define ADT7473_REG_TEMP_TMAX_BASE_ADDR 0x6A 47#define ADT7473_REG_TEMP_TMAX_BASE_ADDR 0x6A
53#define ADT7473_REG_TEMP_TMAX_MAX_ADDR 0x6C
54 48
55#define ADT7473_REG_FAN_BASE_ADDR 0x28 49#define ADT7473_REG_FAN_BASE_ADDR 0x28
56#define ADT7473_REG_FAN_MAX_ADDR 0x2F
57#define ADT7473_REG_FAN_MIN_BASE_ADDR 0x54 50#define ADT7473_REG_FAN_MIN_BASE_ADDR 0x54
58#define ADT7473_REG_FAN_MIN_MAX_ADDR 0x5B
59 51
60#define ADT7473_REG_PWM_BASE_ADDR 0x30 52#define ADT7473_REG_PWM_BASE_ADDR 0x30
61#define ADT7473_REG_PWM_MAX_ADDR 0x32
62#define ADT7473_REG_PWM_MIN_BASE_ADDR 0x64 53#define ADT7473_REG_PWM_MIN_BASE_ADDR 0x64
63#define ADT7473_REG_PWM_MIN_MAX_ADDR 0x66
64#define ADT7473_REG_PWM_MAX_BASE_ADDR 0x38 54#define ADT7473_REG_PWM_MAX_BASE_ADDR 0x38
65#define ADT7473_REG_PWM_MAX_MAX_ADDR 0x3A
66#define ADT7473_REG_PWM_BHVR_BASE_ADDR 0x5C 55#define ADT7473_REG_PWM_BHVR_BASE_ADDR 0x5C
67#define ADT7473_REG_PWM_BHVR_MAX_ADDR 0x5E
68#define ADT7473_PWM_BHVR_MASK 0xE0 56#define ADT7473_PWM_BHVR_MASK 0xE0
69#define ADT7473_PWM_BHVR_SHIFT 5 57#define ADT7473_PWM_BHVR_SHIFT 5
70 58
@@ -102,7 +90,6 @@ I2C_CLIENT_INSMOD_1(adt7473);
102#define ADT7473_FAN4_ALARM 0x20 90#define ADT7473_FAN4_ALARM 0x20
103#define ADT7473_R1T_SHORT 0x40 91#define ADT7473_R1T_SHORT 0x40
104#define ADT7473_R2T_SHORT 0x80 92#define ADT7473_R2T_SHORT 0x80
105#define ADT7473_REG_MAX_ADDR 0x80
106 93
107#define ALARM2(x) ((x) << 8) 94#define ALARM2(x) ((x) << 8)
108 95
@@ -583,10 +570,9 @@ static ssize_t set_max_duty_at_crit(struct device *dev,
583 struct i2c_client *client = to_i2c_client(dev); 570 struct i2c_client *client = to_i2c_client(dev);
584 struct adt7473_data *data = i2c_get_clientdata(client); 571 struct adt7473_data *data = i2c_get_clientdata(client);
585 int temp = simple_strtol(buf, NULL, 10); 572 int temp = simple_strtol(buf, NULL, 10);
586 temp = temp && 0xFF;
587 573
588 mutex_lock(&data->lock); 574 mutex_lock(&data->lock);
589 data->max_duty_at_overheat = temp; 575 data->max_duty_at_overheat = !!temp;
590 reg = i2c_smbus_read_byte_data(client, ADT7473_REG_CFG4); 576 reg = i2c_smbus_read_byte_data(client, ADT7473_REG_CFG4);
591 if (temp) 577 if (temp)
592 reg |= ADT7473_CFG4_MAX_DUTY_AT_OVT; 578 reg |= ADT7473_CFG4_MAX_DUTY_AT_OVT;
diff --git a/drivers/hwmon/dme1737.c b/drivers/hwmon/dme1737.c
index 7673f65877e1..cdb8311e4ef7 100644
--- a/drivers/hwmon/dme1737.c
+++ b/drivers/hwmon/dme1737.c
@@ -1,11 +1,11 @@
1/* 1/*
2 * dme1737.c - Driver for the SMSC DME1737, Asus A8000, and SMSC SCH311x 2 * dme1737.c - Driver for the SMSC DME1737, Asus A8000, SMSC SCH311x and
3 * Super-I/O chips integrated hardware monitoring features. 3 * SCH5027 Super-I/O chips integrated hardware monitoring features.
4 * Copyright (c) 2007 Juerg Haefliger <juergh@gmail.com> 4 * Copyright (c) 2007, 2008 Juerg Haefliger <juergh@gmail.com>
5 * 5 *
6 * This driver is an I2C/ISA hybrid, meaning that it uses the I2C bus to access 6 * This driver is an I2C/ISA hybrid, meaning that it uses the I2C bus to access
7 * the chip registers if a DME1737 (or A8000) is found and the ISA bus if a 7 * the chip registers if a DME1737, A8000, or SCH5027 is found and the ISA bus
8 * SCH311x chip is found. Both types of chips have very similar hardware 8 * if a SCH311x chip is found. Both types of chips have very similar hardware
9 * monitoring capabilities but differ in the way they can be accessed. 9 * monitoring capabilities but differ in the way they can be accessed.
10 * 10 *
11 * This program is free software; you can redistribute it and/or modify 11 * This program is free software; you can redistribute it and/or modify
@@ -48,11 +48,19 @@ static unsigned short force_id;
48module_param(force_id, ushort, 0); 48module_param(force_id, ushort, 0);
49MODULE_PARM_DESC(force_id, "Override the detected device ID"); 49MODULE_PARM_DESC(force_id, "Override the detected device ID");
50 50
51static int probe_all_addr;
52module_param(probe_all_addr, bool, 0);
53MODULE_PARM_DESC(probe_all_addr, "Include probing of non-standard LPC "
54 "addresses");
55
51/* Addresses to scan */ 56/* Addresses to scan */
52static const unsigned short normal_i2c[] = {0x2c, 0x2d, 0x2e, I2C_CLIENT_END}; 57static const unsigned short normal_i2c[] = {0x2c, 0x2d, 0x2e, I2C_CLIENT_END};
53 58
54/* Insmod parameters */ 59/* Insmod parameters */
55I2C_CLIENT_INSMOD_1(dme1737); 60I2C_CLIENT_INSMOD_2(dme1737, sch5027);
61
62/* ISA chip types */
63enum isa_chips { sch311x = sch5027 + 1 };
56 64
57/* --------------------------------------------------------------------- 65/* ---------------------------------------------------------------------
58 * Registers 66 * Registers
@@ -158,6 +166,7 @@ static const u8 DME1737_BIT_ALARM_FAN[] = {10, 11, 12, 13, 22, 23};
158#define DME1737_VERSTEP 0x88 166#define DME1737_VERSTEP 0x88
159#define DME1737_VERSTEP_MASK 0xf8 167#define DME1737_VERSTEP_MASK 0xf8
160#define SCH311X_DEVICE 0x8c 168#define SCH311X_DEVICE 0x8c
169#define SCH5027_VERSTEP 0x69
161 170
162/* Length of ISA address segment */ 171/* Length of ISA address segment */
163#define DME1737_EXTENT 2 172#define DME1737_EXTENT 2
@@ -176,6 +185,8 @@ struct dme1737_data {
176 int valid; /* !=0 if following fields are valid */ 185 int valid; /* !=0 if following fields are valid */
177 unsigned long last_update; /* in jiffies */ 186 unsigned long last_update; /* in jiffies */
178 unsigned long last_vbat; /* in jiffies */ 187 unsigned long last_vbat; /* in jiffies */
188 enum chips type;
189 const int *in_nominal; /* pointer to IN_NOMINAL array */
179 190
180 u8 vid; 191 u8 vid;
181 u8 pwm_rr_en; 192 u8 pwm_rr_en;
@@ -210,20 +221,27 @@ struct dme1737_data {
210}; 221};
211 222
212/* Nominal voltage values */ 223/* Nominal voltage values */
213static const int IN_NOMINAL[] = {5000, 2250, 3300, 5000, 12000, 3300, 3300}; 224static const int IN_NOMINAL_DME1737[] = {5000, 2250, 3300, 5000, 12000, 3300,
225 3300};
226static const int IN_NOMINAL_SCH311x[] = {2500, 1500, 3300, 5000, 12000, 3300,
227 3300};
228static const int IN_NOMINAL_SCH5027[] = {5000, 2250, 3300, 1125, 1125, 3300,
229 3300};
230#define IN_NOMINAL(type) ((type) == sch311x ? IN_NOMINAL_SCH311x : \
231 (type) == sch5027 ? IN_NOMINAL_SCH5027 : \
232 IN_NOMINAL_DME1737)
214 233
215/* Voltage input 234/* Voltage input
216 * Voltage inputs have 16 bits resolution, limit values have 8 bits 235 * Voltage inputs have 16 bits resolution, limit values have 8 bits
217 * resolution. */ 236 * resolution. */
218static inline int IN_FROM_REG(int reg, int ix, int res) 237static inline int IN_FROM_REG(int reg, int nominal, int res)
219{ 238{
220 return (reg * IN_NOMINAL[ix] + (3 << (res - 3))) / (3 << (res - 2)); 239 return (reg * nominal + (3 << (res - 3))) / (3 << (res - 2));
221} 240}
222 241
223static inline int IN_TO_REG(int val, int ix) 242static inline int IN_TO_REG(int val, int nominal)
224{ 243{
225 return SENSORS_LIMIT((val * 192 + IN_NOMINAL[ix] / 2) / 244 return SENSORS_LIMIT((val * 192 + nominal / 2) / nominal, 0, 255);
226 IN_NOMINAL[ix], 0, 255);
227} 245}
228 246
229/* Temperature input 247/* Temperature input
@@ -552,7 +570,10 @@ static struct dme1737_data *dme1737_update_device(struct device *dev)
552 570
553 /* Sample register contents every 1 sec */ 571 /* Sample register contents every 1 sec */
554 if (time_after(jiffies, data->last_update + HZ) || !data->valid) { 572 if (time_after(jiffies, data->last_update + HZ) || !data->valid) {
555 data->vid = dme1737_read(client, DME1737_REG_VID) & 0x3f; 573 if (data->type != sch5027) {
574 data->vid = dme1737_read(client, DME1737_REG_VID) &
575 0x3f;
576 }
556 577
557 /* In (voltage) registers */ 578 /* In (voltage) registers */
558 for (ix = 0; ix < ARRAY_SIZE(data->in); ix++) { 579 for (ix = 0; ix < ARRAY_SIZE(data->in); ix++) {
@@ -580,8 +601,10 @@ static struct dme1737_data *dme1737_update_device(struct device *dev)
580 DME1737_REG_TEMP_MIN(ix)); 601 DME1737_REG_TEMP_MIN(ix));
581 data->temp_max[ix] = dme1737_read(client, 602 data->temp_max[ix] = dme1737_read(client,
582 DME1737_REG_TEMP_MAX(ix)); 603 DME1737_REG_TEMP_MAX(ix));
583 data->temp_offset[ix] = dme1737_read(client, 604 if (data->type != sch5027) {
584 DME1737_REG_TEMP_OFFSET(ix)); 605 data->temp_offset[ix] = dme1737_read(client,
606 DME1737_REG_TEMP_OFFSET(ix));
607 }
585 } 608 }
586 609
587 /* In and temp LSB registers 610 /* In and temp LSB registers
@@ -656,9 +679,11 @@ static struct dme1737_data *dme1737_update_device(struct device *dev)
656 data->zone_abs[ix] = dme1737_read(client, 679 data->zone_abs[ix] = dme1737_read(client,
657 DME1737_REG_ZONE_ABS(ix)); 680 DME1737_REG_ZONE_ABS(ix));
658 } 681 }
659 for (ix = 0; ix < ARRAY_SIZE(data->zone_hyst); ix++) { 682 if (data->type != sch5027) {
660 data->zone_hyst[ix] = dme1737_read(client, 683 for (ix = 0; ix < ARRAY_SIZE(data->zone_hyst); ix++) {
684 data->zone_hyst[ix] = dme1737_read(client,
661 DME1737_REG_ZONE_HYST(ix)); 685 DME1737_REG_ZONE_HYST(ix));
686 }
662 } 687 }
663 688
664 /* Alarm registers */ 689 /* Alarm registers */
@@ -722,13 +747,13 @@ static ssize_t show_in(struct device *dev, struct device_attribute *attr,
722 747
723 switch (fn) { 748 switch (fn) {
724 case SYS_IN_INPUT: 749 case SYS_IN_INPUT:
725 res = IN_FROM_REG(data->in[ix], ix, 16); 750 res = IN_FROM_REG(data->in[ix], data->in_nominal[ix], 16);
726 break; 751 break;
727 case SYS_IN_MIN: 752 case SYS_IN_MIN:
728 res = IN_FROM_REG(data->in_min[ix], ix, 8); 753 res = IN_FROM_REG(data->in_min[ix], data->in_nominal[ix], 8);
729 break; 754 break;
730 case SYS_IN_MAX: 755 case SYS_IN_MAX:
731 res = IN_FROM_REG(data->in_max[ix], ix, 8); 756 res = IN_FROM_REG(data->in_max[ix], data->in_nominal[ix], 8);
732 break; 757 break;
733 case SYS_IN_ALARM: 758 case SYS_IN_ALARM:
734 res = (data->alarms >> DME1737_BIT_ALARM_IN[ix]) & 0x01; 759 res = (data->alarms >> DME1737_BIT_ALARM_IN[ix]) & 0x01;
@@ -755,12 +780,12 @@ static ssize_t set_in(struct device *dev, struct device_attribute *attr,
755 mutex_lock(&data->update_lock); 780 mutex_lock(&data->update_lock);
756 switch (fn) { 781 switch (fn) {
757 case SYS_IN_MIN: 782 case SYS_IN_MIN:
758 data->in_min[ix] = IN_TO_REG(val, ix); 783 data->in_min[ix] = IN_TO_REG(val, data->in_nominal[ix]);
759 dme1737_write(client, DME1737_REG_IN_MIN(ix), 784 dme1737_write(client, DME1737_REG_IN_MIN(ix),
760 data->in_min[ix]); 785 data->in_min[ix]);
761 break; 786 break;
762 case SYS_IN_MAX: 787 case SYS_IN_MAX:
763 data->in_max[ix] = IN_TO_REG(val, ix); 788 data->in_max[ix] = IN_TO_REG(val, data->in_nominal[ix]);
764 dme1737_write(client, DME1737_REG_IN_MAX(ix), 789 dme1737_write(client, DME1737_REG_IN_MAX(ix),
765 data->in_max[ix]); 790 data->in_max[ix]);
766 break; 791 break;
@@ -1153,7 +1178,7 @@ static ssize_t show_pwm(struct device *dev, struct device_attribute *attr,
1153 return sprintf(buf, "%d\n", res); 1178 return sprintf(buf, "%d\n", res);
1154} 1179}
1155 1180
1156static struct attribute *dme1737_attr_pwm[]; 1181static struct attribute *dme1737_pwm_chmod_attr[];
1157static void dme1737_chmod_file(struct device*, struct attribute*, mode_t); 1182static void dme1737_chmod_file(struct device*, struct attribute*, mode_t);
1158 1183
1159static ssize_t set_pwm(struct device *dev, struct device_attribute *attr, 1184static ssize_t set_pwm(struct device *dev, struct device_attribute *attr,
@@ -1217,7 +1242,7 @@ static ssize_t set_pwm(struct device *dev, struct device_attribute *attr,
1217 switch (val) { 1242 switch (val) {
1218 case 0: 1243 case 0:
1219 /* Change permissions of pwm[ix] to read-only */ 1244 /* Change permissions of pwm[ix] to read-only */
1220 dme1737_chmod_file(dev, dme1737_attr_pwm[ix], 1245 dme1737_chmod_file(dev, dme1737_pwm_chmod_attr[ix],
1221 S_IRUGO); 1246 S_IRUGO);
1222 /* Turn fan fully on */ 1247 /* Turn fan fully on */
1223 data->pwm_config[ix] = PWM_EN_TO_REG(0, 1248 data->pwm_config[ix] = PWM_EN_TO_REG(0,
@@ -1232,12 +1257,12 @@ static ssize_t set_pwm(struct device *dev, struct device_attribute *attr,
1232 dme1737_write(client, DME1737_REG_PWM_CONFIG(ix), 1257 dme1737_write(client, DME1737_REG_PWM_CONFIG(ix),
1233 data->pwm_config[ix]); 1258 data->pwm_config[ix]);
1234 /* Change permissions of pwm[ix] to read-writeable */ 1259 /* Change permissions of pwm[ix] to read-writeable */
1235 dme1737_chmod_file(dev, dme1737_attr_pwm[ix], 1260 dme1737_chmod_file(dev, dme1737_pwm_chmod_attr[ix],
1236 S_IRUGO | S_IWUSR); 1261 S_IRUGO | S_IWUSR);
1237 break; 1262 break;
1238 case 2: 1263 case 2:
1239 /* Change permissions of pwm[ix] to read-only */ 1264 /* Change permissions of pwm[ix] to read-only */
1240 dme1737_chmod_file(dev, dme1737_attr_pwm[ix], 1265 dme1737_chmod_file(dev, dme1737_pwm_chmod_attr[ix],
1241 S_IRUGO); 1266 S_IRUGO);
1242 /* Turn on auto mode using the saved zone channel 1267 /* Turn on auto mode using the saved zone channel
1243 * assignment */ 1268 * assignment */
@@ -1501,9 +1526,9 @@ SENSOR_DEVICE_ATTR_PWM_1TO3(3);
1501/* PWMs 5-6 */ 1526/* PWMs 5-6 */
1502 1527
1503#define SENSOR_DEVICE_ATTR_PWM_5TO6(ix) \ 1528#define SENSOR_DEVICE_ATTR_PWM_5TO6(ix) \
1504static SENSOR_DEVICE_ATTR_2(pwm##ix, S_IRUGO | S_IWUSR, \ 1529static SENSOR_DEVICE_ATTR_2(pwm##ix, S_IRUGO, \
1505 show_pwm, set_pwm, SYS_PWM, ix-1); \ 1530 show_pwm, set_pwm, SYS_PWM, ix-1); \
1506static SENSOR_DEVICE_ATTR_2(pwm##ix##_freq, S_IRUGO | S_IWUSR, \ 1531static SENSOR_DEVICE_ATTR_2(pwm##ix##_freq, S_IRUGO, \
1507 show_pwm, set_pwm, SYS_PWM_FREQ, ix-1); \ 1532 show_pwm, set_pwm, SYS_PWM_FREQ, ix-1); \
1508static SENSOR_DEVICE_ATTR_2(pwm##ix##_enable, S_IRUGO, \ 1533static SENSOR_DEVICE_ATTR_2(pwm##ix##_enable, S_IRUGO, \
1509 show_pwm, NULL, SYS_PWM_ENABLE, ix-1) 1534 show_pwm, NULL, SYS_PWM_ENABLE, ix-1)
@@ -1517,225 +1542,286 @@ static DEVICE_ATTR(vrm, S_IRUGO | S_IWUSR, show_vrm, set_vrm);
1517static DEVICE_ATTR(cpu0_vid, S_IRUGO, show_vid, NULL); 1542static DEVICE_ATTR(cpu0_vid, S_IRUGO, show_vid, NULL);
1518static DEVICE_ATTR(name, S_IRUGO, show_name, NULL); /* for ISA devices */ 1543static DEVICE_ATTR(name, S_IRUGO, show_name, NULL); /* for ISA devices */
1519 1544
1520#define SENSOR_DEV_ATTR_IN(ix) \
1521&sensor_dev_attr_in##ix##_input.dev_attr.attr, \
1522&sensor_dev_attr_in##ix##_min.dev_attr.attr, \
1523&sensor_dev_attr_in##ix##_max.dev_attr.attr, \
1524&sensor_dev_attr_in##ix##_alarm.dev_attr.attr
1525
1526/* These attributes are read-writeable only if the chip is *not* locked */
1527#define SENSOR_DEV_ATTR_TEMP_LOCK(ix) \
1528&sensor_dev_attr_temp##ix##_offset.dev_attr.attr
1529
1530#define SENSOR_DEV_ATTR_TEMP(ix) \
1531SENSOR_DEV_ATTR_TEMP_LOCK(ix), \
1532&sensor_dev_attr_temp##ix##_input.dev_attr.attr, \
1533&sensor_dev_attr_temp##ix##_min.dev_attr.attr, \
1534&sensor_dev_attr_temp##ix##_max.dev_attr.attr, \
1535&sensor_dev_attr_temp##ix##_alarm.dev_attr.attr, \
1536&sensor_dev_attr_temp##ix##_fault.dev_attr.attr
1537
1538/* These attributes are read-writeable only if the chip is *not* locked */
1539#define SENSOR_DEV_ATTR_ZONE_LOCK(ix) \
1540&sensor_dev_attr_zone##ix##_auto_point1_temp_hyst.dev_attr.attr, \
1541&sensor_dev_attr_zone##ix##_auto_point1_temp.dev_attr.attr, \
1542&sensor_dev_attr_zone##ix##_auto_point2_temp.dev_attr.attr, \
1543&sensor_dev_attr_zone##ix##_auto_point3_temp.dev_attr.attr
1544
1545#define SENSOR_DEV_ATTR_ZONE(ix) \
1546SENSOR_DEV_ATTR_ZONE_LOCK(ix), \
1547&sensor_dev_attr_zone##ix##_auto_channels_temp.dev_attr.attr
1548
1549#define SENSOR_DEV_ATTR_FAN_1TO4(ix) \
1550&sensor_dev_attr_fan##ix##_input.dev_attr.attr, \
1551&sensor_dev_attr_fan##ix##_min.dev_attr.attr, \
1552&sensor_dev_attr_fan##ix##_alarm.dev_attr.attr, \
1553&sensor_dev_attr_fan##ix##_type.dev_attr.attr
1554
1555#define SENSOR_DEV_ATTR_FAN_5TO6(ix) \
1556&sensor_dev_attr_fan##ix##_input.dev_attr.attr, \
1557&sensor_dev_attr_fan##ix##_min.dev_attr.attr, \
1558&sensor_dev_attr_fan##ix##_alarm.dev_attr.attr, \
1559&sensor_dev_attr_fan##ix##_max.dev_attr.attr
1560
1561/* These attributes are read-writeable only if the chip is *not* locked */
1562#define SENSOR_DEV_ATTR_PWM_1TO3_LOCK(ix) \
1563&sensor_dev_attr_pwm##ix##_freq.dev_attr.attr, \
1564&sensor_dev_attr_pwm##ix##_enable.dev_attr.attr, \
1565&sensor_dev_attr_pwm##ix##_ramp_rate.dev_attr.attr, \
1566&sensor_dev_attr_pwm##ix##_auto_channels_zone.dev_attr.attr, \
1567&sensor_dev_attr_pwm##ix##_auto_pwm_min.dev_attr.attr, \
1568&sensor_dev_attr_pwm##ix##_auto_point1_pwm.dev_attr.attr
1569
1570#define SENSOR_DEV_ATTR_PWM_1TO3(ix) \
1571SENSOR_DEV_ATTR_PWM_1TO3_LOCK(ix), \
1572&sensor_dev_attr_pwm##ix.dev_attr.attr, \
1573&sensor_dev_attr_pwm##ix##_auto_point2_pwm.dev_attr.attr
1574
1575/* These attributes are read-writeable only if the chip is *not* locked */
1576#define SENSOR_DEV_ATTR_PWM_5TO6_LOCK(ix) \
1577&sensor_dev_attr_pwm##ix.dev_attr.attr, \
1578&sensor_dev_attr_pwm##ix##_freq.dev_attr.attr
1579
1580#define SENSOR_DEV_ATTR_PWM_5TO6(ix) \
1581SENSOR_DEV_ATTR_PWM_5TO6_LOCK(ix), \
1582&sensor_dev_attr_pwm##ix##_enable.dev_attr.attr
1583
1584/* This struct holds all the attributes that are always present and need to be 1545/* This struct holds all the attributes that are always present and need to be
1585 * created unconditionally. The attributes that need modification of their 1546 * created unconditionally. The attributes that need modification of their
1586 * permissions are created read-only and write permissions are added or removed 1547 * permissions are created read-only and write permissions are added or removed
1587 * on the fly when required */ 1548 * on the fly when required */
1588static struct attribute *dme1737_attr[] ={ 1549static struct attribute *dme1737_attr[] ={
1589 /* Voltages */ 1550 /* Voltages */
1590 SENSOR_DEV_ATTR_IN(0), 1551 &sensor_dev_attr_in0_input.dev_attr.attr,
1591 SENSOR_DEV_ATTR_IN(1), 1552 &sensor_dev_attr_in0_min.dev_attr.attr,
1592 SENSOR_DEV_ATTR_IN(2), 1553 &sensor_dev_attr_in0_max.dev_attr.attr,
1593 SENSOR_DEV_ATTR_IN(3), 1554 &sensor_dev_attr_in0_alarm.dev_attr.attr,
1594 SENSOR_DEV_ATTR_IN(4), 1555 &sensor_dev_attr_in1_input.dev_attr.attr,
1595 SENSOR_DEV_ATTR_IN(5), 1556 &sensor_dev_attr_in1_min.dev_attr.attr,
1596 SENSOR_DEV_ATTR_IN(6), 1557 &sensor_dev_attr_in1_max.dev_attr.attr,
1558 &sensor_dev_attr_in1_alarm.dev_attr.attr,
1559 &sensor_dev_attr_in2_input.dev_attr.attr,
1560 &sensor_dev_attr_in2_min.dev_attr.attr,
1561 &sensor_dev_attr_in2_max.dev_attr.attr,
1562 &sensor_dev_attr_in2_alarm.dev_attr.attr,
1563 &sensor_dev_attr_in3_input.dev_attr.attr,
1564 &sensor_dev_attr_in3_min.dev_attr.attr,
1565 &sensor_dev_attr_in3_max.dev_attr.attr,
1566 &sensor_dev_attr_in3_alarm.dev_attr.attr,
1567 &sensor_dev_attr_in4_input.dev_attr.attr,
1568 &sensor_dev_attr_in4_min.dev_attr.attr,
1569 &sensor_dev_attr_in4_max.dev_attr.attr,
1570 &sensor_dev_attr_in4_alarm.dev_attr.attr,
1571 &sensor_dev_attr_in5_input.dev_attr.attr,
1572 &sensor_dev_attr_in5_min.dev_attr.attr,
1573 &sensor_dev_attr_in5_max.dev_attr.attr,
1574 &sensor_dev_attr_in5_alarm.dev_attr.attr,
1575 &sensor_dev_attr_in6_input.dev_attr.attr,
1576 &sensor_dev_attr_in6_min.dev_attr.attr,
1577 &sensor_dev_attr_in6_max.dev_attr.attr,
1578 &sensor_dev_attr_in6_alarm.dev_attr.attr,
1579 /* Temperatures */
1580 &sensor_dev_attr_temp1_input.dev_attr.attr,
1581 &sensor_dev_attr_temp1_min.dev_attr.attr,
1582 &sensor_dev_attr_temp1_max.dev_attr.attr,
1583 &sensor_dev_attr_temp1_alarm.dev_attr.attr,
1584 &sensor_dev_attr_temp1_fault.dev_attr.attr,
1585 &sensor_dev_attr_temp2_input.dev_attr.attr,
1586 &sensor_dev_attr_temp2_min.dev_attr.attr,
1587 &sensor_dev_attr_temp2_max.dev_attr.attr,
1588 &sensor_dev_attr_temp2_alarm.dev_attr.attr,
1589 &sensor_dev_attr_temp2_fault.dev_attr.attr,
1590 &sensor_dev_attr_temp3_input.dev_attr.attr,
1591 &sensor_dev_attr_temp3_min.dev_attr.attr,
1592 &sensor_dev_attr_temp3_max.dev_attr.attr,
1593 &sensor_dev_attr_temp3_alarm.dev_attr.attr,
1594 &sensor_dev_attr_temp3_fault.dev_attr.attr,
1595 /* Zones */
1596 &sensor_dev_attr_zone1_auto_point1_temp.dev_attr.attr,
1597 &sensor_dev_attr_zone1_auto_point2_temp.dev_attr.attr,
1598 &sensor_dev_attr_zone1_auto_point3_temp.dev_attr.attr,
1599 &sensor_dev_attr_zone1_auto_channels_temp.dev_attr.attr,
1600 &sensor_dev_attr_zone2_auto_point1_temp.dev_attr.attr,
1601 &sensor_dev_attr_zone2_auto_point2_temp.dev_attr.attr,
1602 &sensor_dev_attr_zone2_auto_point3_temp.dev_attr.attr,
1603 &sensor_dev_attr_zone2_auto_channels_temp.dev_attr.attr,
1604 &sensor_dev_attr_zone3_auto_point1_temp.dev_attr.attr,
1605 &sensor_dev_attr_zone3_auto_point2_temp.dev_attr.attr,
1606 &sensor_dev_attr_zone3_auto_point3_temp.dev_attr.attr,
1607 &sensor_dev_attr_zone3_auto_channels_temp.dev_attr.attr,
1608 NULL
1609};
1610
1611static const struct attribute_group dme1737_group = {
1612 .attrs = dme1737_attr,
1613};
1614
1615/* The following struct holds misc attributes, which are not available in all
1616 * chips. Their creation depends on the chip type which is determined during
1617 * module load. */
1618static struct attribute *dme1737_misc_attr[] = {
1597 /* Temperatures */ 1619 /* Temperatures */
1598 SENSOR_DEV_ATTR_TEMP(1), 1620 &sensor_dev_attr_temp1_offset.dev_attr.attr,
1599 SENSOR_DEV_ATTR_TEMP(2), 1621 &sensor_dev_attr_temp2_offset.dev_attr.attr,
1600 SENSOR_DEV_ATTR_TEMP(3), 1622 &sensor_dev_attr_temp3_offset.dev_attr.attr,
1601 /* Zones */ 1623 /* Zones */
1602 SENSOR_DEV_ATTR_ZONE(1), 1624 &sensor_dev_attr_zone1_auto_point1_temp_hyst.dev_attr.attr,
1603 SENSOR_DEV_ATTR_ZONE(2), 1625 &sensor_dev_attr_zone2_auto_point1_temp_hyst.dev_attr.attr,
1604 SENSOR_DEV_ATTR_ZONE(3), 1626 &sensor_dev_attr_zone3_auto_point1_temp_hyst.dev_attr.attr,
1605 /* Misc */ 1627 /* Misc */
1606 &dev_attr_vrm.attr, 1628 &dev_attr_vrm.attr,
1607 &dev_attr_cpu0_vid.attr, 1629 &dev_attr_cpu0_vid.attr,
1608 NULL 1630 NULL
1609}; 1631};
1610 1632
1611static const struct attribute_group dme1737_group = { 1633static const struct attribute_group dme1737_misc_group = {
1612 .attrs = dme1737_attr, 1634 .attrs = dme1737_misc_attr,
1613}; 1635};
1614 1636
1615/* The following structs hold the PWM attributes, some of which are optional. 1637/* The following structs hold the PWM attributes, some of which are optional.
1616 * Their creation depends on the chip configuration which is determined during 1638 * Their creation depends on the chip configuration which is determined during
1617 * module load. */ 1639 * module load. */
1618static struct attribute *dme1737_attr_pwm1[] = { 1640static struct attribute *dme1737_pwm1_attr[] = {
1619 SENSOR_DEV_ATTR_PWM_1TO3(1), 1641 &sensor_dev_attr_pwm1.dev_attr.attr,
1642 &sensor_dev_attr_pwm1_freq.dev_attr.attr,
1643 &sensor_dev_attr_pwm1_enable.dev_attr.attr,
1644 &sensor_dev_attr_pwm1_ramp_rate.dev_attr.attr,
1645 &sensor_dev_attr_pwm1_auto_channels_zone.dev_attr.attr,
1646 &sensor_dev_attr_pwm1_auto_point1_pwm.dev_attr.attr,
1647 &sensor_dev_attr_pwm1_auto_point2_pwm.dev_attr.attr,
1620 NULL 1648 NULL
1621}; 1649};
1622static struct attribute *dme1737_attr_pwm2[] = { 1650static struct attribute *dme1737_pwm2_attr[] = {
1623 SENSOR_DEV_ATTR_PWM_1TO3(2), 1651 &sensor_dev_attr_pwm2.dev_attr.attr,
1652 &sensor_dev_attr_pwm2_freq.dev_attr.attr,
1653 &sensor_dev_attr_pwm2_enable.dev_attr.attr,
1654 &sensor_dev_attr_pwm2_ramp_rate.dev_attr.attr,
1655 &sensor_dev_attr_pwm2_auto_channels_zone.dev_attr.attr,
1656 &sensor_dev_attr_pwm2_auto_point1_pwm.dev_attr.attr,
1657 &sensor_dev_attr_pwm2_auto_point2_pwm.dev_attr.attr,
1624 NULL 1658 NULL
1625}; 1659};
1626static struct attribute *dme1737_attr_pwm3[] = { 1660static struct attribute *dme1737_pwm3_attr[] = {
1627 SENSOR_DEV_ATTR_PWM_1TO3(3), 1661 &sensor_dev_attr_pwm3.dev_attr.attr,
1662 &sensor_dev_attr_pwm3_freq.dev_attr.attr,
1663 &sensor_dev_attr_pwm3_enable.dev_attr.attr,
1664 &sensor_dev_attr_pwm3_ramp_rate.dev_attr.attr,
1665 &sensor_dev_attr_pwm3_auto_channels_zone.dev_attr.attr,
1666 &sensor_dev_attr_pwm3_auto_point1_pwm.dev_attr.attr,
1667 &sensor_dev_attr_pwm3_auto_point2_pwm.dev_attr.attr,
1628 NULL 1668 NULL
1629}; 1669};
1630static struct attribute *dme1737_attr_pwm5[] = { 1670static struct attribute *dme1737_pwm5_attr[] = {
1631 SENSOR_DEV_ATTR_PWM_5TO6(5), 1671 &sensor_dev_attr_pwm5.dev_attr.attr,
1672 &sensor_dev_attr_pwm5_freq.dev_attr.attr,
1673 &sensor_dev_attr_pwm5_enable.dev_attr.attr,
1632 NULL 1674 NULL
1633}; 1675};
1634static struct attribute *dme1737_attr_pwm6[] = { 1676static struct attribute *dme1737_pwm6_attr[] = {
1635 SENSOR_DEV_ATTR_PWM_5TO6(6), 1677 &sensor_dev_attr_pwm6.dev_attr.attr,
1678 &sensor_dev_attr_pwm6_freq.dev_attr.attr,
1679 &sensor_dev_attr_pwm6_enable.dev_attr.attr,
1636 NULL 1680 NULL
1637}; 1681};
1638 1682
1639static const struct attribute_group dme1737_pwm_group[] = { 1683static const struct attribute_group dme1737_pwm_group[] = {
1640 { .attrs = dme1737_attr_pwm1 }, 1684 { .attrs = dme1737_pwm1_attr },
1641 { .attrs = dme1737_attr_pwm2 }, 1685 { .attrs = dme1737_pwm2_attr },
1642 { .attrs = dme1737_attr_pwm3 }, 1686 { .attrs = dme1737_pwm3_attr },
1643 { .attrs = NULL }, 1687 { .attrs = NULL },
1644 { .attrs = dme1737_attr_pwm5 }, 1688 { .attrs = dme1737_pwm5_attr },
1645 { .attrs = dme1737_attr_pwm6 }, 1689 { .attrs = dme1737_pwm6_attr },
1690};
1691
1692/* The following struct holds misc PWM attributes, which are not available in
1693 * all chips. Their creation depends on the chip type which is determined
1694 * during module load. */
1695static struct attribute *dme1737_pwm_misc_attr[] = {
1696 &sensor_dev_attr_pwm1_auto_pwm_min.dev_attr.attr,
1697 &sensor_dev_attr_pwm2_auto_pwm_min.dev_attr.attr,
1698 &sensor_dev_attr_pwm3_auto_pwm_min.dev_attr.attr,
1646}; 1699};
1647 1700
1648/* The following structs hold the fan attributes, some of which are optional. 1701/* The following structs hold the fan attributes, some of which are optional.
1649 * Their creation depends on the chip configuration which is determined during 1702 * Their creation depends on the chip configuration which is determined during
1650 * module load. */ 1703 * module load. */
1651static struct attribute *dme1737_attr_fan1[] = { 1704static struct attribute *dme1737_fan1_attr[] = {
1652 SENSOR_DEV_ATTR_FAN_1TO4(1), 1705 &sensor_dev_attr_fan1_input.dev_attr.attr,
1706 &sensor_dev_attr_fan1_min.dev_attr.attr,
1707 &sensor_dev_attr_fan1_alarm.dev_attr.attr,
1708 &sensor_dev_attr_fan1_type.dev_attr.attr,
1653 NULL 1709 NULL
1654}; 1710};
1655static struct attribute *dme1737_attr_fan2[] = { 1711static struct attribute *dme1737_fan2_attr[] = {
1656 SENSOR_DEV_ATTR_FAN_1TO4(2), 1712 &sensor_dev_attr_fan2_input.dev_attr.attr,
1713 &sensor_dev_attr_fan2_min.dev_attr.attr,
1714 &sensor_dev_attr_fan2_alarm.dev_attr.attr,
1715 &sensor_dev_attr_fan2_type.dev_attr.attr,
1657 NULL 1716 NULL
1658}; 1717};
1659static struct attribute *dme1737_attr_fan3[] = { 1718static struct attribute *dme1737_fan3_attr[] = {
1660 SENSOR_DEV_ATTR_FAN_1TO4(3), 1719 &sensor_dev_attr_fan3_input.dev_attr.attr,
1720 &sensor_dev_attr_fan3_min.dev_attr.attr,
1721 &sensor_dev_attr_fan3_alarm.dev_attr.attr,
1722 &sensor_dev_attr_fan3_type.dev_attr.attr,
1661 NULL 1723 NULL
1662}; 1724};
1663static struct attribute *dme1737_attr_fan4[] = { 1725static struct attribute *dme1737_fan4_attr[] = {
1664 SENSOR_DEV_ATTR_FAN_1TO4(4), 1726 &sensor_dev_attr_fan4_input.dev_attr.attr,
1727 &sensor_dev_attr_fan4_min.dev_attr.attr,
1728 &sensor_dev_attr_fan4_alarm.dev_attr.attr,
1729 &sensor_dev_attr_fan4_type.dev_attr.attr,
1665 NULL 1730 NULL
1666}; 1731};
1667static struct attribute *dme1737_attr_fan5[] = { 1732static struct attribute *dme1737_fan5_attr[] = {
1668 SENSOR_DEV_ATTR_FAN_5TO6(5), 1733 &sensor_dev_attr_fan5_input.dev_attr.attr,
1734 &sensor_dev_attr_fan5_min.dev_attr.attr,
1735 &sensor_dev_attr_fan5_alarm.dev_attr.attr,
1736 &sensor_dev_attr_fan5_max.dev_attr.attr,
1669 NULL 1737 NULL
1670}; 1738};
1671static struct attribute *dme1737_attr_fan6[] = { 1739static struct attribute *dme1737_fan6_attr[] = {
1672 SENSOR_DEV_ATTR_FAN_5TO6(6), 1740 &sensor_dev_attr_fan6_input.dev_attr.attr,
1741 &sensor_dev_attr_fan6_min.dev_attr.attr,
1742 &sensor_dev_attr_fan6_alarm.dev_attr.attr,
1743 &sensor_dev_attr_fan6_max.dev_attr.attr,
1673 NULL 1744 NULL
1674}; 1745};
1675 1746
1676static const struct attribute_group dme1737_fan_group[] = { 1747static const struct attribute_group dme1737_fan_group[] = {
1677 { .attrs = dme1737_attr_fan1 }, 1748 { .attrs = dme1737_fan1_attr },
1678 { .attrs = dme1737_attr_fan2 }, 1749 { .attrs = dme1737_fan2_attr },
1679 { .attrs = dme1737_attr_fan3 }, 1750 { .attrs = dme1737_fan3_attr },
1680 { .attrs = dme1737_attr_fan4 }, 1751 { .attrs = dme1737_fan4_attr },
1681 { .attrs = dme1737_attr_fan5 }, 1752 { .attrs = dme1737_fan5_attr },
1682 { .attrs = dme1737_attr_fan6 }, 1753 { .attrs = dme1737_fan6_attr },
1683}; 1754};
1684 1755
1685/* The permissions of all of the following attributes are changed to read- 1756/* The permissions of the following zone attributes are changed to read-
1686 * writeable if the chip is *not* locked. Otherwise they stay read-only. */ 1757 * writeable if the chip is *not* locked. Otherwise they stay read-only. */
1687static struct attribute *dme1737_attr_lock[] = { 1758static struct attribute *dme1737_zone_chmod_attr[] = {
1688 /* Temperatures */ 1759 &sensor_dev_attr_zone1_auto_point1_temp.dev_attr.attr,
1689 SENSOR_DEV_ATTR_TEMP_LOCK(1), 1760 &sensor_dev_attr_zone1_auto_point2_temp.dev_attr.attr,
1690 SENSOR_DEV_ATTR_TEMP_LOCK(2), 1761 &sensor_dev_attr_zone1_auto_point3_temp.dev_attr.attr,
1691 SENSOR_DEV_ATTR_TEMP_LOCK(3), 1762 &sensor_dev_attr_zone2_auto_point1_temp.dev_attr.attr,
1692 /* Zones */ 1763 &sensor_dev_attr_zone2_auto_point2_temp.dev_attr.attr,
1693 SENSOR_DEV_ATTR_ZONE_LOCK(1), 1764 &sensor_dev_attr_zone2_auto_point3_temp.dev_attr.attr,
1694 SENSOR_DEV_ATTR_ZONE_LOCK(2), 1765 &sensor_dev_attr_zone3_auto_point1_temp.dev_attr.attr,
1695 SENSOR_DEV_ATTR_ZONE_LOCK(3), 1766 &sensor_dev_attr_zone3_auto_point2_temp.dev_attr.attr,
1767 &sensor_dev_attr_zone3_auto_point3_temp.dev_attr.attr,
1696 NULL 1768 NULL
1697}; 1769};
1698 1770
1699static const struct attribute_group dme1737_lock_group = { 1771static const struct attribute_group dme1737_zone_chmod_group = {
1700 .attrs = dme1737_attr_lock, 1772 .attrs = dme1737_zone_chmod_attr,
1701}; 1773};
1702 1774
1703/* The permissions of the following PWM attributes are changed to read- 1775/* The permissions of the following PWM attributes are changed to read-
1704 * writeable if the chip is *not* locked and the respective PWM is available. 1776 * writeable if the chip is *not* locked and the respective PWM is available.
1705 * Otherwise they stay read-only. */ 1777 * Otherwise they stay read-only. */
1706static struct attribute *dme1737_attr_pwm1_lock[] = { 1778static struct attribute *dme1737_pwm1_chmod_attr[] = {
1707 SENSOR_DEV_ATTR_PWM_1TO3_LOCK(1), 1779 &sensor_dev_attr_pwm1_freq.dev_attr.attr,
1780 &sensor_dev_attr_pwm1_enable.dev_attr.attr,
1781 &sensor_dev_attr_pwm1_ramp_rate.dev_attr.attr,
1782 &sensor_dev_attr_pwm1_auto_channels_zone.dev_attr.attr,
1783 &sensor_dev_attr_pwm1_auto_point1_pwm.dev_attr.attr,
1708 NULL 1784 NULL
1709}; 1785};
1710static struct attribute *dme1737_attr_pwm2_lock[] = { 1786static struct attribute *dme1737_pwm2_chmod_attr[] = {
1711 SENSOR_DEV_ATTR_PWM_1TO3_LOCK(2), 1787 &sensor_dev_attr_pwm2_freq.dev_attr.attr,
1788 &sensor_dev_attr_pwm2_enable.dev_attr.attr,
1789 &sensor_dev_attr_pwm2_ramp_rate.dev_attr.attr,
1790 &sensor_dev_attr_pwm2_auto_channels_zone.dev_attr.attr,
1791 &sensor_dev_attr_pwm2_auto_point1_pwm.dev_attr.attr,
1712 NULL 1792 NULL
1713}; 1793};
1714static struct attribute *dme1737_attr_pwm3_lock[] = { 1794static struct attribute *dme1737_pwm3_chmod_attr[] = {
1715 SENSOR_DEV_ATTR_PWM_1TO3_LOCK(3), 1795 &sensor_dev_attr_pwm3_freq.dev_attr.attr,
1796 &sensor_dev_attr_pwm3_enable.dev_attr.attr,
1797 &sensor_dev_attr_pwm3_ramp_rate.dev_attr.attr,
1798 &sensor_dev_attr_pwm3_auto_channels_zone.dev_attr.attr,
1799 &sensor_dev_attr_pwm3_auto_point1_pwm.dev_attr.attr,
1716 NULL 1800 NULL
1717}; 1801};
1718static struct attribute *dme1737_attr_pwm5_lock[] = { 1802static struct attribute *dme1737_pwm5_chmod_attr[] = {
1719 SENSOR_DEV_ATTR_PWM_5TO6_LOCK(5), 1803 &sensor_dev_attr_pwm5.dev_attr.attr,
1804 &sensor_dev_attr_pwm5_freq.dev_attr.attr,
1720 NULL 1805 NULL
1721}; 1806};
1722static struct attribute *dme1737_attr_pwm6_lock[] = { 1807static struct attribute *dme1737_pwm6_chmod_attr[] = {
1723 SENSOR_DEV_ATTR_PWM_5TO6_LOCK(6), 1808 &sensor_dev_attr_pwm6.dev_attr.attr,
1809 &sensor_dev_attr_pwm6_freq.dev_attr.attr,
1724 NULL 1810 NULL
1725}; 1811};
1726 1812
1727static const struct attribute_group dme1737_pwm_lock_group[] = { 1813static const struct attribute_group dme1737_pwm_chmod_group[] = {
1728 { .attrs = dme1737_attr_pwm1_lock }, 1814 { .attrs = dme1737_pwm1_chmod_attr },
1729 { .attrs = dme1737_attr_pwm2_lock }, 1815 { .attrs = dme1737_pwm2_chmod_attr },
1730 { .attrs = dme1737_attr_pwm3_lock }, 1816 { .attrs = dme1737_pwm3_chmod_attr },
1731 { .attrs = NULL }, 1817 { .attrs = NULL },
1732 { .attrs = dme1737_attr_pwm5_lock }, 1818 { .attrs = dme1737_pwm5_chmod_attr },
1733 { .attrs = dme1737_attr_pwm6_lock }, 1819 { .attrs = dme1737_pwm6_chmod_attr },
1734}; 1820};
1735 1821
1736/* Pwm[1-3] are read-writeable if the associated pwm is in manual mode and the 1822/* Pwm[1-3] are read-writeable if the associated pwm is in manual mode and the
1737 * chip is not locked. Otherwise they are read-only. */ 1823 * chip is not locked. Otherwise they are read-only. */
1738static struct attribute *dme1737_attr_pwm[] = { 1824static struct attribute *dme1737_pwm_chmod_attr[] = {
1739 &sensor_dev_attr_pwm1.dev_attr.attr, 1825 &sensor_dev_attr_pwm1.dev_attr.attr,
1740 &sensor_dev_attr_pwm2.dev_attr.attr, 1826 &sensor_dev_attr_pwm2.dev_attr.attr,
1741 &sensor_dev_attr_pwm3.dev_attr.attr, 1827 &sensor_dev_attr_pwm3.dev_attr.attr,
@@ -1809,9 +1895,17 @@ static void dme1737_remove_files(struct device *dev)
1809 if (data->has_pwm & (1 << ix)) { 1895 if (data->has_pwm & (1 << ix)) {
1810 sysfs_remove_group(&dev->kobj, 1896 sysfs_remove_group(&dev->kobj,
1811 &dme1737_pwm_group[ix]); 1897 &dme1737_pwm_group[ix]);
1898 if (data->type != sch5027 && ix < 3) {
1899 sysfs_remove_file(&dev->kobj,
1900 dme1737_pwm_misc_attr[ix]);
1901 }
1812 } 1902 }
1813 } 1903 }
1814 1904
1905 if (data->type != sch5027) {
1906 sysfs_remove_group(&dev->kobj, &dme1737_misc_group);
1907 }
1908
1815 sysfs_remove_group(&dev->kobj, &dme1737_group); 1909 sysfs_remove_group(&dev->kobj, &dme1737_group);
1816 1910
1817 if (!data->client.driver) { 1911 if (!data->client.driver) {
@@ -1835,6 +1929,13 @@ static int dme1737_create_files(struct device *dev)
1835 goto exit_remove; 1929 goto exit_remove;
1836 } 1930 }
1837 1931
1932 /* Create misc sysfs attributes */
1933 if ((data->type != sch5027) &&
1934 (err = sysfs_create_group(&dev->kobj,
1935 &dme1737_misc_group))) {
1936 goto exit_remove;
1937 }
1938
1838 /* Create fan sysfs attributes */ 1939 /* Create fan sysfs attributes */
1839 for (ix = 0; ix < ARRAY_SIZE(dme1737_fan_group); ix++) { 1940 for (ix = 0; ix < ARRAY_SIZE(dme1737_fan_group); ix++) {
1840 if (data->has_fan & (1 << ix)) { 1941 if (data->has_fan & (1 << ix)) {
@@ -1852,6 +1953,11 @@ static int dme1737_create_files(struct device *dev)
1852 &dme1737_pwm_group[ix]))) { 1953 &dme1737_pwm_group[ix]))) {
1853 goto exit_remove; 1954 goto exit_remove;
1854 } 1955 }
1956 if (data->type != sch5027 && ix < 3 &&
1957 (err = sysfs_create_file(&dev->kobj,
1958 dme1737_pwm_misc_attr[ix]))) {
1959 goto exit_remove;
1960 }
1855 } 1961 }
1856 } 1962 }
1857 1963
@@ -1861,16 +1967,27 @@ static int dme1737_create_files(struct device *dev)
1861 dev_info(dev, "Device is locked. Some attributes " 1967 dev_info(dev, "Device is locked. Some attributes "
1862 "will be read-only.\n"); 1968 "will be read-only.\n");
1863 } else { 1969 } else {
1864 /* Change permissions of standard attributes */ 1970 /* Change permissions of zone sysfs attributes */
1865 dme1737_chmod_group(dev, &dme1737_lock_group, 1971 dme1737_chmod_group(dev, &dme1737_zone_chmod_group,
1866 S_IRUGO | S_IWUSR); 1972 S_IRUGO | S_IWUSR);
1867 1973
1868 /* Change permissions of PWM attributes */ 1974 /* Change permissions of misc sysfs attributes */
1869 for (ix = 0; ix < ARRAY_SIZE(dme1737_pwm_lock_group); ix++) { 1975 if (data->type != sch5027) {
1976 dme1737_chmod_group(dev, &dme1737_misc_group,
1977 S_IRUGO | S_IWUSR);
1978 }
1979
1980 /* Change permissions of PWM sysfs attributes */
1981 for (ix = 0; ix < ARRAY_SIZE(dme1737_pwm_chmod_group); ix++) {
1870 if (data->has_pwm & (1 << ix)) { 1982 if (data->has_pwm & (1 << ix)) {
1871 dme1737_chmod_group(dev, 1983 dme1737_chmod_group(dev,
1872 &dme1737_pwm_lock_group[ix], 1984 &dme1737_pwm_chmod_group[ix],
1985 S_IRUGO | S_IWUSR);
1986 if (data->type != sch5027 && ix < 3) {
1987 dme1737_chmod_file(dev,
1988 dme1737_pwm_misc_attr[ix],
1873 S_IRUGO | S_IWUSR); 1989 S_IRUGO | S_IWUSR);
1990 }
1874 } 1991 }
1875 } 1992 }
1876 1993
@@ -1879,7 +1996,7 @@ static int dme1737_create_files(struct device *dev)
1879 if ((data->has_pwm & (1 << ix)) && 1996 if ((data->has_pwm & (1 << ix)) &&
1880 (PWM_EN_FROM_REG(data->pwm_config[ix]) == 1)) { 1997 (PWM_EN_FROM_REG(data->pwm_config[ix]) == 1)) {
1881 dme1737_chmod_file(dev, 1998 dme1737_chmod_file(dev,
1882 dme1737_attr_pwm[ix], 1999 dme1737_pwm_chmod_attr[ix],
1883 S_IRUGO | S_IWUSR); 2000 S_IRUGO | S_IWUSR);
1884 } 2001 }
1885 } 2002 }
@@ -1900,6 +2017,9 @@ static int dme1737_init_device(struct device *dev)
1900 int ix; 2017 int ix;
1901 u8 reg; 2018 u8 reg;
1902 2019
2020 /* Point to the right nominal voltages array */
2021 data->in_nominal = IN_NOMINAL(data->type);
2022
1903 data->config = dme1737_read(client, DME1737_REG_CONFIG); 2023 data->config = dme1737_read(client, DME1737_REG_CONFIG);
1904 /* Inform if part is not monitoring/started */ 2024 /* Inform if part is not monitoring/started */
1905 if (!(data->config & 0x01)) { 2025 if (!(data->config & 0x01)) {
@@ -2010,7 +2130,9 @@ static int dme1737_init_device(struct device *dev)
2010 data->pwm_acz[2] = 4; /* pwm3 -> zone3 */ 2130 data->pwm_acz[2] = 4; /* pwm3 -> zone3 */
2011 2131
2012 /* Set VRM */ 2132 /* Set VRM */
2013 data->vrm = vid_which_vrm(); 2133 if (data->type != sch5027) {
2134 data->vrm = vid_which_vrm();
2135 }
2014 2136
2015 return 0; 2137 return 0;
2016} 2138}
@@ -2029,9 +2151,10 @@ static int dme1737_i2c_get_features(int sio_cip, struct dme1737_data *data)
2029 dme1737_sio_enter(sio_cip); 2151 dme1737_sio_enter(sio_cip);
2030 2152
2031 /* Check device ID 2153 /* Check device ID
2032 * The DME1737 can return either 0x78 or 0x77 as its device ID. */ 2154 * The DME1737 can return either 0x78 or 0x77 as its device ID.
2155 * The SCH5027 returns 0x89 as its device ID. */
2033 reg = force_id ? force_id : dme1737_sio_inb(sio_cip, 0x20); 2156 reg = force_id ? force_id : dme1737_sio_inb(sio_cip, 0x20);
2034 if (!(reg == 0x77 || reg == 0x78)) { 2157 if (!(reg == 0x77 || reg == 0x78 || reg == 0x89)) {
2035 err = -ENODEV; 2158 err = -ENODEV;
2036 goto exit; 2159 goto exit;
2037 } 2160 }
@@ -2100,15 +2223,25 @@ static int dme1737_i2c_detect(struct i2c_adapter *adapter, int address,
2100 company = dme1737_read(client, DME1737_REG_COMPANY); 2223 company = dme1737_read(client, DME1737_REG_COMPANY);
2101 verstep = dme1737_read(client, DME1737_REG_VERSTEP); 2224 verstep = dme1737_read(client, DME1737_REG_VERSTEP);
2102 2225
2103 if (!((company == DME1737_COMPANY_SMSC) && 2226 if (company == DME1737_COMPANY_SMSC &&
2104 ((verstep & DME1737_VERSTEP_MASK) == DME1737_VERSTEP))) { 2227 (verstep & DME1737_VERSTEP_MASK) == DME1737_VERSTEP) {
2228 kind = dme1737;
2229 } else if (company == DME1737_COMPANY_SMSC &&
2230 verstep == SCH5027_VERSTEP) {
2231 kind = sch5027;
2232 } else {
2105 err = -ENODEV; 2233 err = -ENODEV;
2106 goto exit_kfree; 2234 goto exit_kfree;
2107 } 2235 }
2108 } 2236 }
2109 2237
2110 kind = dme1737; 2238 if (kind == sch5027) {
2111 name = "dme1737"; 2239 name = "sch5027";
2240 } else {
2241 kind = dme1737;
2242 name = "dme1737";
2243 }
2244 data->type = kind;
2112 2245
2113 /* Fill in the remaining client fields and put it into the global 2246 /* Fill in the remaining client fields and put it into the global
2114 * list */ 2247 * list */
@@ -2120,8 +2253,9 @@ static int dme1737_i2c_detect(struct i2c_adapter *adapter, int address,
2120 goto exit_kfree; 2253 goto exit_kfree;
2121 } 2254 }
2122 2255
2123 dev_info(dev, "Found a DME1737 chip at 0x%02x (rev 0x%02x).\n", 2256 dev_info(dev, "Found a %s chip at 0x%02x (rev 0x%02x).\n",
2124 client->addr, verstep); 2257 kind == sch5027 ? "SCH5027" : "DME1737", client->addr,
2258 verstep);
2125 2259
2126 /* Initialize the DME1737 chip */ 2260 /* Initialize the DME1737 chip */
2127 if ((err = dme1737_init_device(dev))) { 2261 if ((err = dme1737_init_device(dev))) {
@@ -2293,14 +2427,18 @@ static int __devinit dme1737_isa_probe(struct platform_device *pdev)
2293 client->addr = res->start; 2427 client->addr = res->start;
2294 platform_set_drvdata(pdev, data); 2428 platform_set_drvdata(pdev, data);
2295 2429
2296 company = dme1737_read(client, DME1737_REG_COMPANY); 2430 /* Skip chip detection if module is loaded with force_id parameter */
2297 device = dme1737_read(client, DME1737_REG_DEVICE); 2431 if (!force_id) {
2432 company = dme1737_read(client, DME1737_REG_COMPANY);
2433 device = dme1737_read(client, DME1737_REG_DEVICE);
2298 2434
2299 if (!((company == DME1737_COMPANY_SMSC) && 2435 if (!((company == DME1737_COMPANY_SMSC) &&
2300 (device == SCH311X_DEVICE))) { 2436 (device == SCH311X_DEVICE))) {
2301 err = -ENODEV; 2437 err = -ENODEV;
2302 goto exit_kfree; 2438 goto exit_kfree;
2439 }
2303 } 2440 }
2441 data->type = sch311x;
2304 2442
2305 /* Fill in the remaining client fields and initialize the mutex */ 2443 /* Fill in the remaining client fields and initialize the mutex */
2306 strlcpy(client->name, "sch311x", I2C_NAME_SIZE); 2444 strlcpy(client->name, "sch311x", I2C_NAME_SIZE);
@@ -2377,7 +2515,10 @@ static int __init dme1737_init(void)
2377 } 2515 }
2378 2516
2379 if (dme1737_isa_detect(0x2e, &addr) && 2517 if (dme1737_isa_detect(0x2e, &addr) &&
2380 dme1737_isa_detect(0x4e, &addr)) { 2518 dme1737_isa_detect(0x4e, &addr) &&
2519 (!probe_all_addr ||
2520 (dme1737_isa_detect(0x162e, &addr) &&
2521 dme1737_isa_detect(0x164e, &addr)))) {
2381 /* Return 0 if we didn't find an ISA device */ 2522 /* Return 0 if we didn't find an ISA device */
2382 return 0; 2523 return 0;
2383 } 2524 }
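
The dme1737 rework above repeatedly switches sysfs attributes between read-only and read-writable, per group once the chip is known to be unlocked and per file when a PWM enters manual mode. A minimal sketch of how such helpers can sit on top of sysfs_chmod_file() follows; it reuses the helper names from the patch, but the bodies are illustrative rather than copied from the driver.

#include <linux/device.h>
#include <linux/sysfs.h>

/* Sketch of a per-file permission flip, assuming sysfs_chmod_file() as
 * provided by this kernel series; error handling is reduced to a warning. */
static void dme1737_chmod_file(struct device *dev,
			       struct attribute *attr, mode_t mode)
{
	if (sysfs_chmod_file(&dev->kobj, attr, mode))
		dev_warn(dev, "Failed to change permissions of %s.\n",
			 attr->name);
}

/* Walking an attribute_group the same way is what lets a whole group, such
 * as dme1737_zone_chmod_group, become S_IRUGO | S_IWUSR in one call. */
static void dme1737_chmod_group(struct device *dev,
				const struct attribute_group *group,
				mode_t mode)
{
	struct attribute **attr;

	for (attr = group->attrs; *attr; attr++)
		dme1737_chmod_file(dev, *attr, mode);
}
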
diff --git a/drivers/hwmon/f71882fg.c b/drivers/hwmon/f71882fg.c
index cbeb4984b5c7..67067e9a323e 100644
--- a/drivers/hwmon/f71882fg.c
+++ b/drivers/hwmon/f71882fg.c
@@ -87,8 +87,6 @@ static inline void superio_enter(int base);
87static inline void superio_select(int base, int ld); 87static inline void superio_select(int base, int ld);
88static inline void superio_exit(int base); 88static inline void superio_exit(int base);
89 89
90static inline u16 fan_from_reg ( u16 reg );
91
92struct f71882fg_data { 90struct f71882fg_data {
93 unsigned short addr; 91 unsigned short addr;
94 struct device *hwmon_dev; 92 struct device *hwmon_dev;
@@ -116,10 +114,6 @@ struct f71882fg_data {
116 u8 temp_diode_open; 114 u8 temp_diode_open;
117}; 115};
118 116
119static u8 f71882fg_read8(struct f71882fg_data *data, u8 reg);
120static u16 f71882fg_read16(struct f71882fg_data *data, u8 reg);
121static void f71882fg_write8(struct f71882fg_data *data, u8 reg, u8 val);
122
123/* Sysfs in*/ 117/* Sysfs in*/
124static ssize_t show_in(struct device *dev, struct device_attribute *devattr, 118static ssize_t show_in(struct device *dev, struct device_attribute *devattr,
125 char *buf); 119 char *buf);
diff --git a/drivers/hwmon/hwmon-vid.c b/drivers/hwmon/hwmon-vid.c
index 3330667280b9..7b0a32c4dcfb 100644
--- a/drivers/hwmon/hwmon-vid.c
+++ b/drivers/hwmon/hwmon-vid.c
@@ -1,76 +1,74 @@
1/* 1/*
2 hwmon-vid.c - VID/VRM/VRD voltage conversions 2 * hwmon-vid.c - VID/VRM/VRD voltage conversions
3 3 *
4 Copyright (c) 2004 Rudolf Marek <r.marek@assembler.cz> 4 * Copyright (c) 2004 Rudolf Marek <r.marek@assembler.cz>
5 5 *
6 Partly imported from i2c-vid.h of the lm_sensors project 6 * Partly imported from i2c-vid.h of the lm_sensors project
7 Copyright (c) 2002 Mark D. Studebaker <mdsxyz123@yahoo.com> 7 * Copyright (c) 2002 Mark D. Studebaker <mdsxyz123@yahoo.com>
8 With assistance from Trent Piepho <xyzzy@speakeasy.org> 8 * With assistance from Trent Piepho <xyzzy@speakeasy.org>
9 9 *
10 This program is free software; you can redistribute it and/or modify 10 * This program is free software; you can redistribute it and/or modify
11 it under the terms of the GNU General Public License as published by 11 * it under the terms of the GNU General Public License as published by
12 the Free Software Foundation; either version 2 of the License, or 12 * the Free Software Foundation; either version 2 of the License, or
13 (at your option) any later version. 13 * (at your option) any later version.
14 14 *
15 This program is distributed in the hope that it will be useful, 15 * This program is distributed in the hope that it will be useful,
16 but WITHOUT ANY WARRANTY; without even the implied warranty of 16 * but WITHOUT ANY WARRANTY; without even the implied warranty of
17 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
18 GNU General Public License for more details. 18 * GNU General Public License for more details.
19 19 *
20 You should have received a copy of the GNU General Public License 20 * You should have received a copy of the GNU General Public License
21 along with this program; if not, write to the Free Software 21 * along with this program; if not, write to the Free Software
22 Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 22 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
23*/ 23 */
24 24
25#include <linux/module.h> 25#include <linux/module.h>
26#include <linux/kernel.h> 26#include <linux/kernel.h>
27#include <linux/hwmon-vid.h> 27#include <linux/hwmon-vid.h>
28 28
29/* 29/*
30 Common code for decoding VID pins. 30 * Common code for decoding VID pins.
31 31 *
32 References: 32 * References:
33 33 *
34 For VRM 8.4 to 9.1, "VRM x.y DC-DC Converter Design Guidelines", 34 * For VRM 8.4 to 9.1, "VRM x.y DC-DC Converter Design Guidelines",
35 available at http://developer.intel.com/. 35 * available at http://developer.intel.com/.
36 36 *
37 For VRD 10.0 and up, "VRD x.y Design Guide", 37 * For VRD 10.0 and up, "VRD x.y Design Guide",
38 available at http://developer.intel.com/. 38 * available at http://developer.intel.com/.
39 39 *
40 AMD Opteron processors don't follow the Intel specifications. 40 * AMD NPT 0Fh (Athlon64 & Opteron), AMD Publication 32559,
41 I'm going to "make up" 2.4 as the spec number for the Opterons. 41 * http://www.amd.com/us-en/assets/content_type/white_papers_and_tech_docs/32559.pdf
42 No good reason just a mnemonic for the 24x Opteron processor 42 * Table 71. VID Code Voltages
43 series. 43 * AMD Opteron processors don't follow the Intel specifications.
44 44 * I'm going to "make up" 2.4 as the spec number for the Opterons.
45 Opteron VID encoding is: 45 * No good reason just a mnemonic for the 24x Opteron processor
46 00000 = 1.550 V 46 * series.
47 00001 = 1.525 V 47 *
48 . . . . 48 * The 17 specification is in fact Intel Mobile Voltage Positioning -
49 11110 = 0.800 V 49 * (IMVP-II). You can find more information in the datasheet of Max1718
50 11111 = 0.000 V (off) 50 * http://www.maxim-ic.com/quick_view2.cfm/qv_pk/2452
51 51 *
52 The 17 specification is in fact Intel Mobile Voltage Positioning - 52 * The 13 specification corresponds to the Intel Pentium M series. There
53 (IMVP-II). You can find more information in the datasheet of Max1718 53 * doesn't seem to be any named specification for these. The conversion
54 http://www.maxim-ic.com/quick_view2.cfm/qv_pk/2452 54 * tables are detailed directly in the various Pentium M datasheets:
55 55 * http://www.intel.com/design/intarch/pentiumm/docs_pentiumm.htm
56 The 13 specification corresponds to the Intel Pentium M series. There 56 *
57 doesn't seem to be any named specification for these. The conversion 57 * The 14 specification corresponds to Intel Core series. There
58 tables are detailed directly in the various Pentium M datasheets: 58 * doesn't seem to be any named specification for these. The conversion
59 http://www.intel.com/design/intarch/pentiumm/docs_pentiumm.htm 59 * tables are detailed directly in the various Pentium Core datasheets:
60 60 * http://www.intel.com/design/mobile/datashts/309221.htm
61 The 14 specification corresponds to Intel Core series. There 61 *
62 doesn't seem to be any named specification for these. The conversion 62 * The 110 (VRM 11) specification corresponds to Intel Conroe based series.
63 tables are detailed directly in the various Pentium Core datasheets: 63 * http://www.intel.com/design/processor/applnots/313214.htm
64 http://www.intel.com/design/mobile/datashts/309221.htm 64 */
65 65
66 The 110 (VRM 11) specification corresponds to Intel Conroe based series. 66/*
67 http://www.intel.com/design/processor/applnots/313214.htm 67 * vrm is the VRM/VRD document version multiplied by 10.
68*/ 68 * val is the 4-bit or more VID code.
69 69 * Returned value is in mV to avoid floating point in the kernel.
70/* vrm is the VRM/VRD document version multiplied by 10. 70 * Some VID have some bits in uV scale, this is rounded to mV.
71 val is the 4-bit or more VID code. 71 */
72 Returned value is in mV to avoid floating point in the kernel.
73 Some VID have some bits in uV scale, this is rounded to mV */
74int vid_from_reg(int val, u8 vrm) 72int vid_from_reg(int val, u8 vrm)
75{ 73{
76 int vid; 74 int vid;
@@ -96,9 +94,11 @@ int vid_from_reg(int val, u8 vrm)
96 if (val < 0x02 || val > 0xb2) 94 if (val < 0x02 || val > 0xb2)
97 return 0; 95 return 0;
98 return((1600000 - (val - 2) * 6250 + 500) / 1000); 96 return((1600000 - (val - 2) * 6250 + 500) / 1000);
99 case 24: /* Opteron processor */ 97
100 val &= 0x1f; 98 case 24: /* AMD NPT 0Fh (Athlon64 & Opteron) */
101 return(val == 0x1f ? 0 : 1550 - val * 25); 99 val &= 0x3f;
100 return (val < 32) ? 1550 - 25 * val
101 : 775 - (25 * (val - 31)) / 2;
102 102
103 case 91: /* VRM 9.1 */ 103 case 91: /* VRM 9.1 */
104 case 90: /* VRM 9.0 */ 104 case 90: /* VRM 9.0 */
@@ -141,9 +141,9 @@ int vid_from_reg(int val, u8 vrm)
141 141
142 142
143/* 143/*
144 After this point is the code to automatically determine which 144 * After this point is the code to automatically determine which
145 VRM/VRD specification should be used depending on the CPU. 145 * VRM/VRD specification should be used depending on the CPU.
146*/ 146 */
147 147
148struct vrm_model { 148struct vrm_model {
149 u8 vendor; 149 u8 vendor;
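
The new case 24 branch above replaces the 5-bit Opteron table with the 6-bit AMD NPT 0Fh encoding: codes 0 to 31 step down from 1550 mV in 25 mV steps, and codes 32 to 63 continue below 775 mV in 12.5 mV steps, reduced to whole millivolts by the integer division. The standalone program below restates only that arithmetic (not the full vid_from_reg(), which also handles the other VRM versions) so values can be spot-checked against Table 71 of AMD publication 32559; the sample codes are arbitrary.

#include <stdio.h>

/* Same expression as the patched case 24 branch, in userspace form. */
static int npt_vid_to_mv(int val)
{
	val &= 0x3f;		/* 6-bit VID code */
	return (val < 32) ? 1550 - 25 * val
			  : 775 - (25 * (val - 31)) / 2;
}

int main(void)
{
	int samples[] = { 0x00, 0x1e, 0x1f, 0x20, 0x3e };
	int i;

	/* 0x20 yields 763 since the 12.5 mV half-step is handled with
	 * integer division. */
	for (i = 0; i < 5; i++)
		printf("VID 0x%02x -> %d mV\n", samples[i],
		       npt_vid_to_mv(samples[i]));
	return 0;
}
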
diff --git a/drivers/hwmon/it87.c b/drivers/hwmon/it87.c
index e12c132ff83a..30cdb0956779 100644
--- a/drivers/hwmon/it87.c
+++ b/drivers/hwmon/it87.c
@@ -151,9 +151,9 @@ static int fix_pwm_polarity;
151/* The IT8718F has the VID value in a different register, in Super-I/O 151/* The IT8718F has the VID value in a different register, in Super-I/O
152 configuration space. */ 152 configuration space. */
153#define IT87_REG_VID 0x0a 153#define IT87_REG_VID 0x0a
154/* Warning: register 0x0b is used for something completely different in 154/* The IT8705F and IT8712F earlier than revision 0x08 use register 0x0b
155 new chips/revisions. I suspect only 16-bit tachometer mode will work 155 for fan divisors. Later IT8712F revisions must use 16-bit tachometer
156 for these. */ 156 mode. */
157#define IT87_REG_FAN_DIV 0x0b 157#define IT87_REG_FAN_DIV 0x0b
158#define IT87_REG_FAN_16BIT 0x0c 158#define IT87_REG_FAN_16BIT 0x0c
159 159
@@ -234,6 +234,7 @@ static const unsigned int pwm_freq[8] = {
234struct it87_sio_data { 234struct it87_sio_data {
235 enum chips type; 235 enum chips type;
236 /* Values read from Super-I/O config space */ 236 /* Values read from Super-I/O config space */
237 u8 revision;
237 u8 vid_value; 238 u8 vid_value;
238}; 239};
239 240
@@ -242,6 +243,7 @@ struct it87_sio_data {
242struct it87_data { 243struct it87_data {
243 struct device *hwmon_dev; 244 struct device *hwmon_dev;
244 enum chips type; 245 enum chips type;
246 u8 revision;
245 247
246 unsigned short addr; 248 unsigned short addr;
247 const char *name; 249 const char *name;
@@ -268,6 +270,16 @@ struct it87_data {
268 u8 manual_pwm_ctl[3]; /* manual PWM value set by user */ 270 u8 manual_pwm_ctl[3]; /* manual PWM value set by user */
269}; 271};
270 272
273static inline int has_16bit_fans(const struct it87_data *data)
274{
275 /* IT8705F Datasheet 0.4.1, 3h == Version G.
276 IT8712F Datasheet 0.9.1, section 8.3.5 indicates 7h == Version I.
277 These are the first revisions with 16bit tachometer support. */
278 return (data->type == it87 && data->revision >= 0x03)
279 || (data->type == it8712 && data->revision >= 0x07)
280 || data->type == it8716
281 || data->type == it8718;
282}
271 283
272static int it87_probe(struct platform_device *pdev); 284static int it87_probe(struct platform_device *pdev);
273static int __devexit it87_remove(struct platform_device *pdev); 285static int __devexit it87_remove(struct platform_device *pdev);
@@ -991,8 +1003,9 @@ static int __init it87_find(unsigned short *address,
991 } 1003 }
992 1004
993 err = 0; 1005 err = 0;
1006 sio_data->revision = superio_inb(DEVREV) & 0x0f;
994 pr_info("it87: Found IT%04xF chip at 0x%x, revision %d\n", 1007 pr_info("it87: Found IT%04xF chip at 0x%x, revision %d\n",
995 chip_type, *address, superio_inb(DEVREV) & 0x0f); 1008 chip_type, *address, sio_data->revision);
996 1009
997 /* Read GPIO config and VID value from LDN 7 (GPIO) */ 1010 /* Read GPIO config and VID value from LDN 7 (GPIO) */
998 if (chip_type != IT8705F_DEVID) { 1011 if (chip_type != IT8705F_DEVID) {
@@ -1045,6 +1058,7 @@ static int __devinit it87_probe(struct platform_device *pdev)
1045 1058
1046 data->addr = res->start; 1059 data->addr = res->start;
1047 data->type = sio_data->type; 1060 data->type = sio_data->type;
1061 data->revision = sio_data->revision;
1048 data->name = names[sio_data->type]; 1062 data->name = names[sio_data->type];
1049 1063
1050 /* Now, we do the remaining detection. */ 1064 /* Now, we do the remaining detection. */
@@ -1069,7 +1083,7 @@ static int __devinit it87_probe(struct platform_device *pdev)
1069 goto ERROR2; 1083 goto ERROR2;
1070 1084
1071 /* Do not create fan files for disabled fans */ 1085 /* Do not create fan files for disabled fans */
1072 if (data->type == it8716 || data->type == it8718) { 1086 if (has_16bit_fans(data)) {
1073 /* 16-bit tachometers */ 1087 /* 16-bit tachometers */
1074 if (data->has_fan & (1 << 0)) { 1088 if (data->has_fan & (1 << 0)) {
1075 if ((err = device_create_file(dev, 1089 if ((err = device_create_file(dev,
@@ -1350,7 +1364,7 @@ static void __devinit it87_init_device(struct platform_device *pdev)
1350 data->has_fan = (data->fan_main_ctrl >> 4) & 0x07; 1364 data->has_fan = (data->fan_main_ctrl >> 4) & 0x07;
1351 1365
1352 /* Set tachometers to 16-bit mode if needed */ 1366 /* Set tachometers to 16-bit mode if needed */
1353 if (data->type == it8716 || data->type == it8718) { 1367 if (has_16bit_fans(data)) {
1354 tmp = it87_read_value(data, IT87_REG_FAN_16BIT); 1368 tmp = it87_read_value(data, IT87_REG_FAN_16BIT);
1355 if (~tmp & 0x07 & data->has_fan) { 1369 if (~tmp & 0x07 & data->has_fan) {
1356 dev_dbg(&pdev->dev, 1370 dev_dbg(&pdev->dev,
@@ -1358,10 +1372,13 @@ static void __devinit it87_init_device(struct platform_device *pdev)
1358 it87_write_value(data, IT87_REG_FAN_16BIT, 1372 it87_write_value(data, IT87_REG_FAN_16BIT,
1359 tmp | 0x07); 1373 tmp | 0x07);
1360 } 1374 }
1361 if (tmp & (1 << 4)) 1375 /* IT8705F only supports three fans. */
1362 data->has_fan |= (1 << 3); /* fan4 enabled */ 1376 if (data->type != it87) {
1363 if (tmp & (1 << 5)) 1377 if (tmp & (1 << 4))
1364 data->has_fan |= (1 << 4); /* fan5 enabled */ 1378 data->has_fan |= (1 << 3); /* fan4 enabled */
1379 if (tmp & (1 << 5))
1380 data->has_fan |= (1 << 4); /* fan5 enabled */
1381 }
1365 } 1382 }
1366 1383
1367 /* Set current fan mode registers and the default settings for the 1384 /* Set current fan mode registers and the default settings for the
@@ -1426,7 +1443,7 @@ static struct it87_data *it87_update_device(struct device *dev)
1426 data->fan[i] = it87_read_value(data, 1443 data->fan[i] = it87_read_value(data,
1427 IT87_REG_FAN[i]); 1444 IT87_REG_FAN[i]);
1428 /* Add high byte if in 16-bit mode */ 1445 /* Add high byte if in 16-bit mode */
1429 if (data->type == it8716 || data->type == it8718) { 1446 if (has_16bit_fans(data)) {
1430 data->fan[i] |= it87_read_value(data, 1447 data->fan[i] |= it87_read_value(data,
1431 IT87_REG_FANX[i]) << 8; 1448 IT87_REG_FANX[i]) << 8;
1432 data->fan_min[i] |= it87_read_value(data, 1449 data->fan_min[i] |= it87_read_value(data,
@@ -1443,8 +1460,7 @@ static struct it87_data *it87_update_device(struct device *dev)
1443 } 1460 }
1444 1461
1445 /* Newer chips don't have clock dividers */ 1462 /* Newer chips don't have clock dividers */
1446 if ((data->has_fan & 0x07) && data->type != it8716 1463 if ((data->has_fan & 0x07) && !has_16bit_fans(data)) {
1447 && data->type != it8718) {
1448 i = it87_read_value(data, IT87_REG_FAN_DIV); 1464 i = it87_read_value(data, IT87_REG_FAN_DIV);
1449 data->fan_div[0] = i & 0x07; 1465 data->fan_div[0] = i & 0x07;
1450 data->fan_div[1] = (i >> 3) & 0x07; 1466 data->fan_div[1] = (i >> 3) & 0x07;
@@ -1460,7 +1476,8 @@ static struct it87_data *it87_update_device(struct device *dev)
1460 data->fan_ctl = it87_read_value(data, IT87_REG_FAN_CTL); 1476 data->fan_ctl = it87_read_value(data, IT87_REG_FAN_CTL);
1461 1477
1462 data->sensor = it87_read_value(data, IT87_REG_TEMP_ENABLE); 1478 data->sensor = it87_read_value(data, IT87_REG_TEMP_ENABLE);
1463 /* The 8705 does not have VID capability */ 1479 /* The 8705 does not have VID capability.
1480 The 8718 does not use IT87_REG_VID for the same purpose. */
1464 if (data->type == it8712 || data->type == it8716) { 1481 if (data->type == it8712 || data->type == it8716) {
1465 data->vid = it87_read_value(data, IT87_REG_VID); 1482 data->vid = it87_read_value(data, IT87_REG_VID);
1466 /* The older IT8712F revisions had only 5 VID pins, 1483 /* The older IT8712F revisions had only 5 VID pins,
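
The has_16bit_fans() helper introduced above gates three things: which revisions get the extra fan files, whether IT87_REG_FAN_DIV is still consulted, and how a reading is assembled from the low byte in IT87_REG_FAN[] plus the high byte in IT87_REG_FANX[]. The userspace sketch below shows the byte combining from the patch together with a count-to-RPM conversion; the 1350000 / (count * 2) formula is assumed to match the driver's FAN16_TO_RPM convention (22.5 kHz tachometer clock, fixed divisor of 2 in 16-bit mode) and should be checked against that macro if it matters.

#include <stdio.h>

/* Hedged restatement of the 16-bit tachometer handling; the register bytes
 * are hypothetical stand-ins for it87_read_value() results. */
static unsigned int fan16_to_rpm(unsigned int count)
{
	if (count == 0 || count == 0xffff)	/* stalled or missing fan */
		return 0;
	return 1350000 / (count * 2);
}

int main(void)
{
	unsigned int lo = 0x2c, hi = 0x01;	/* hypothetical register values */
	unsigned int count = lo | (hi << 8);	/* same combining as the patch */

	printf("count=%u -> %u RPM\n", count, fan16_to_rpm(count));
	return 0;
}
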
diff --git a/drivers/hwmon/lm75.c b/drivers/hwmon/lm75.c
index de698dc73020..7880c273c2c5 100644
--- a/drivers/hwmon/lm75.c
+++ b/drivers/hwmon/lm75.c
@@ -30,14 +30,37 @@
30#include "lm75.h" 30#include "lm75.h"
31 31
32 32
33/* Addresses to scan */ 33/*
34 * This driver handles the LM75 and compatible digital temperature sensors.
35 * Only types which are _not_ listed in I2C_CLIENT_INSMOD_*() need to be
36 * listed here. We start at 9 since I2C_CLIENT_INSMOD_*() currently allow
37 * definition of up to 8 chip types (plus zero).
38 */
39
40enum lm75_type { /* keep sorted in alphabetical order */
41 ds1775 = 9,
42 ds75,
43 /* lm75 -- in I2C_CLIENT_INSMOD_1() */
44 lm75a,
45 max6625,
46 max6626,
47 mcp980x,
48 stds75,
49 tcn75,
50 tmp100,
51 tmp101,
52 tmp175,
53 tmp275,
54 tmp75,
55};
56
57/* Addresses scanned by legacy style driver binding */
34static const unsigned short normal_i2c[] = { 0x48, 0x49, 0x4a, 0x4b, 0x4c, 58static const unsigned short normal_i2c[] = { 0x48, 0x49, 0x4a, 0x4b, 0x4c,
35 0x4d, 0x4e, 0x4f, I2C_CLIENT_END }; 59 0x4d, 0x4e, 0x4f, I2C_CLIENT_END };
36 60
37/* Insmod parameters */ 61/* Insmod parameters (only for legacy style driver binding) */
38I2C_CLIENT_INSMOD_1(lm75); 62I2C_CLIENT_INSMOD_1(lm75);
39 63
40/* Many LM75 constants specified below */
41 64
42/* The LM75 registers */ 65/* The LM75 registers */
43#define LM75_REG_CONF 0x01 66#define LM75_REG_CONF 0x01
@@ -49,10 +72,11 @@ static const u8 LM75_REG_TEMP[3] = {
49 72
50/* Each client has this additional data */ 73/* Each client has this additional data */
51struct lm75_data { 74struct lm75_data {
52 struct i2c_client client; 75 struct i2c_client *client;
53 struct device *hwmon_dev; 76 struct device *hwmon_dev;
54 struct mutex update_lock; 77 struct mutex update_lock;
55 char valid; /* !=0 if following fields are valid */ 78 u8 orig_conf;
79 char valid; /* !=0 if registers are valid */
56 unsigned long last_updated; /* In jiffies */ 80 unsigned long last_updated; /* In jiffies */
57 u16 temp[3]; /* Register values, 81 u16 temp[3]; /* Register values,
58 0 = input 82 0 = input
@@ -60,23 +84,14 @@ struct lm75_data {
60 2 = hyst */ 84 2 = hyst */
61}; 85};
62 86
63static int lm75_attach_adapter(struct i2c_adapter *adapter);
64static int lm75_detect(struct i2c_adapter *adapter, int address, int kind);
65static void lm75_init_client(struct i2c_client *client);
66static int lm75_detach_client(struct i2c_client *client);
67static int lm75_read_value(struct i2c_client *client, u8 reg); 87static int lm75_read_value(struct i2c_client *client, u8 reg);
68static int lm75_write_value(struct i2c_client *client, u8 reg, u16 value); 88static int lm75_write_value(struct i2c_client *client, u8 reg, u16 value);
69static struct lm75_data *lm75_update_device(struct device *dev); 89static struct lm75_data *lm75_update_device(struct device *dev);
70 90
71 91
72/* This is the driver that will be inserted */ 92/*-----------------------------------------------------------------------*/
73static struct i2c_driver lm75_driver = { 93
74 .driver = { 94/* sysfs attributes for hwmon */
75 .name = "lm75",
76 },
77 .attach_adapter = lm75_attach_adapter,
78 .detach_client = lm75_detach_client,
79};
80 95
81static ssize_t show_temp(struct device *dev, struct device_attribute *da, 96static ssize_t show_temp(struct device *dev, struct device_attribute *da,
82 char *buf) 97 char *buf)
@@ -109,13 +124,6 @@ static SENSOR_DEVICE_ATTR(temp1_max_hyst, S_IWUSR | S_IRUGO,
109 show_temp, set_temp, 2); 124 show_temp, set_temp, 2);
110static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, show_temp, NULL, 0); 125static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, show_temp, NULL, 0);
111 126
112static int lm75_attach_adapter(struct i2c_adapter *adapter)
113{
114 if (!(adapter->class & I2C_CLASS_HWMON))
115 return 0;
116 return i2c_probe(adapter, &addr_data, lm75_detect);
117}
118
119static struct attribute *lm75_attributes[] = { 127static struct attribute *lm75_attributes[] = {
120 &sensor_dev_attr_temp1_input.dev_attr.attr, 128 &sensor_dev_attr_temp1_input.dev_attr.attr,
121 &sensor_dev_attr_temp1_max.dev_attr.attr, 129 &sensor_dev_attr_temp1_max.dev_attr.attr,
@@ -128,32 +136,144 @@ static const struct attribute_group lm75_group = {
128 .attrs = lm75_attributes, 136 .attrs = lm75_attributes,
129}; 137};
130 138
139/*-----------------------------------------------------------------------*/
140
141/* "New style" I2C driver binding -- following the driver model */
142
143static int
144lm75_probe(struct i2c_client *client, const struct i2c_device_id *id)
145{
146 struct lm75_data *data;
147 int status;
148 u8 set_mask, clr_mask;
149 int new;
150
151 if (!i2c_check_functionality(client->adapter,
152 I2C_FUNC_SMBUS_BYTE_DATA | I2C_FUNC_SMBUS_WORD_DATA))
153 return -EIO;
154
155 data = kzalloc(sizeof(struct lm75_data), GFP_KERNEL);
156 if (!data)
157 return -ENOMEM;
158
159 i2c_set_clientdata(client, data);
160
161 data->client = client;
162 mutex_init(&data->update_lock);
163
164 /* Set to LM75 resolution (9 bits, 1/2 degree C) and range.
165 * Then tweak to be more precise when appropriate.
166 */
167 set_mask = 0;
168 clr_mask = (1 << 0) /* continuous conversions */
169 | (1 << 6) | (1 << 5); /* 9-bit mode */
170
171 /* configure as specified */
172 status = lm75_read_value(client, LM75_REG_CONF);
173 if (status < 0) {
174 dev_dbg(&client->dev, "Can't read config? %d\n", status);
175 goto exit_free;
176 }
177 data->orig_conf = status;
178 new = status & ~clr_mask;
179 new |= set_mask;
180 if (status != new)
181 lm75_write_value(client, LM75_REG_CONF, new);
182 dev_dbg(&client->dev, "Config %02x\n", new);
183
184 /* Register sysfs hooks */
185 status = sysfs_create_group(&client->dev.kobj, &lm75_group);
186 if (status)
187 goto exit_free;
188
189 data->hwmon_dev = hwmon_device_register(&client->dev);
190 if (IS_ERR(data->hwmon_dev)) {
191 status = PTR_ERR(data->hwmon_dev);
192 goto exit_remove;
193 }
194
195 dev_info(&client->dev, "%s: sensor '%s'\n",
196 data->hwmon_dev->bus_id, client->name);
197
198 return 0;
199
200exit_remove:
201 sysfs_remove_group(&client->dev.kobj, &lm75_group);
202exit_free:
203 i2c_set_clientdata(client, NULL);
204 kfree(data);
205 return status;
206}
207
208static int lm75_remove(struct i2c_client *client)
209{
210 struct lm75_data *data = i2c_get_clientdata(client);
211
212 hwmon_device_unregister(data->hwmon_dev);
213 sysfs_remove_group(&client->dev.kobj, &lm75_group);
214 lm75_write_value(client, LM75_REG_CONF, data->orig_conf);
215 i2c_set_clientdata(client, NULL);
216 kfree(data);
217 return 0;
218}
219
220static const struct i2c_device_id lm75_ids[] = {
221 { "ds1775", ds1775, },
222 { "ds75", ds75, },
223 { "lm75", lm75, },
224 { "lm75a", lm75a, },
225 { "max6625", max6625, },
226 { "max6626", max6626, },
227 { "mcp980x", mcp980x, },
228 { "stds75", stds75, },
229 { "tcn75", tcn75, },
230 { "tmp100", tmp100, },
231 { "tmp101", tmp101, },
232 { "tmp175", tmp175, },
233 { "tmp275", tmp275, },
234 { "tmp75", tmp75, },
235 { /* LIST END */ }
236};
237MODULE_DEVICE_TABLE(i2c, lm75_ids);
238
239static struct i2c_driver lm75_driver = {
240 .driver = {
241 .name = "lm75",
242 },
243 .probe = lm75_probe,
244 .remove = lm75_remove,
245 .id_table = lm75_ids,
246};
247
248/*-----------------------------------------------------------------------*/
249
250/* "Legacy" I2C driver binding */
251
252static struct i2c_driver lm75_legacy_driver;
253
131/* This function is called by i2c_probe */ 254/* This function is called by i2c_probe */
132static int lm75_detect(struct i2c_adapter *adapter, int address, int kind) 255static int lm75_detect(struct i2c_adapter *adapter, int address, int kind)
133{ 256{
134 int i; 257 int i;
135 struct i2c_client *new_client; 258 struct i2c_client *new_client;
136 struct lm75_data *data;
137 int err = 0; 259 int err = 0;
138 const char *name = "";
139 260
140 if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE_DATA | 261 if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE_DATA |
141 I2C_FUNC_SMBUS_WORD_DATA)) 262 I2C_FUNC_SMBUS_WORD_DATA))
142 goto exit; 263 goto exit;
143 264
144 /* OK. For now, we presume we have a valid client. We now create the 265 /* OK. For now, we presume we have a valid address. We create the
145 client structure, even though we cannot fill it completely yet. 266 client structure, even though there may be no sensor present.
146 But it allows us to access lm75_{read,write}_value. */ 267 But it allows us to use i2c_smbus_read_*_data() calls. */
147 if (!(data = kzalloc(sizeof(struct lm75_data), GFP_KERNEL))) { 268 new_client = kzalloc(sizeof *new_client, GFP_KERNEL);
269 if (!new_client) {
148 err = -ENOMEM; 270 err = -ENOMEM;
149 goto exit; 271 goto exit;
150 } 272 }
151 273
152 new_client = &data->client;
153 i2c_set_clientdata(new_client, data);
154 new_client->addr = address; 274 new_client->addr = address;
155 new_client->adapter = adapter; 275 new_client->adapter = adapter;
156 new_client->driver = &lm75_driver; 276 new_client->driver = &lm75_legacy_driver;
157 new_client->flags = 0; 277 new_client->flags = 0;
158 278
159 /* Now, we do the remaining detection. There is no identification- 279 /* Now, we do the remaining detection. There is no identification-
@@ -174,17 +294,17 @@ static int lm75_detect(struct i2c_adapter *adapter, int address, int kind)
174 || i2c_smbus_read_word_data(new_client, 5) != hyst 294 || i2c_smbus_read_word_data(new_client, 5) != hyst
175 || i2c_smbus_read_word_data(new_client, 6) != hyst 295 || i2c_smbus_read_word_data(new_client, 6) != hyst
176 || i2c_smbus_read_word_data(new_client, 7) != hyst) 296 || i2c_smbus_read_word_data(new_client, 7) != hyst)
177 goto exit_free; 297 goto exit_free;
178 os = i2c_smbus_read_word_data(new_client, 3); 298 os = i2c_smbus_read_word_data(new_client, 3);
179 if (i2c_smbus_read_word_data(new_client, 4) != os 299 if (i2c_smbus_read_word_data(new_client, 4) != os
180 || i2c_smbus_read_word_data(new_client, 5) != os 300 || i2c_smbus_read_word_data(new_client, 5) != os
181 || i2c_smbus_read_word_data(new_client, 6) != os 301 || i2c_smbus_read_word_data(new_client, 6) != os
182 || i2c_smbus_read_word_data(new_client, 7) != os) 302 || i2c_smbus_read_word_data(new_client, 7) != os)
183 goto exit_free; 303 goto exit_free;
184 304
185 /* Unused bits */ 305 /* Unused bits */
186 if (conf & 0xe0) 306 if (conf & 0xe0)
187 goto exit_free; 307 goto exit_free;
188 308
189 /* Addresses cycling */ 309 /* Addresses cycling */
190 for (i = 8; i < 0xff; i += 8) 310 for (i = 8; i < 0xff; i += 8)
@@ -194,58 +314,57 @@ static int lm75_detect(struct i2c_adapter *adapter, int address, int kind)
194 goto exit_free; 314 goto exit_free;
195 } 315 }
196 316
197 /* Determine the chip type - only one kind supported! */ 317 /* NOTE: we treat "force=..." and "force_lm75=..." the same.
198 if (kind <= 0) 318 * Only new-style driver binding distinguishes chip types.
199 kind = lm75; 319 */
200 320 strlcpy(new_client->name, "lm75", I2C_NAME_SIZE);
201 if (kind == lm75) {
202 name = "lm75";
203 }
204
205 /* Fill in the remaining client fields and put it into the global list */
206 strlcpy(new_client->name, name, I2C_NAME_SIZE);
207 data->valid = 0;
208 mutex_init(&data->update_lock);
209 321
210 /* Tell the I2C layer a new client has arrived */ 322 /* Tell the I2C layer a new client has arrived */
211 if ((err = i2c_attach_client(new_client))) 323 err = i2c_attach_client(new_client);
324 if (err)
212 goto exit_free; 325 goto exit_free;
213 326
214 /* Initialize the LM75 chip */ 327 err = lm75_probe(new_client, NULL);
215 lm75_init_client(new_client); 328 if (err < 0)
216
217 /* Register sysfs hooks */
218 if ((err = sysfs_create_group(&new_client->dev.kobj, &lm75_group)))
219 goto exit_detach; 329 goto exit_detach;
220 330
221 data->hwmon_dev = hwmon_device_register(&new_client->dev);
222 if (IS_ERR(data->hwmon_dev)) {
223 err = PTR_ERR(data->hwmon_dev);
224 goto exit_remove;
225 }
226
227 return 0; 331 return 0;
228 332
229exit_remove:
230 sysfs_remove_group(&new_client->dev.kobj, &lm75_group);
231exit_detach: 333exit_detach:
232 i2c_detach_client(new_client); 334 i2c_detach_client(new_client);
233exit_free: 335exit_free:
234 kfree(data); 336 kfree(new_client);
235exit: 337exit:
236 return err; 338 return err;
237} 339}
238 340
341static int lm75_attach_adapter(struct i2c_adapter *adapter)
342{
343 if (!(adapter->class & I2C_CLASS_HWMON))
344 return 0;
345 return i2c_probe(adapter, &addr_data, lm75_detect);
346}
347
239static int lm75_detach_client(struct i2c_client *client) 348static int lm75_detach_client(struct i2c_client *client)
240{ 349{
241 struct lm75_data *data = i2c_get_clientdata(client); 350 lm75_remove(client);
242 hwmon_device_unregister(data->hwmon_dev);
243 sysfs_remove_group(&client->dev.kobj, &lm75_group);
244 i2c_detach_client(client); 351 i2c_detach_client(client);
245 kfree(data); 352 kfree(client);
246 return 0; 353 return 0;
247} 354}
248 355
356static struct i2c_driver lm75_legacy_driver = {
357 .driver = {
358 .name = "lm75_legacy",
359 },
360 .attach_adapter = lm75_attach_adapter,
361 .detach_client = lm75_detach_client,
362};
363
364/*-----------------------------------------------------------------------*/
365
366/* register access */
367
249/* All registers are word-sized, except for the configuration register. 368/* All registers are word-sized, except for the configuration register.
250 LM75 uses a high-byte first convention, which is exactly opposite to 369 LM75 uses a high-byte first convention, which is exactly opposite to
251 the SMBus standard. */ 370 the SMBus standard. */
@@ -268,16 +387,6 @@ static int lm75_write_value(struct i2c_client *client, u8 reg, u16 value)
268 return i2c_smbus_write_word_data(client, reg, swab16(value)); 387 return i2c_smbus_write_word_data(client, reg, swab16(value));
269} 388}
270 389
271static void lm75_init_client(struct i2c_client *client)
272{
273 int reg;
274
275 /* Enable if in shutdown mode */
276 reg = lm75_read_value(client, LM75_REG_CONF);
277 if (reg >= 0 && (reg & 0x01))
278 lm75_write_value(client, LM75_REG_CONF, reg & 0xfe);
279}
280
281static struct lm75_data *lm75_update_device(struct device *dev) 390static struct lm75_data *lm75_update_device(struct device *dev)
282{ 391{
283 struct i2c_client *client = to_i2c_client(dev); 392 struct i2c_client *client = to_i2c_client(dev);
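
The lm75 register helpers kept by the hunk above byte-swap every 16-bit transfer, because the LM75 sends the high byte first while SMBus word transactions are little-endian. A stand-alone user-space sketch of that conversion (swap16() and lm75_reg_to_millicelsius() are illustrative names, not the driver's swab16()-based helpers; the 0.5 degC-per-step scaling assumes the LM75's default 9-bit resolution):

#include <stdint.h>
#include <stdio.h>

/* SMBus word reads return the low byte first, but the LM75 transmits
 * its registers high byte first, so the raw word arrives byte-swapped. */
static uint16_t swap16(uint16_t v)
{
        return (uint16_t)((v << 8) | (v >> 8));
}

/* Temperature sits in bits 15..7, 0.5 degC per step at the default
 * 9-bit resolution, so the low 7 bits read back as zero. */
static int lm75_reg_to_millicelsius(uint16_t raw_smbus_word)
{
        int16_t reg = (int16_t)swap16(raw_smbus_word);

        return (reg / 128) * 500;
}

int main(void)
{
        printf("%d\n", lm75_reg_to_millicelsius(0x0019)); /* 25.0 degC -> 25000 */
        printf("%d\n", lm75_reg_to_millicelsius(0x80ff)); /* -0.5 degC -> -500 */
        return 0;
}
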
@@ -309,13 +418,28 @@ static struct lm75_data *lm75_update_device(struct device *dev)
309 return data; 418 return data;
310} 419}
311 420
421/*-----------------------------------------------------------------------*/
422
423/* module glue */
424
312static int __init sensors_lm75_init(void) 425static int __init sensors_lm75_init(void)
313{ 426{
314 return i2c_add_driver(&lm75_driver); 427 int status;
428
429 status = i2c_add_driver(&lm75_driver);
430 if (status < 0)
431 return status;
432
433 status = i2c_add_driver(&lm75_legacy_driver);
434 if (status < 0)
435 i2c_del_driver(&lm75_driver);
436
437 return status;
315} 438}
316 439
317static void __exit sensors_lm75_exit(void) 440static void __exit sensors_lm75_exit(void)
318{ 441{
442 i2c_del_driver(&lm75_legacy_driver);
319 i2c_del_driver(&lm75_driver); 443 i2c_del_driver(&lm75_driver);
320} 444}
321 445
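
Both detect functions in this commit, and the new sensors_lm75_init() just above, share one unwind pattern: acquire resources in order, and on failure jump into a ladder of labels that releases only what already succeeded. A deliberately minimal user-space sketch of the idiom (register_a()/register_b() are toy stand-ins, not kernel or i2c-core APIs):

#include <stdio.h>

static int register_a(void) { puts("A registered"); return 0; }
static void unregister_a(void) { puts("A unregistered"); }
static int register_b(int fail) { return fail ? -1 : 0; }

static int init_both(int make_b_fail)
{
        int err;

        err = register_a();
        if (err)
                goto out;

        err = register_b(make_b_fail);
        if (err)
                goto out_unregister_a;

        return 0;

out_unregister_a:
        unregister_a();         /* undo only what already succeeded */
out:
        return err;
}

int main(void)
{
        printf("happy path: %d\n", init_both(0));
        printf("B fails:    %d\n", init_both(1));
        return 0;
}

sensors_lm75_init() above has the same shape: if registering lm75_legacy_driver fails, only lm75_driver is deleted before the error is returned.
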
diff --git a/drivers/hwmon/lm85.c b/drivers/hwmon/lm85.c
index ee5eca1c1921..12d446f54f97 100644
--- a/drivers/hwmon/lm85.c
+++ b/drivers/hwmon/lm85.c
@@ -1,7 +1,7 @@
1/* 1/*
2 lm85.c - Part of lm_sensors, Linux kernel modules for hardware 2 lm85.c - Part of lm_sensors, Linux kernel modules for hardware
3 monitoring 3 monitoring
4 Copyright (c) 1998, 1999 Frodo Looijaard <frodol@dds.nl> 4 Copyright (c) 1998, 1999 Frodo Looijaard <frodol@dds.nl>
5 Copyright (c) 2002, 2003 Philip Pokorny <ppokorny@penguincomputing.com> 5 Copyright (c) 2002, 2003 Philip Pokorny <ppokorny@penguincomputing.com>
6 Copyright (c) 2003 Margit Schubert-While <margitsw@t-online.de> 6 Copyright (c) 2003 Margit Schubert-While <margitsw@t-online.de>
7 Copyright (c) 2004 Justin Thiessen <jthiessen@penguincomputing.com> 7 Copyright (c) 2004 Justin Thiessen <jthiessen@penguincomputing.com>
@@ -51,24 +51,17 @@ I2C_CLIENT_INSMOD_6(lm85b, lm85c, adm1027, adt7463, emc6d100, emc6d102);
51#define LM85_REG_TEMP_MAX(nr) (0x4f + (nr) * 2) 51#define LM85_REG_TEMP_MAX(nr) (0x4f + (nr) * 2)
52 52
53/* Fan speeds are LSB, MSB (2 bytes) */ 53/* Fan speeds are LSB, MSB (2 bytes) */
54#define LM85_REG_FAN(nr) (0x28 + (nr) *2) 54#define LM85_REG_FAN(nr) (0x28 + (nr) * 2)
55#define LM85_REG_FAN_MIN(nr) (0x54 + (nr) *2) 55#define LM85_REG_FAN_MIN(nr) (0x54 + (nr) * 2)
56 56
57#define LM85_REG_PWM(nr) (0x30 + (nr)) 57#define LM85_REG_PWM(nr) (0x30 + (nr))
58 58
59#define ADT7463_REG_OPPOINT(nr) (0x33 + (nr))
60
61#define ADT7463_REG_TMIN_CTL1 0x36
62#define ADT7463_REG_TMIN_CTL2 0x37
63
64#define LM85_REG_DEVICE 0x3d
65#define LM85_REG_COMPANY 0x3e 59#define LM85_REG_COMPANY 0x3e
66#define LM85_REG_VERSTEP 0x3f 60#define LM85_REG_VERSTEP 0x3f
67/* These are the recognized values for the above regs */ 61/* These are the recognized values for the above regs */
68#define LM85_DEVICE_ADX 0x27
69#define LM85_COMPANY_NATIONAL 0x01 62#define LM85_COMPANY_NATIONAL 0x01
70#define LM85_COMPANY_ANALOG_DEV 0x41 63#define LM85_COMPANY_ANALOG_DEV 0x41
71#define LM85_COMPANY_SMSC 0x5c 64#define LM85_COMPANY_SMSC 0x5c
72#define LM85_VERSTEP_VMASK 0xf0 65#define LM85_VERSTEP_VMASK 0xf0
73#define LM85_VERSTEP_GENERIC 0x60 66#define LM85_VERSTEP_GENERIC 0x60
74#define LM85_VERSTEP_LM85C 0x60 67#define LM85_VERSTEP_LM85C 0x60
@@ -91,58 +84,45 @@ I2C_CLIENT_INSMOD_6(lm85b, lm85c, adm1027, adt7463, emc6d100, emc6d102);
91#define LM85_REG_AFAN_CONFIG(nr) (0x5c + (nr)) 84#define LM85_REG_AFAN_CONFIG(nr) (0x5c + (nr))
92#define LM85_REG_AFAN_RANGE(nr) (0x5f + (nr)) 85#define LM85_REG_AFAN_RANGE(nr) (0x5f + (nr))
93#define LM85_REG_AFAN_SPIKE1 0x62 86#define LM85_REG_AFAN_SPIKE1 0x62
94#define LM85_REG_AFAN_SPIKE2 0x63
95#define LM85_REG_AFAN_MINPWM(nr) (0x64 + (nr)) 87#define LM85_REG_AFAN_MINPWM(nr) (0x64 + (nr))
96#define LM85_REG_AFAN_LIMIT(nr) (0x67 + (nr)) 88#define LM85_REG_AFAN_LIMIT(nr) (0x67 + (nr))
97#define LM85_REG_AFAN_CRITICAL(nr) (0x6a + (nr)) 89#define LM85_REG_AFAN_CRITICAL(nr) (0x6a + (nr))
98#define LM85_REG_AFAN_HYST1 0x6d 90#define LM85_REG_AFAN_HYST1 0x6d
99#define LM85_REG_AFAN_HYST2 0x6e 91#define LM85_REG_AFAN_HYST2 0x6e
100 92
101#define LM85_REG_TACH_MODE 0x74
102#define LM85_REG_SPINUP_CTL 0x75
103
104#define ADM1027_REG_TEMP_OFFSET(nr) (0x70 + (nr))
105#define ADM1027_REG_CONFIG2 0x73
106#define ADM1027_REG_INTMASK1 0x74
107#define ADM1027_REG_INTMASK2 0x75
108#define ADM1027_REG_EXTEND_ADC1 0x76 93#define ADM1027_REG_EXTEND_ADC1 0x76
109#define ADM1027_REG_EXTEND_ADC2 0x77 94#define ADM1027_REG_EXTEND_ADC2 0x77
110#define ADM1027_REG_CONFIG3 0x78
111#define ADM1027_REG_FAN_PPR 0x7b
112
113#define ADT7463_REG_THERM 0x79
114#define ADT7463_REG_THERM_LIMIT 0x7A
115 95
116#define EMC6D100_REG_ALARM3 0x7d 96#define EMC6D100_REG_ALARM3 0x7d
117/* IN5, IN6 and IN7 */ 97/* IN5, IN6 and IN7 */
118#define EMC6D100_REG_IN(nr) (0x70 + ((nr)-5)) 98#define EMC6D100_REG_IN(nr) (0x70 + ((nr) - 5))
119#define EMC6D100_REG_IN_MIN(nr) (0x73 + ((nr)-5) * 2) 99#define EMC6D100_REG_IN_MIN(nr) (0x73 + ((nr) - 5) * 2)
120#define EMC6D100_REG_IN_MAX(nr) (0x74 + ((nr)-5) * 2) 100#define EMC6D100_REG_IN_MAX(nr) (0x74 + ((nr) - 5) * 2)
121#define EMC6D102_REG_EXTEND_ADC1 0x85 101#define EMC6D102_REG_EXTEND_ADC1 0x85
122#define EMC6D102_REG_EXTEND_ADC2 0x86 102#define EMC6D102_REG_EXTEND_ADC2 0x86
123#define EMC6D102_REG_EXTEND_ADC3 0x87 103#define EMC6D102_REG_EXTEND_ADC3 0x87
124#define EMC6D102_REG_EXTEND_ADC4 0x88 104#define EMC6D102_REG_EXTEND_ADC4 0x88
125 105
126 106
127/* Conversions. Rounding and limit checking is only done on the TO_REG 107/* Conversions. Rounding and limit checking is only done on the TO_REG
128 variants. Note that you should be a bit careful with which arguments 108 variants. Note that you should be a bit careful with which arguments
129 these macros are called: arguments may be evaluated more than once. 109 these macros are called: arguments may be evaluated more than once.
130 */ 110 */
131 111
132/* IN are scaled acording to built-in resistors */ 112/* IN are scaled acording to built-in resistors */
133static int lm85_scaling[] = { /* .001 Volts */ 113static const int lm85_scaling[] = { /* .001 Volts */
134 2500, 2250, 3300, 5000, 12000, 114 2500, 2250, 3300, 5000, 12000,
135 3300, 1500, 1800 /*EMC6D100*/ 115 3300, 1500, 1800 /*EMC6D100*/
136 }; 116};
137#define SCALE(val,from,to) (((val)*(to) + ((from)/2))/(from)) 117#define SCALE(val, from, to) (((val) * (to) + ((from) / 2)) / (from))
138 118
139#define INS_TO_REG(n,val) \ 119#define INS_TO_REG(n, val) \
140 SENSORS_LIMIT(SCALE(val,lm85_scaling[n],192),0,255) 120 SENSORS_LIMIT(SCALE(val, lm85_scaling[n], 192), 0, 255)
141 121
142#define INSEXT_FROM_REG(n,val,ext) \ 122#define INSEXT_FROM_REG(n, val, ext) \
143 SCALE(((val) << 4) + (ext), 192 << 4, lm85_scaling[n]) 123 SCALE(((val) << 4) + (ext), 192 << 4, lm85_scaling[n])
144 124
145#define INS_FROM_REG(n,val) SCALE((val), 192, lm85_scaling[n]) 125#define INS_FROM_REG(n, val) SCALE((val), 192, lm85_scaling[n])
146 126
147/* FAN speed is measured using 90kHz clock */ 127/* FAN speed is measured using 90kHz clock */
148static inline u16 FAN_TO_REG(unsigned long val) 128static inline u16 FAN_TO_REG(unsigned long val)
@@ -151,16 +131,17 @@ static inline u16 FAN_TO_REG(unsigned long val)
151 return 0xffff; 131 return 0xffff;
152 return SENSORS_LIMIT(5400000 / val, 1, 0xfffe); 132 return SENSORS_LIMIT(5400000 / val, 1, 0xfffe);
153} 133}
154#define FAN_FROM_REG(val) ((val)==0?-1:(val)==0xffff?0:5400000/(val)) 134#define FAN_FROM_REG(val) ((val) == 0 ? -1 : (val) == 0xffff ? 0 : \
135 5400000 / (val))
155 136
156/* Temperature is reported in .001 degC increments */ 137/* Temperature is reported in .001 degC increments */
157#define TEMP_TO_REG(val) \ 138#define TEMP_TO_REG(val) \
158 SENSORS_LIMIT(SCALE(val,1000,1),-127,127) 139 SENSORS_LIMIT(SCALE(val, 1000, 1), -127, 127)
159#define TEMPEXT_FROM_REG(val,ext) \ 140#define TEMPEXT_FROM_REG(val, ext) \
160 SCALE(((val) << 4) + (ext), 16, 1000) 141 SCALE(((val) << 4) + (ext), 16, 1000)
161#define TEMP_FROM_REG(val) ((val) * 1000) 142#define TEMP_FROM_REG(val) ((val) * 1000)
162 143
163#define PWM_TO_REG(val) (SENSORS_LIMIT(val,0,255)) 144#define PWM_TO_REG(val) SENSORS_LIMIT(val, 0, 255)
164#define PWM_FROM_REG(val) (val) 145#define PWM_FROM_REG(val) (val)
165 146
166 147
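
The conversion hunks above define the scaling behind the sysfs files: an 8-bit voltage reading of 192 corresponds to the nominal rail value in lm85_scaling[] (millivolts, rounded to nearest by SCALE()), and fan tach counts run off a 90 kHz clock, so RPM = 5400000 / count. A user-space sketch of the same arithmetic (function names are illustrative; the 0 and 0xffff special cases mirror FAN_FROM_REG):

#include <stdio.h>

static const int lm85_scaling_mv[] = {
        2500, 2250, 3300, 5000, 12000, 3300, 1500, 1800,
};

/* SCALE(): rescale val from one full-scale range to another, rounding
 * to nearest by adding half of the source range before dividing. */
static int scale(int val, int from, int to)
{
        return (val * to + from / 2) / from;
}

static int in_from_reg(int channel, int reg)
{
        return scale(reg, 192, lm85_scaling_mv[channel]);
}

static int fan_rpm_from_reg(int reg)
{
        if (reg == 0)
                return -1;      /* FAN_FROM_REG maps 0 to -1 */
        if (reg == 0xffff)
                return 0;       /* and 0xffff to 0 */
        return 5400000 / reg;
}

int main(void)
{
        /* A reading of 192 on channel 4 (the 12 V rail) is nominal. */
        printf("in4 = %d mV\n", in_from_reg(4, 192));
        /* 2700 tach counts -> 5400000 / 2700 = 2000 RPM. */
        printf("fan = %d RPM\n", fan_rpm_from_reg(2700));
        return 0;
}

Adding from/2 before the division is what makes SCALE() round rather than truncate.
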
@@ -183,17 +164,17 @@ static inline u16 FAN_TO_REG(unsigned long val)
183 */ 164 */
184 165
185/* These are the zone temperature range encodings in .001 degree C */ 166/* These are the zone temperature range encodings in .001 degree C */
186static int lm85_range_map[] = { 167static const int lm85_range_map[] = {
187 2000, 2500, 3300, 4000, 5000, 6600, 168 2000, 2500, 3300, 4000, 5000, 6600, 8000, 10000,
188 8000, 10000, 13300, 16000, 20000, 26600, 169 13300, 16000, 20000, 26600, 32000, 40000, 53300, 80000
189 32000, 40000, 53300, 80000 170};
190 }; 171
191static int RANGE_TO_REG( int range ) 172static int RANGE_TO_REG(int range)
192{ 173{
193 int i; 174 int i;
194 175
195 if (range >= lm85_range_map[15]) 176 if (range >= lm85_range_map[15])
196 return 15 ; 177 return 15;
197 178
198 /* Find the closest match */ 179 /* Find the closest match */
199 for (i = 14; i >= 0; --i) { 180 for (i = 14; i >= 0; --i) {
@@ -207,28 +188,25 @@ static int RANGE_TO_REG( int range )
207 188
208 return 0; 189 return 0;
209} 190}
210#define RANGE_FROM_REG(val) (lm85_range_map[(val)&0x0f]) 191#define RANGE_FROM_REG(val) lm85_range_map[(val) & 0x0f]
211 192
212/* These are the Acoustic Enhancement, or Temperature smoothing encodings
213 * NOTE: The enable/disable bit is INCLUDED in these encodings as the
214 * MSB (bit 3, value 8). If the enable bit is 0, the encoded value
215 * is ignored, or set to 0.
216 */
217/* These are the PWM frequency encodings */ 193/* These are the PWM frequency encodings */
218static int lm85_freq_map[] = { /* .1 Hz */ 194static const int lm85_freq_map[] = { /* .1 Hz */
219 100, 150, 230, 300, 380, 470, 620, 940 195 100, 150, 230, 300, 380, 470, 620, 940
220 }; 196};
221static int FREQ_TO_REG( int freq ) 197
198static int FREQ_TO_REG(int freq)
222{ 199{
223 int i; 200 int i;
224 201
225 if( freq >= lm85_freq_map[7] ) { return 7 ; } 202 if (freq >= lm85_freq_map[7])
226 for( i = 0 ; i < 7 ; ++i ) 203 return 7;
227 if( freq <= lm85_freq_map[i] ) 204 for (i = 0; i < 7; ++i)
228 break ; 205 if (freq <= lm85_freq_map[i])
229 return( i & 0x07 ); 206 break;
207 return i;
230} 208}
231#define FREQ_FROM_REG(val) (lm85_freq_map[(val)&0x07]) 209#define FREQ_FROM_REG(val) lm85_freq_map[(val) & 0x07]
232 210
233/* Since we can't use strings, I'm abusing these numbers 211/* Since we can't use strings, I'm abusing these numbers
234 * to stand in for the following meanings: 212 * to stand in for the following meanings:
@@ -242,30 +220,23 @@ static int FREQ_TO_REG( int freq )
242 * -2 -- PWM responds to manual control 220 * -2 -- PWM responds to manual control
243 */ 221 */
244 222
245static int lm85_zone_map[] = { 1, 2, 3, -1, 0, 23, 123, -2 }; 223static const int lm85_zone_map[] = { 1, 2, 3, -1, 0, 23, 123, -2 };
246#define ZONE_FROM_REG(val) (lm85_zone_map[((val)>>5)&0x07]) 224#define ZONE_FROM_REG(val) lm85_zone_map[(val) >> 5]
247 225
248static int ZONE_TO_REG( int zone ) 226static int ZONE_TO_REG(int zone)
249{ 227{
250 int i; 228 int i;
251 229
252 for( i = 0 ; i <= 7 ; ++i ) 230 for (i = 0; i <= 7; ++i)
253 if( zone == lm85_zone_map[i] ) 231 if (zone == lm85_zone_map[i])
254 break ; 232 break;
255 if( i > 7 ) /* Not found. */ 233 if (i > 7) /* Not found. */
256 i = 3; /* Always 100% */ 234 i = 3; /* Always 100% */
257 return( (i & 0x07)<<5 ); 235 return i << 5;
258} 236}
259 237
260#define HYST_TO_REG(val) (SENSORS_LIMIT(((val)+500)/1000,0,15)) 238#define HYST_TO_REG(val) SENSORS_LIMIT(((val) + 500) / 1000, 0, 15)
261#define HYST_FROM_REG(val) ((val)*1000) 239#define HYST_FROM_REG(val) ((val) * 1000)
262
263#define OFFSET_TO_REG(val) (SENSORS_LIMIT((val)/25,-127,127))
264#define OFFSET_FROM_REG(val) ((val)*25)
265
266#define PPR_MASK(fan) (0x03<<(fan *2))
267#define PPR_TO_REG(val,fan) (SENSORS_LIMIT((val)-1,0,3)<<(fan *2))
268#define PPR_FROM_REG(val,fan) ((((val)>>(fan * 2))&0x03)+1)
269 240
270/* Chip sampling rates 241/* Chip sampling rates
271 * 242 *
@@ -292,11 +263,11 @@ struct lm85_zone {
292 u8 hyst; /* Low limit hysteresis. (0-15) */ 263 u8 hyst; /* Low limit hysteresis. (0-15) */
293 u8 range; /* Temp range, encoded */ 264 u8 range; /* Temp range, encoded */
294 s8 critical; /* "All fans ON" temp limit */ 265 s8 critical; /* "All fans ON" temp limit */
295 u8 off_desired; /* Actual "off" temperature specified. Preserved 266 u8 off_desired; /* Actual "off" temperature specified. Preserved
296 * to prevent "drift" as other autofan control 267 * to prevent "drift" as other autofan control
297 * values change. 268 * values change.
298 */ 269 */
299 u8 max_desired; /* Actual "max" temperature specified. Preserved 270 u8 max_desired; /* Actual "max" temperature specified. Preserved
300 * to prevent "drift" as other autofan control 271 * to prevent "drift" as other autofan control
301 * values change. 272 * values change.
302 */ 273 */
@@ -327,23 +298,13 @@ struct lm85_data {
327 s8 temp[3]; /* Register value */ 298 s8 temp[3]; /* Register value */
328 s8 temp_min[3]; /* Register value */ 299 s8 temp_min[3]; /* Register value */
329 s8 temp_max[3]; /* Register value */ 300 s8 temp_max[3]; /* Register value */
330 s8 temp_offset[3]; /* Register value */
331 u16 fan[4]; /* Register value */ 301 u16 fan[4]; /* Register value */
332 u16 fan_min[4]; /* Register value */ 302 u16 fan_min[4]; /* Register value */
333 u8 pwm[3]; /* Register value */ 303 u8 pwm[3]; /* Register value */
334 u8 spinup_ctl; /* Register encoding, combined */
335 u8 tach_mode; /* Register encoding, combined */
336 u8 temp_ext[3]; /* Decoded values */ 304 u8 temp_ext[3]; /* Decoded values */
337 u8 in_ext[8]; /* Decoded values */ 305 u8 in_ext[8]; /* Decoded values */
338 u8 fan_ppr; /* Register value */
339 u8 smooth[3]; /* Register encoding */
340 u8 vid; /* Register value */ 306 u8 vid; /* Register value */
341 u8 vrm; /* VRM version */ 307 u8 vrm; /* VRM version */
342 u8 syncpwm3; /* Saved PWM3 for TACH 2,3,4 config */
343 u8 oppoint[3]; /* Register value */
344 u16 tmin_ctl; /* Register value */
345 unsigned long therm_total; /* Cummulative therm count */
346 u8 therm_limit; /* Register value */
347 u32 alarms; /* Register encoding, combined */ 308 u32 alarms; /* Register encoding, combined */
348 struct lm85_autofan autofan[3]; 309 struct lm85_autofan autofan[3];
349 struct lm85_zone zone[3]; 310 struct lm85_zone zone[3];
@@ -355,9 +316,8 @@ static int lm85_detect(struct i2c_adapter *adapter, int address,
355static int lm85_detach_client(struct i2c_client *client); 316static int lm85_detach_client(struct i2c_client *client);
356 317
357static int lm85_read_value(struct i2c_client *client, u8 reg); 318static int lm85_read_value(struct i2c_client *client, u8 reg);
358static int lm85_write_value(struct i2c_client *client, u8 reg, int value); 319static void lm85_write_value(struct i2c_client *client, u8 reg, int value);
359static struct lm85_data *lm85_update_device(struct device *dev); 320static struct lm85_data *lm85_update_device(struct device *dev);
360static void lm85_init_client(struct i2c_client *client);
361 321
362 322
363static struct i2c_driver lm85_driver = { 323static struct i2c_driver lm85_driver = {
@@ -375,7 +335,7 @@ static ssize_t show_fan(struct device *dev, struct device_attribute *attr,
375{ 335{
376 int nr = to_sensor_dev_attr(attr)->index; 336 int nr = to_sensor_dev_attr(attr)->index;
377 struct lm85_data *data = lm85_update_device(dev); 337 struct lm85_data *data = lm85_update_device(dev);
378 return sprintf(buf,"%d\n", FAN_FROM_REG(data->fan[nr]) ); 338 return sprintf(buf, "%d\n", FAN_FROM_REG(data->fan[nr]));
379} 339}
380 340
381static ssize_t show_fan_min(struct device *dev, struct device_attribute *attr, 341static ssize_t show_fan_min(struct device *dev, struct device_attribute *attr,
@@ -383,7 +343,7 @@ static ssize_t show_fan_min(struct device *dev, struct device_attribute *attr,
383{ 343{
384 int nr = to_sensor_dev_attr(attr)->index; 344 int nr = to_sensor_dev_attr(attr)->index;
385 struct lm85_data *data = lm85_update_device(dev); 345 struct lm85_data *data = lm85_update_device(dev);
386 return sprintf(buf,"%d\n", FAN_FROM_REG(data->fan_min[nr]) ); 346 return sprintf(buf, "%d\n", FAN_FROM_REG(data->fan_min[nr]));
387} 347}
388 348
389static ssize_t set_fan_min(struct device *dev, struct device_attribute *attr, 349static ssize_t set_fan_min(struct device *dev, struct device_attribute *attr,
@@ -414,7 +374,8 @@ show_fan_offset(4);
414 374
415/* vid, vrm, alarms */ 375/* vid, vrm, alarms */
416 376
417static ssize_t show_vid_reg(struct device *dev, struct device_attribute *attr, char *buf) 377static ssize_t show_vid_reg(struct device *dev, struct device_attribute *attr,
378 char *buf)
418{ 379{
419 struct lm85_data *data = lm85_update_device(dev); 380 struct lm85_data *data = lm85_update_device(dev);
420 int vid; 381 int vid;
@@ -432,13 +393,15 @@ static ssize_t show_vid_reg(struct device *dev, struct device_attribute *attr, c
432 393
433static DEVICE_ATTR(cpu0_vid, S_IRUGO, show_vid_reg, NULL); 394static DEVICE_ATTR(cpu0_vid, S_IRUGO, show_vid_reg, NULL);
434 395
435static ssize_t show_vrm_reg(struct device *dev, struct device_attribute *attr, char *buf) 396static ssize_t show_vrm_reg(struct device *dev, struct device_attribute *attr,
397 char *buf)
436{ 398{
437 struct lm85_data *data = dev_get_drvdata(dev); 399 struct lm85_data *data = dev_get_drvdata(dev);
438 return sprintf(buf, "%ld\n", (long) data->vrm); 400 return sprintf(buf, "%ld\n", (long) data->vrm);
439} 401}
440 402
441static ssize_t store_vrm_reg(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) 403static ssize_t store_vrm_reg(struct device *dev, struct device_attribute *attr,
404 const char *buf, size_t count)
442{ 405{
443 struct lm85_data *data = dev_get_drvdata(dev); 406 struct lm85_data *data = dev_get_drvdata(dev);
444 data->vrm = simple_strtoul(buf, NULL, 10); 407 data->vrm = simple_strtoul(buf, NULL, 10);
@@ -447,7 +410,8 @@ static ssize_t store_vrm_reg(struct device *dev, struct device_attribute *attr,
447 410
448static DEVICE_ATTR(vrm, S_IRUGO | S_IWUSR, show_vrm_reg, store_vrm_reg); 411static DEVICE_ATTR(vrm, S_IRUGO | S_IWUSR, show_vrm_reg, store_vrm_reg);
449 412
450static ssize_t show_alarms_reg(struct device *dev, struct device_attribute *attr, char *buf) 413static ssize_t show_alarms_reg(struct device *dev, struct device_attribute
414 *attr, char *buf)
451{ 415{
452 struct lm85_data *data = lm85_update_device(dev); 416 struct lm85_data *data = lm85_update_device(dev);
453 return sprintf(buf, "%u\n", data->alarms); 417 return sprintf(buf, "%u\n", data->alarms);
@@ -488,7 +452,7 @@ static ssize_t show_pwm(struct device *dev, struct device_attribute *attr,
488{ 452{
489 int nr = to_sensor_dev_attr(attr)->index; 453 int nr = to_sensor_dev_attr(attr)->index;
490 struct lm85_data *data = lm85_update_device(dev); 454 struct lm85_data *data = lm85_update_device(dev);
491 return sprintf(buf,"%d\n", PWM_FROM_REG(data->pwm[nr]) ); 455 return sprintf(buf, "%d\n", PWM_FROM_REG(data->pwm[nr]));
492} 456}
493 457
494static ssize_t set_pwm(struct device *dev, struct device_attribute *attr, 458static ssize_t set_pwm(struct device *dev, struct device_attribute *attr,
@@ -581,17 +545,16 @@ static ssize_t show_in(struct device *dev, struct device_attribute *attr,
581{ 545{
582 int nr = to_sensor_dev_attr(attr)->index; 546 int nr = to_sensor_dev_attr(attr)->index;
583 struct lm85_data *data = lm85_update_device(dev); 547 struct lm85_data *data = lm85_update_device(dev);
584 return sprintf( buf, "%d\n", INSEXT_FROM_REG(nr, 548 return sprintf(buf, "%d\n", INSEXT_FROM_REG(nr, data->in[nr],
585 data->in[nr], 549 data->in_ext[nr]));
586 data->in_ext[nr]));
587} 550}
588 551
589static ssize_t show_in_min(struct device *dev, struct device_attribute *attr, 552static ssize_t show_in_min(struct device *dev, struct device_attribute *attr,
590 char *buf) 553 char *buf)
591{ 554{
592 int nr = to_sensor_dev_attr(attr)->index; 555 int nr = to_sensor_dev_attr(attr)->index;
593 struct lm85_data *data = lm85_update_device(dev); 556 struct lm85_data *data = lm85_update_device(dev);
594 return sprintf(buf,"%d\n", INS_FROM_REG(nr, data->in_min[nr]) ); 557 return sprintf(buf, "%d\n", INS_FROM_REG(nr, data->in_min[nr]));
595} 558}
596 559
597static ssize_t set_in_min(struct device *dev, struct device_attribute *attr, 560static ssize_t set_in_min(struct device *dev, struct device_attribute *attr,
@@ -614,7 +577,7 @@ static ssize_t show_in_max(struct device *dev, struct device_attribute *attr,
614{ 577{
615 int nr = to_sensor_dev_attr(attr)->index; 578 int nr = to_sensor_dev_attr(attr)->index;
616 struct lm85_data *data = lm85_update_device(dev); 579 struct lm85_data *data = lm85_update_device(dev);
617 return sprintf(buf,"%d\n", INS_FROM_REG(nr, data->in_max[nr]) ); 580 return sprintf(buf, "%d\n", INS_FROM_REG(nr, data->in_max[nr]));
618} 581}
619 582
620static ssize_t set_in_max(struct device *dev, struct device_attribute *attr, 583static ssize_t set_in_max(struct device *dev, struct device_attribute *attr,
@@ -656,8 +619,8 @@ static ssize_t show_temp(struct device *dev, struct device_attribute *attr,
656{ 619{
657 int nr = to_sensor_dev_attr(attr)->index; 620 int nr = to_sensor_dev_attr(attr)->index;
658 struct lm85_data *data = lm85_update_device(dev); 621 struct lm85_data *data = lm85_update_device(dev);
659 return sprintf(buf,"%d\n", TEMPEXT_FROM_REG(data->temp[nr], 622 return sprintf(buf, "%d\n", TEMPEXT_FROM_REG(data->temp[nr],
660 data->temp_ext[nr])); 623 data->temp_ext[nr]));
661} 624}
662 625
663static ssize_t show_temp_min(struct device *dev, struct device_attribute *attr, 626static ssize_t show_temp_min(struct device *dev, struct device_attribute *attr,
@@ -665,7 +628,7 @@ static ssize_t show_temp_min(struct device *dev, struct device_attribute *attr,
665{ 628{
666 int nr = to_sensor_dev_attr(attr)->index; 629 int nr = to_sensor_dev_attr(attr)->index;
667 struct lm85_data *data = lm85_update_device(dev); 630 struct lm85_data *data = lm85_update_device(dev);
668 return sprintf(buf,"%d\n", TEMP_FROM_REG(data->temp_min[nr]) ); 631 return sprintf(buf, "%d\n", TEMP_FROM_REG(data->temp_min[nr]));
669} 632}
670 633
671static ssize_t set_temp_min(struct device *dev, struct device_attribute *attr, 634static ssize_t set_temp_min(struct device *dev, struct device_attribute *attr,
@@ -688,7 +651,7 @@ static ssize_t show_temp_max(struct device *dev, struct device_attribute *attr,
688{ 651{
689 int nr = to_sensor_dev_attr(attr)->index; 652 int nr = to_sensor_dev_attr(attr)->index;
690 struct lm85_data *data = lm85_update_device(dev); 653 struct lm85_data *data = lm85_update_device(dev);
691 return sprintf(buf,"%d\n", TEMP_FROM_REG(data->temp_max[nr]) ); 654 return sprintf(buf, "%d\n", TEMP_FROM_REG(data->temp_max[nr]));
692} 655}
693 656
694static ssize_t set_temp_max(struct device *dev, struct device_attribute *attr, 657static ssize_t set_temp_max(struct device *dev, struct device_attribute *attr,
@@ -697,7 +660,7 @@ static ssize_t set_temp_max(struct device *dev, struct device_attribute *attr,
697 int nr = to_sensor_dev_attr(attr)->index; 660 int nr = to_sensor_dev_attr(attr)->index;
698 struct i2c_client *client = to_i2c_client(dev); 661 struct i2c_client *client = to_i2c_client(dev);
699 struct lm85_data *data = i2c_get_clientdata(client); 662 struct lm85_data *data = i2c_get_clientdata(client);
700 long val = simple_strtol(buf, NULL, 10); 663 long val = simple_strtol(buf, NULL, 10);
701 664
702 mutex_lock(&data->update_lock); 665 mutex_lock(&data->update_lock);
703 data->temp_max[nr] = TEMP_TO_REG(val); 666 data->temp_max[nr] = TEMP_TO_REG(val);
@@ -726,7 +689,7 @@ static ssize_t show_pwm_auto_channels(struct device *dev,
726{ 689{
727 int nr = to_sensor_dev_attr(attr)->index; 690 int nr = to_sensor_dev_attr(attr)->index;
728 struct lm85_data *data = lm85_update_device(dev); 691 struct lm85_data *data = lm85_update_device(dev);
729 return sprintf(buf,"%d\n", ZONE_FROM_REG(data->autofan[nr].config)); 692 return sprintf(buf, "%d\n", ZONE_FROM_REG(data->autofan[nr].config));
730} 693}
731 694
732static ssize_t set_pwm_auto_channels(struct device *dev, 695static ssize_t set_pwm_auto_channels(struct device *dev,
@@ -735,11 +698,11 @@ static ssize_t set_pwm_auto_channels(struct device *dev,
735 int nr = to_sensor_dev_attr(attr)->index; 698 int nr = to_sensor_dev_attr(attr)->index;
736 struct i2c_client *client = to_i2c_client(dev); 699 struct i2c_client *client = to_i2c_client(dev);
737 struct lm85_data *data = i2c_get_clientdata(client); 700 struct lm85_data *data = i2c_get_clientdata(client);
738 long val = simple_strtol(buf, NULL, 10); 701 long val = simple_strtol(buf, NULL, 10);
739 702
740 mutex_lock(&data->update_lock); 703 mutex_lock(&data->update_lock);
741 data->autofan[nr].config = (data->autofan[nr].config & (~0xe0)) 704 data->autofan[nr].config = (data->autofan[nr].config & (~0xe0))
742 | ZONE_TO_REG(val) ; 705 | ZONE_TO_REG(val);
743 lm85_write_value(client, LM85_REG_AFAN_CONFIG(nr), 706 lm85_write_value(client, LM85_REG_AFAN_CONFIG(nr),
744 data->autofan[nr].config); 707 data->autofan[nr].config);
745 mutex_unlock(&data->update_lock); 708 mutex_unlock(&data->update_lock);
@@ -751,7 +714,7 @@ static ssize_t show_pwm_auto_pwm_min(struct device *dev,
751{ 714{
752 int nr = to_sensor_dev_attr(attr)->index; 715 int nr = to_sensor_dev_attr(attr)->index;
753 struct lm85_data *data = lm85_update_device(dev); 716 struct lm85_data *data = lm85_update_device(dev);
754 return sprintf(buf,"%d\n", PWM_FROM_REG(data->autofan[nr].min_pwm)); 717 return sprintf(buf, "%d\n", PWM_FROM_REG(data->autofan[nr].min_pwm));
755} 718}
756 719
757static ssize_t set_pwm_auto_pwm_min(struct device *dev, 720static ssize_t set_pwm_auto_pwm_min(struct device *dev,
@@ -775,7 +738,7 @@ static ssize_t show_pwm_auto_pwm_minctl(struct device *dev,
775{ 738{
776 int nr = to_sensor_dev_attr(attr)->index; 739 int nr = to_sensor_dev_attr(attr)->index;
777 struct lm85_data *data = lm85_update_device(dev); 740 struct lm85_data *data = lm85_update_device(dev);
778 return sprintf(buf,"%d\n", data->autofan[nr].min_off); 741 return sprintf(buf, "%d\n", data->autofan[nr].min_off);
779} 742}
780 743
781static ssize_t set_pwm_auto_pwm_minctl(struct device *dev, 744static ssize_t set_pwm_auto_pwm_minctl(struct device *dev,
@@ -785,15 +748,15 @@ static ssize_t set_pwm_auto_pwm_minctl(struct device *dev,
785 struct i2c_client *client = to_i2c_client(dev); 748 struct i2c_client *client = to_i2c_client(dev);
786 struct lm85_data *data = i2c_get_clientdata(client); 749 struct lm85_data *data = i2c_get_clientdata(client);
787 long val = simple_strtol(buf, NULL, 10); 750 long val = simple_strtol(buf, NULL, 10);
751 u8 tmp;
788 752
789 mutex_lock(&data->update_lock); 753 mutex_lock(&data->update_lock);
790 data->autofan[nr].min_off = val; 754 data->autofan[nr].min_off = val;
791 lm85_write_value(client, LM85_REG_AFAN_SPIKE1, data->smooth[0] 755 tmp = lm85_read_value(client, LM85_REG_AFAN_SPIKE1);
792 | data->syncpwm3 756 tmp &= ~(0x20 << nr);
793 | (data->autofan[0].min_off ? 0x20 : 0) 757 if (data->autofan[nr].min_off)
794 | (data->autofan[1].min_off ? 0x40 : 0) 758 tmp |= 0x20 << nr;
795 | (data->autofan[2].min_off ? 0x80 : 0) 759 lm85_write_value(client, LM85_REG_AFAN_SPIKE1, tmp);
796 );
797 mutex_unlock(&data->update_lock); 760 mutex_unlock(&data->update_lock);
798 return count; 761 return count;
799} 762}
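
The rewritten set_pwm_auto_pwm_minctl() above no longer rebuilds the whole SPIKE1 register from cached fields (smooth[], syncpwm3 and all three min_off flags); it does a read-modify-write of just this channel's bit, 0x20 << nr. The same bit manipulation against a fake register, as a compilable sketch (read_spike1()/write_spike1() are stand-ins for the SMBus accessors):

#include <stdio.h>
#include <stdint.h>

static uint8_t spike1 = 0x05;   /* pretend current register contents */

static uint8_t read_spike1(void) { return spike1; }
static void write_spike1(uint8_t v) { spike1 = v; }

static void set_min_off(int nr, int enable)
{
        uint8_t tmp = read_spike1();

        tmp &= ~(0x20 << nr);           /* clear this channel's bit */
        if (enable)
                tmp |= 0x20 << nr;      /* set it again if requested */
        write_spike1(tmp);
}

int main(void)
{
        set_min_off(1, 1);
        printf("SPIKE1 = 0x%02x\n", read_spike1());     /* 0x45 */
        set_min_off(1, 0);
        printf("SPIKE1 = 0x%02x\n", read_spike1());     /* 0x05 */
        return 0;
}

Working from the live register instead of cached copies is what allows the commit to drop the smooth[] and syncpwm3 members from struct lm85_data earlier in the patch.
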
@@ -803,7 +766,7 @@ static ssize_t show_pwm_auto_pwm_freq(struct device *dev,
803{ 766{
804 int nr = to_sensor_dev_attr(attr)->index; 767 int nr = to_sensor_dev_attr(attr)->index;
805 struct lm85_data *data = lm85_update_device(dev); 768 struct lm85_data *data = lm85_update_device(dev);
806 return sprintf(buf,"%d\n", FREQ_FROM_REG(data->autofan[nr].freq)); 769 return sprintf(buf, "%d\n", FREQ_FROM_REG(data->autofan[nr].freq));
807} 770}
808 771
809static ssize_t set_pwm_auto_pwm_freq(struct device *dev, 772static ssize_t set_pwm_auto_pwm_freq(struct device *dev,
@@ -818,8 +781,7 @@ static ssize_t set_pwm_auto_pwm_freq(struct device *dev,
818 data->autofan[nr].freq = FREQ_TO_REG(val); 781 data->autofan[nr].freq = FREQ_TO_REG(val);
819 lm85_write_value(client, LM85_REG_AFAN_RANGE(nr), 782 lm85_write_value(client, LM85_REG_AFAN_RANGE(nr),
820 (data->zone[nr].range << 4) 783 (data->zone[nr].range << 4)
821 | data->autofan[nr].freq 784 | data->autofan[nr].freq);
822 );
823 mutex_unlock(&data->update_lock); 785 mutex_unlock(&data->update_lock);
824 return count; 786 return count;
825} 787}
@@ -849,7 +811,7 @@ static ssize_t show_temp_auto_temp_off(struct device *dev,
849{ 811{
850 int nr = to_sensor_dev_attr(attr)->index; 812 int nr = to_sensor_dev_attr(attr)->index;
851 struct lm85_data *data = lm85_update_device(dev); 813 struct lm85_data *data = lm85_update_device(dev);
852 return sprintf(buf,"%d\n", TEMP_FROM_REG(data->zone[nr].limit) - 814 return sprintf(buf, "%d\n", TEMP_FROM_REG(data->zone[nr].limit) -
853 HYST_FROM_REG(data->zone[nr].hyst)); 815 HYST_FROM_REG(data->zone[nr].hyst));
854} 816}
855 817
@@ -866,15 +828,13 @@ static ssize_t set_temp_auto_temp_off(struct device *dev,
866 min = TEMP_FROM_REG(data->zone[nr].limit); 828 min = TEMP_FROM_REG(data->zone[nr].limit);
867 data->zone[nr].off_desired = TEMP_TO_REG(val); 829 data->zone[nr].off_desired = TEMP_TO_REG(val);
868 data->zone[nr].hyst = HYST_TO_REG(min - val); 830 data->zone[nr].hyst = HYST_TO_REG(min - val);
869 if ( nr == 0 || nr == 1 ) { 831 if (nr == 0 || nr == 1) {
870 lm85_write_value(client, LM85_REG_AFAN_HYST1, 832 lm85_write_value(client, LM85_REG_AFAN_HYST1,
871 (data->zone[0].hyst << 4) 833 (data->zone[0].hyst << 4)
872 | data->zone[1].hyst 834 | data->zone[1].hyst);
873 );
874 } else { 835 } else {
875 lm85_write_value(client, LM85_REG_AFAN_HYST2, 836 lm85_write_value(client, LM85_REG_AFAN_HYST2,
876 (data->zone[2].hyst << 4) 837 (data->zone[2].hyst << 4));
877 );
878 } 838 }
879 mutex_unlock(&data->update_lock); 839 mutex_unlock(&data->update_lock);
880 return count; 840 return count;
@@ -885,7 +845,7 @@ static ssize_t show_temp_auto_temp_min(struct device *dev,
885{ 845{
886 int nr = to_sensor_dev_attr(attr)->index; 846 int nr = to_sensor_dev_attr(attr)->index;
887 struct lm85_data *data = lm85_update_device(dev); 847 struct lm85_data *data = lm85_update_device(dev);
888 return sprintf(buf,"%d\n", TEMP_FROM_REG(data->zone[nr].limit) ); 848 return sprintf(buf, "%d\n", TEMP_FROM_REG(data->zone[nr].limit));
889} 849}
890 850
891static ssize_t set_temp_auto_temp_min(struct device *dev, 851static ssize_t set_temp_auto_temp_min(struct device *dev,
@@ -913,15 +873,13 @@ static ssize_t set_temp_auto_temp_min(struct device *dev,
913 data->zone[nr].hyst = HYST_TO_REG(TEMP_FROM_REG( 873 data->zone[nr].hyst = HYST_TO_REG(TEMP_FROM_REG(
914 data->zone[nr].limit) - TEMP_FROM_REG( 874 data->zone[nr].limit) - TEMP_FROM_REG(
915 data->zone[nr].off_desired)); 875 data->zone[nr].off_desired));
916 if ( nr == 0 || nr == 1 ) { 876 if (nr == 0 || nr == 1) {
917 lm85_write_value(client, LM85_REG_AFAN_HYST1, 877 lm85_write_value(client, LM85_REG_AFAN_HYST1,
918 (data->zone[0].hyst << 4) 878 (data->zone[0].hyst << 4)
919 | data->zone[1].hyst 879 | data->zone[1].hyst);
920 );
921 } else { 880 } else {
922 lm85_write_value(client, LM85_REG_AFAN_HYST2, 881 lm85_write_value(client, LM85_REG_AFAN_HYST2,
923 (data->zone[2].hyst << 4) 882 (data->zone[2].hyst << 4));
924 );
925 } 883 }
926 mutex_unlock(&data->update_lock); 884 mutex_unlock(&data->update_lock);
927 return count; 885 return count;
@@ -932,7 +890,7 @@ static ssize_t show_temp_auto_temp_max(struct device *dev,
932{ 890{
933 int nr = to_sensor_dev_attr(attr)->index; 891 int nr = to_sensor_dev_attr(attr)->index;
934 struct lm85_data *data = lm85_update_device(dev); 892 struct lm85_data *data = lm85_update_device(dev);
935 return sprintf(buf,"%d\n", TEMP_FROM_REG(data->zone[nr].limit) + 893 return sprintf(buf, "%d\n", TEMP_FROM_REG(data->zone[nr].limit) +
936 RANGE_FROM_REG(data->zone[nr].range)); 894 RANGE_FROM_REG(data->zone[nr].range));
937} 895}
938 896
@@ -962,11 +920,11 @@ static ssize_t show_temp_auto_temp_crit(struct device *dev,
962{ 920{
963 int nr = to_sensor_dev_attr(attr)->index; 921 int nr = to_sensor_dev_attr(attr)->index;
964 struct lm85_data *data = lm85_update_device(dev); 922 struct lm85_data *data = lm85_update_device(dev);
965 return sprintf(buf,"%d\n", TEMP_FROM_REG(data->zone[nr].critical)); 923 return sprintf(buf, "%d\n", TEMP_FROM_REG(data->zone[nr].critical));
966} 924}
967 925
968static ssize_t set_temp_auto_temp_crit(struct device *dev, 926static ssize_t set_temp_auto_temp_crit(struct device *dev,
969 struct device_attribute *attr,const char *buf, size_t count) 927 struct device_attribute *attr, const char *buf, size_t count)
970{ 928{
971 int nr = to_sensor_dev_attr(attr)->index; 929 int nr = to_sensor_dev_attr(attr)->index;
972 struct i2c_client *client = to_i2c_client(dev); 930 struct i2c_client *client = to_i2c_client(dev);
@@ -1127,20 +1085,37 @@ static const struct attribute_group lm85_group_in567 = {
1127 .attrs = lm85_attributes_in567, 1085 .attrs = lm85_attributes_in567,
1128}; 1086};
1129 1087
1088static void lm85_init_client(struct i2c_client *client)
1089{
1090 int value;
1091
1092 /* Start monitoring if needed */
1093 value = lm85_read_value(client, LM85_REG_CONFIG);
1094 if (!(value & 0x01)) {
1095 dev_info(&client->dev, "Starting monitoring\n");
1096 lm85_write_value(client, LM85_REG_CONFIG, value | 0x01);
1097 }
1098
1099 /* Warn about unusual configuration bits */
1100 if (value & 0x02)
1101 dev_warn(&client->dev, "Device configuration is locked\n");
1102 if (!(value & 0x04))
1103 dev_warn(&client->dev, "Device is not ready\n");
1104}
1105
1130static int lm85_detect(struct i2c_adapter *adapter, int address, 1106static int lm85_detect(struct i2c_adapter *adapter, int address,
1131 int kind) 1107 int kind)
1132{ 1108{
1133 int company, verstep ; 1109 int company, verstep;
1134 struct i2c_client *new_client = NULL; 1110 struct i2c_client *client;
1135 struct lm85_data *data; 1111 struct lm85_data *data;
1136 int err = 0; 1112 int err = 0;
1137 const char *type_name = ""; 1113 const char *type_name;
1138 1114
1139 if (!i2c_check_functionality(adapter, 1115 if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE_DATA)) {
1140 I2C_FUNC_SMBUS_BYTE_DATA)) {
1141 /* We need to be able to do byte I/O */ 1116 /* We need to be able to do byte I/O */
1142 goto ERROR0 ; 1117 goto ERROR0;
1143 }; 1118 }
1144 1119
1145 /* OK. For now, we presume we have a valid client. We now create the 1120 /* OK. For now, we presume we have a valid client. We now create the
1146 client structure, even though we cannot fill it completely yet. 1121 client structure, even though we cannot fill it completely yet.
@@ -1151,138 +1126,145 @@ static int lm85_detect(struct i2c_adapter *adapter, int address,
1151 goto ERROR0; 1126 goto ERROR0;
1152 } 1127 }
1153 1128
1154 new_client = &data->client; 1129 client = &data->client;
1155 i2c_set_clientdata(new_client, data); 1130 i2c_set_clientdata(client, data);
1156 new_client->addr = address; 1131 client->addr = address;
1157 new_client->adapter = adapter; 1132 client->adapter = adapter;
1158 new_client->driver = &lm85_driver; 1133 client->driver = &lm85_driver;
1159 new_client->flags = 0;
1160 1134
1161 /* Now, we do the remaining detection. */ 1135 /* Now, we do the remaining detection. */
1162 1136
1163 company = lm85_read_value(new_client, LM85_REG_COMPANY); 1137 company = lm85_read_value(client, LM85_REG_COMPANY);
1164 verstep = lm85_read_value(new_client, LM85_REG_VERSTEP); 1138 verstep = lm85_read_value(client, LM85_REG_VERSTEP);
1165 1139
1166 dev_dbg(&adapter->dev, "Detecting device at %d,0x%02x with" 1140 dev_dbg(&adapter->dev, "Detecting device at %d,0x%02x with"
1167 " COMPANY: 0x%02x and VERSTEP: 0x%02x\n", 1141 " COMPANY: 0x%02x and VERSTEP: 0x%02x\n",
1168 i2c_adapter_id(new_client->adapter), new_client->addr, 1142 i2c_adapter_id(client->adapter), client->addr,
1169 company, verstep); 1143 company, verstep);
1170 1144
1171 /* If auto-detecting, Determine the chip type. */ 1145 /* If auto-detecting, Determine the chip type. */
1172 if (kind <= 0) { 1146 if (kind <= 0) {
1173 dev_dbg(&adapter->dev, "Autodetecting device at %d,0x%02x ...\n", 1147 dev_dbg(&adapter->dev, "Autodetecting device at %d,0x%02x ...\n",
1174 i2c_adapter_id(adapter), address ); 1148 i2c_adapter_id(adapter), address);
1175 if( company == LM85_COMPANY_NATIONAL 1149 if (company == LM85_COMPANY_NATIONAL
1176 && verstep == LM85_VERSTEP_LM85C ) { 1150 && verstep == LM85_VERSTEP_LM85C) {
1177 kind = lm85c ; 1151 kind = lm85c;
1178 } else if( company == LM85_COMPANY_NATIONAL 1152 } else if (company == LM85_COMPANY_NATIONAL
1179 && verstep == LM85_VERSTEP_LM85B ) { 1153 && verstep == LM85_VERSTEP_LM85B) {
1180 kind = lm85b ; 1154 kind = lm85b;
1181 } else if( company == LM85_COMPANY_NATIONAL 1155 } else if (company == LM85_COMPANY_NATIONAL
1182 && (verstep & LM85_VERSTEP_VMASK) == LM85_VERSTEP_GENERIC ) { 1156 && (verstep & LM85_VERSTEP_VMASK) == LM85_VERSTEP_GENERIC) {
1183 dev_err(&adapter->dev, "Unrecognized version/stepping 0x%02x" 1157 dev_err(&adapter->dev, "Unrecognized version/stepping 0x%02x"
1184 " Defaulting to LM85.\n", verstep); 1158 " Defaulting to LM85.\n", verstep);
1185 kind = any_chip ; 1159 kind = any_chip;
1186 } else if( company == LM85_COMPANY_ANALOG_DEV 1160 } else if (company == LM85_COMPANY_ANALOG_DEV
1187 && verstep == LM85_VERSTEP_ADM1027 ) { 1161 && verstep == LM85_VERSTEP_ADM1027) {
1188 kind = adm1027 ; 1162 kind = adm1027;
1189 } else if( company == LM85_COMPANY_ANALOG_DEV 1163 } else if (company == LM85_COMPANY_ANALOG_DEV
1190 && (verstep == LM85_VERSTEP_ADT7463 1164 && (verstep == LM85_VERSTEP_ADT7463
1191 || verstep == LM85_VERSTEP_ADT7463C) ) { 1165 || verstep == LM85_VERSTEP_ADT7463C)) {
1192 kind = adt7463 ; 1166 kind = adt7463;
1193 } else if( company == LM85_COMPANY_ANALOG_DEV 1167 } else if (company == LM85_COMPANY_ANALOG_DEV
1194 && (verstep & LM85_VERSTEP_VMASK) == LM85_VERSTEP_GENERIC ) { 1168 && (verstep & LM85_VERSTEP_VMASK) == LM85_VERSTEP_GENERIC) {
1195 dev_err(&adapter->dev, "Unrecognized version/stepping 0x%02x" 1169 dev_err(&adapter->dev, "Unrecognized version/stepping 0x%02x"
1196 " Defaulting to Generic LM85.\n", verstep ); 1170 " Defaulting to Generic LM85.\n", verstep);
1197 kind = any_chip ; 1171 kind = any_chip;
1198 } else if( company == LM85_COMPANY_SMSC 1172 } else if (company == LM85_COMPANY_SMSC
1199 && (verstep == LM85_VERSTEP_EMC6D100_A0 1173 && (verstep == LM85_VERSTEP_EMC6D100_A0
1200 || verstep == LM85_VERSTEP_EMC6D100_A1) ) { 1174 || verstep == LM85_VERSTEP_EMC6D100_A1)) {
1201 /* Unfortunately, we can't tell a '100 from a '101 1175 /* Unfortunately, we can't tell a '100 from a '101
1202 * from the registers. Since a '101 is a '100 1176 * from the registers. Since a '101 is a '100
1203 * in a package with fewer pins and therefore no 1177 * in a package with fewer pins and therefore no
1204 * 3.3V, 1.5V or 1.8V inputs, perhaps if those 1178 * 3.3V, 1.5V or 1.8V inputs, perhaps if those
1205 * inputs read 0, then it's a '101. 1179 * inputs read 0, then it's a '101.
1206 */ 1180 */
1207 kind = emc6d100 ; 1181 kind = emc6d100;
1208 } else if( company == LM85_COMPANY_SMSC 1182 } else if (company == LM85_COMPANY_SMSC
1209 && verstep == LM85_VERSTEP_EMC6D102) { 1183 && verstep == LM85_VERSTEP_EMC6D102) {
1210 kind = emc6d102 ; 1184 kind = emc6d102;
1211 } else if( company == LM85_COMPANY_SMSC 1185 } else if (company == LM85_COMPANY_SMSC
1212 && (verstep & LM85_VERSTEP_VMASK) == LM85_VERSTEP_GENERIC) { 1186 && (verstep & LM85_VERSTEP_VMASK) == LM85_VERSTEP_GENERIC) {
1213 dev_err(&adapter->dev, "lm85: Detected SMSC chip\n"); 1187 dev_err(&adapter->dev, "lm85: Detected SMSC chip\n");
1214 dev_err(&adapter->dev, "lm85: Unrecognized version/stepping 0x%02x" 1188 dev_err(&adapter->dev, "lm85: Unrecognized version/stepping 0x%02x"
1215 " Defaulting to Generic LM85.\n", verstep ); 1189 " Defaulting to Generic LM85.\n", verstep);
1216 kind = any_chip ; 1190 kind = any_chip;
1217 } else if( kind == any_chip 1191 } else if (kind == any_chip
1218 && (verstep & LM85_VERSTEP_VMASK) == LM85_VERSTEP_GENERIC) { 1192 && (verstep & LM85_VERSTEP_VMASK) == LM85_VERSTEP_GENERIC) {
1219 dev_err(&adapter->dev, "Generic LM85 Version 6 detected\n"); 1193 dev_err(&adapter->dev, "Generic LM85 Version 6 detected\n");
1220 /* Leave kind as "any_chip" */ 1194 /* Leave kind as "any_chip" */
1221 } else { 1195 } else {
1222 dev_dbg(&adapter->dev, "Autodetection failed\n"); 1196 dev_dbg(&adapter->dev, "Autodetection failed\n");
1223 /* Not an LM85 ... */ 1197 /* Not an LM85... */
1224 if( kind == any_chip ) { /* User used force=x,y */ 1198 if (kind == any_chip) { /* User used force=x,y */
1225 dev_err(&adapter->dev, "Generic LM85 Version 6 not" 1199 dev_err(&adapter->dev, "Generic LM85 Version 6 not"
1226 " found at %d,0x%02x. Try force_lm85c.\n", 1200 " found at %d,0x%02x. Try force_lm85c.\n",
1227 i2c_adapter_id(adapter), address ); 1201 i2c_adapter_id(adapter), address);
1228 } 1202 }
1229 err = 0 ; 1203 err = 0;
1230 goto ERROR1; 1204 goto ERROR1;
1231 } 1205 }
1232 } 1206 }
1233 1207
1234 /* Fill in the chip specific driver values */ 1208 /* Fill in the chip specific driver values */
1235 if ( kind == any_chip ) { 1209 switch (kind) {
1236 type_name = "lm85"; 1210 case lm85b:
1237 } else if ( kind == lm85b ) {
1238 type_name = "lm85b"; 1211 type_name = "lm85b";
1239 } else if ( kind == lm85c ) { 1212 break;
1213 case lm85c:
1240 type_name = "lm85c"; 1214 type_name = "lm85c";
1241 } else if ( kind == adm1027 ) { 1215 break;
1216 case adm1027:
1242 type_name = "adm1027"; 1217 type_name = "adm1027";
1243 } else if ( kind == adt7463 ) { 1218 break;
1219 case adt7463:
1244 type_name = "adt7463"; 1220 type_name = "adt7463";
1245 } else if ( kind == emc6d100){ 1221 break;
1222 case emc6d100:
1246 type_name = "emc6d100"; 1223 type_name = "emc6d100";
1247 } else if ( kind == emc6d102 ) { 1224 break;
1225 case emc6d102:
1248 type_name = "emc6d102"; 1226 type_name = "emc6d102";
1227 break;
1228 default:
1229 type_name = "lm85";
1249 } 1230 }
1250 strlcpy(new_client->name, type_name, I2C_NAME_SIZE); 1231 strlcpy(client->name, type_name, I2C_NAME_SIZE);
1251 1232
1252 /* Fill in the remaining client fields */ 1233 /* Fill in the remaining client fields */
1253 data->type = kind; 1234 data->type = kind;
1254 data->valid = 0;
1255 mutex_init(&data->update_lock); 1235 mutex_init(&data->update_lock);
1256 1236
1257 /* Tell the I2C layer a new client has arrived */ 1237 /* Tell the I2C layer a new client has arrived */
1258 if ((err = i2c_attach_client(new_client))) 1238 err = i2c_attach_client(client);
1239 if (err)
1259 goto ERROR1; 1240 goto ERROR1;
1260 1241
1261 /* Set the VRM version */ 1242 /* Set the VRM version */
1262 data->vrm = vid_which_vrm(); 1243 data->vrm = vid_which_vrm();
1263 1244
1264 /* Initialize the LM85 chip */ 1245 /* Initialize the LM85 chip */
1265 lm85_init_client(new_client); 1246 lm85_init_client(client);
1266 1247
1267 /* Register sysfs hooks */ 1248 /* Register sysfs hooks */
1268 if ((err = sysfs_create_group(&new_client->dev.kobj, &lm85_group))) 1249 err = sysfs_create_group(&client->dev.kobj, &lm85_group);
1250 if (err)
1269 goto ERROR2; 1251 goto ERROR2;
1270 1252
1271 /* The ADT7463 has an optional VRM 10 mode where pin 21 is used 1253 /* The ADT7463 has an optional VRM 10 mode where pin 21 is used
1272 as a sixth digital VID input rather than an analog input. */ 1254 as a sixth digital VID input rather than an analog input. */
1273 data->vid = lm85_read_value(new_client, LM85_REG_VID); 1255 data->vid = lm85_read_value(client, LM85_REG_VID);
1274 if (!(kind == adt7463 && (data->vid & 0x80))) 1256 if (!(kind == adt7463 && (data->vid & 0x80)))
1275 if ((err = sysfs_create_group(&new_client->dev.kobj, 1257 if ((err = sysfs_create_group(&client->dev.kobj,
1276 &lm85_group_in4))) 1258 &lm85_group_in4)))
1277 goto ERROR3; 1259 goto ERROR3;
1278 1260
1279 /* The EMC6D100 has 3 additional voltage inputs */ 1261 /* The EMC6D100 has 3 additional voltage inputs */
1280 if (kind == emc6d100) 1262 if (kind == emc6d100)
1281 if ((err = sysfs_create_group(&new_client->dev.kobj, 1263 if ((err = sysfs_create_group(&client->dev.kobj,
1282 &lm85_group_in567))) 1264 &lm85_group_in567)))
1283 goto ERROR3; 1265 goto ERROR3;
1284 1266
1285 data->hwmon_dev = hwmon_device_register(&new_client->dev); 1267 data->hwmon_dev = hwmon_device_register(&client->dev);
1286 if (IS_ERR(data->hwmon_dev)) { 1268 if (IS_ERR(data->hwmon_dev)) {
1287 err = PTR_ERR(data->hwmon_dev); 1269 err = PTR_ERR(data->hwmon_dev);
1288 goto ERROR3; 1270 goto ERROR3;
@@ -1291,16 +1273,16 @@ static int lm85_detect(struct i2c_adapter *adapter, int address,
1291 return 0; 1273 return 0;
1292 1274
1293 /* Error out and cleanup code */ 1275 /* Error out and cleanup code */
1294 ERROR3: 1276 ERROR3:
1295 sysfs_remove_group(&new_client->dev.kobj, &lm85_group); 1277 sysfs_remove_group(&client->dev.kobj, &lm85_group);
1296 sysfs_remove_group(&new_client->dev.kobj, &lm85_group_in4); 1278 sysfs_remove_group(&client->dev.kobj, &lm85_group_in4);
1297 if (kind == emc6d100) 1279 if (kind == emc6d100)
1298 sysfs_remove_group(&new_client->dev.kobj, &lm85_group_in567); 1280 sysfs_remove_group(&client->dev.kobj, &lm85_group_in567);
1299 ERROR2: 1281 ERROR2:
1300 i2c_detach_client(new_client); 1282 i2c_detach_client(client);
1301 ERROR1: 1283 ERROR1:
1302 kfree(data); 1284 kfree(data);
1303 ERROR0: 1285 ERROR0:
1304 return err; 1286 return err;
1305} 1287}
1306 1288
@@ -1323,100 +1305,46 @@ static int lm85_read_value(struct i2c_client *client, u8 reg)
1323 int res; 1305 int res;
1324 1306
1325 /* What size location is it? */ 1307 /* What size location is it? */
1326 switch( reg ) { 1308 switch (reg) {
1327 case LM85_REG_FAN(0) : /* Read WORD data */ 1309 case LM85_REG_FAN(0): /* Read WORD data */
1328 case LM85_REG_FAN(1) : 1310 case LM85_REG_FAN(1):
1329 case LM85_REG_FAN(2) : 1311 case LM85_REG_FAN(2):
1330 case LM85_REG_FAN(3) : 1312 case LM85_REG_FAN(3):
1331 case LM85_REG_FAN_MIN(0) : 1313 case LM85_REG_FAN_MIN(0):
1332 case LM85_REG_FAN_MIN(1) : 1314 case LM85_REG_FAN_MIN(1):
1333 case LM85_REG_FAN_MIN(2) : 1315 case LM85_REG_FAN_MIN(2):
1334 case LM85_REG_FAN_MIN(3) : 1316 case LM85_REG_FAN_MIN(3):
1335 case LM85_REG_ALARM1 : /* Read both bytes at once */ 1317 case LM85_REG_ALARM1: /* Read both bytes at once */
1336 res = i2c_smbus_read_byte_data(client, reg) & 0xff ; 1318 res = i2c_smbus_read_byte_data(client, reg) & 0xff;
1337 res |= i2c_smbus_read_byte_data(client, reg+1) << 8 ; 1319 res |= i2c_smbus_read_byte_data(client, reg + 1) << 8;
1338 break ; 1320 break;
1339 case ADT7463_REG_TMIN_CTL1 : /* Read WORD MSB, LSB */
1340 res = i2c_smbus_read_byte_data(client, reg) << 8 ;
1341 res |= i2c_smbus_read_byte_data(client, reg+1) & 0xff ;
1342 break ;
1343 default: /* Read BYTE data */ 1321 default: /* Read BYTE data */
1344 res = i2c_smbus_read_byte_data(client, reg); 1322 res = i2c_smbus_read_byte_data(client, reg);
1345 break ; 1323 break;
1346 } 1324 }
1347 1325
1348 return res ; 1326 return res;
1349} 1327}
1350 1328
1351static int lm85_write_value(struct i2c_client *client, u8 reg, int value) 1329static void lm85_write_value(struct i2c_client *client, u8 reg, int value)
1352{ 1330{
1353 int res ; 1331 switch (reg) {
1354 1332 case LM85_REG_FAN(0): /* Write WORD data */
1355 switch( reg ) { 1333 case LM85_REG_FAN(1):
1356 case LM85_REG_FAN(0) : /* Write WORD data */ 1334 case LM85_REG_FAN(2):
1357 case LM85_REG_FAN(1) : 1335 case LM85_REG_FAN(3):
1358 case LM85_REG_FAN(2) : 1336 case LM85_REG_FAN_MIN(0):
1359 case LM85_REG_FAN(3) : 1337 case LM85_REG_FAN_MIN(1):
1360 case LM85_REG_FAN_MIN(0) : 1338 case LM85_REG_FAN_MIN(2):
1361 case LM85_REG_FAN_MIN(1) : 1339 case LM85_REG_FAN_MIN(3):
1362 case LM85_REG_FAN_MIN(2) :
1363 case LM85_REG_FAN_MIN(3) :
1364 /* NOTE: ALARM is read only, so not included here */ 1340 /* NOTE: ALARM is read only, so not included here */
1365 res = i2c_smbus_write_byte_data(client, reg, value & 0xff) ; 1341 i2c_smbus_write_byte_data(client, reg, value & 0xff);
1366 res |= i2c_smbus_write_byte_data(client, reg+1, (value>>8) & 0xff) ; 1342 i2c_smbus_write_byte_data(client, reg + 1, value >> 8);
1367 break ; 1343 break;
1368 case ADT7463_REG_TMIN_CTL1 : /* Write WORD MSB, LSB */
1369 res = i2c_smbus_write_byte_data(client, reg, (value>>8) & 0xff);
1370 res |= i2c_smbus_write_byte_data(client, reg+1, value & 0xff) ;
1371 break ;
1372 default: /* Write BYTE data */ 1344 default: /* Write BYTE data */
1373 res = i2c_smbus_write_byte_data(client, reg, value); 1345 i2c_smbus_write_byte_data(client, reg, value);
1374 break ; 1346 break;
1375 } 1347 }
1376
1377 return res ;
1378}
1379
1380static void lm85_init_client(struct i2c_client *client)
1381{
1382 int value;
1383 struct lm85_data *data = i2c_get_clientdata(client);
1384
1385 dev_dbg(&client->dev, "Initializing device\n");
1386
1387 /* Warn if part was not "READY" */
1388 value = lm85_read_value(client, LM85_REG_CONFIG);
1389 dev_dbg(&client->dev, "LM85_REG_CONFIG is: 0x%02x\n", value);
1390 if( value & 0x02 ) {
1391 dev_err(&client->dev, "Client (%d,0x%02x) config is locked.\n",
1392 i2c_adapter_id(client->adapter), client->addr );
1393 };
1394 if( ! (value & 0x04) ) {
1395 dev_err(&client->dev, "Client (%d,0x%02x) is not ready.\n",
1396 i2c_adapter_id(client->adapter), client->addr );
1397 };
1398 if( value & 0x10
1399 && ( data->type == adm1027
1400 || data->type == adt7463 ) ) {
1401 dev_err(&client->dev, "Client (%d,0x%02x) VxI mode is set. "
1402 "Please report this to the lm85 maintainer.\n",
1403 i2c_adapter_id(client->adapter), client->addr );
1404 };
1405
1406 /* WE INTENTIONALLY make no changes to the limits,
1407 * offsets, pwms, fans and zones. If they were
1408 * configured, we don't want to mess with them.
1409 * If they weren't, the default is 100% PWM, no
1410 * control and will suffice until 'sensors -s'
1411 * can be run by the user.
1412 */
1413
1414 /* Start monitoring */
1415 value = lm85_read_value(client, LM85_REG_CONFIG);
1416 /* Try to clear LOCK, Set START, save everything else */
1417 value = (value & ~ 0x02) | 0x01 ;
1418 dev_dbg(&client->dev, "Setting CONFIG to: 0x%02x\n", value);
1419 lm85_write_value(client, LM85_REG_CONFIG, value);
1420} 1348}
1421 1349
1422static struct lm85_data *lm85_update_device(struct device *dev) 1350static struct lm85_data *lm85_update_device(struct device *dev)
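
lm85_read_value()/lm85_write_value() above keep the fan and fan-min registers as two byte-wide SMBus transfers, low byte at reg and high byte at reg + 1 (the MSB-first ADT7463 TMIN_CTL special case goes away along with its register definitions). The same packing against a fake register file, as a compilable sketch (read_byte()/write_byte() stand in for i2c_smbus_read/write_byte_data()):

#include <stdio.h>
#include <stdint.h>

static uint8_t regs[256];

static int read_byte(uint8_t reg) { return regs[reg]; }
static void write_byte(uint8_t reg, uint8_t val) { regs[reg] = val; }

static int read_word(uint8_t reg)
{
        int res = read_byte(reg) & 0xff;        /* low byte first ... */

        res |= read_byte(reg + 1) << 8;         /* ... then high byte */
        return res;
}

static void write_word(uint8_t reg, int value)
{
        write_byte(reg, value & 0xff);
        write_byte(reg + 1, (value >> 8) & 0xff);
}

int main(void)
{
        write_word(0x28, 2700);                 /* LM85_REG_FAN(0) is 0x28 */
        printf("%d\n", read_word(0x28));        /* 2700 (0x0a8c) */
        return 0;
}
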
@@ -1427,28 +1355,30 @@ static struct lm85_data *lm85_update_device(struct device *dev)
1427 1355
1428 mutex_lock(&data->update_lock); 1356 mutex_lock(&data->update_lock);
1429 1357
1430 if ( !data->valid || 1358 if (!data->valid ||
1431 time_after(jiffies, data->last_reading + LM85_DATA_INTERVAL) ) { 1359 time_after(jiffies, data->last_reading + LM85_DATA_INTERVAL)) {
1432 /* Things that change quickly */ 1360 /* Things that change quickly */
1433 dev_dbg(&client->dev, "Reading sensor values\n"); 1361 dev_dbg(&client->dev, "Reading sensor values\n");
1434 1362
1435 /* Have to read extended bits first to "freeze" the 1363 /* Have to read extended bits first to "freeze" the
1436 * more significant bits that are read later. 1364 * more significant bits that are read later.
1437 * There are 2 additional resolution bits per channel and we 1365 * There are 2 additional resolution bits per channel and we
1438 * have room for 4, so we shift them to the left. 1366 * have room for 4, so we shift them to the left.
1439 */ 1367 */
1440 if ( (data->type == adm1027) || (data->type == adt7463) ) { 1368 if (data->type == adm1027 || data->type == adt7463) {
1441 int ext1 = lm85_read_value(client, 1369 int ext1 = lm85_read_value(client,
1442 ADM1027_REG_EXTEND_ADC1); 1370 ADM1027_REG_EXTEND_ADC1);
1443 int ext2 = lm85_read_value(client, 1371 int ext2 = lm85_read_value(client,
1444 ADM1027_REG_EXTEND_ADC2); 1372 ADM1027_REG_EXTEND_ADC2);
1445 int val = (ext1 << 8) + ext2; 1373 int val = (ext1 << 8) + ext2;
1446 1374
1447 for(i = 0; i <= 4; i++) 1375 for (i = 0; i <= 4; i++)
1448 data->in_ext[i] = ((val>>(i * 2))&0x03) << 2; 1376 data->in_ext[i] =
1377 ((val >> (i * 2)) & 0x03) << 2;
1449 1378
1450 for(i = 0; i <= 2; i++) 1379 for (i = 0; i <= 2; i++)
1451 data->temp_ext[i] = (val>>((i + 4) * 2))&0x0c; 1380 data->temp_ext[i] =
1381 (val >> ((i + 4) * 2)) & 0x0c;
1452 } 1382 }
1453 1383
1454 data->vid = lm85_read_value(client, LM85_REG_VID); 1384 data->vid = lm85_read_value(client, LM85_REG_VID);
@@ -1456,6 +1386,8 @@ static struct lm85_data *lm85_update_device(struct device *dev)
1456 for (i = 0; i <= 3; ++i) { 1386 for (i = 0; i <= 3; ++i) {
1457 data->in[i] = 1387 data->in[i] =
1458 lm85_read_value(client, LM85_REG_IN(i)); 1388 lm85_read_value(client, LM85_REG_IN(i));
1389 data->fan[i] =
1390 lm85_read_value(client, LM85_REG_FAN(i));
1459 } 1391 }
1460 1392
1461 if (!(data->type == adt7463 && (data->vid & 0x80))) { 1393 if (!(data->type == adt7463 && (data->vid & 0x80))) {
@@ -1463,38 +1395,25 @@ static struct lm85_data *lm85_update_device(struct device *dev)
1463 LM85_REG_IN(4)); 1395 LM85_REG_IN(4));
1464 } 1396 }
1465 1397
1466 for (i = 0; i <= 3; ++i) {
1467 data->fan[i] =
1468 lm85_read_value(client, LM85_REG_FAN(i));
1469 }
1470
1471 for (i = 0; i <= 2; ++i) { 1398 for (i = 0; i <= 2; ++i) {
1472 data->temp[i] = 1399 data->temp[i] =
1473 lm85_read_value(client, LM85_REG_TEMP(i)); 1400 lm85_read_value(client, LM85_REG_TEMP(i));
1474 }
1475
1476 for (i = 0; i <= 2; ++i) {
1477 data->pwm[i] = 1401 data->pwm[i] =
1478 lm85_read_value(client, LM85_REG_PWM(i)); 1402 lm85_read_value(client, LM85_REG_PWM(i));
1479 } 1403 }
1480 1404
1481 data->alarms = lm85_read_value(client, LM85_REG_ALARM1); 1405 data->alarms = lm85_read_value(client, LM85_REG_ALARM1);
1482 1406
1483 if ( data->type == adt7463 ) { 1407 if (data->type == emc6d100) {
1484 if( data->therm_total < ULONG_MAX - 256 ) {
1485 data->therm_total +=
1486 lm85_read_value(client, ADT7463_REG_THERM );
1487 }
1488 } else if ( data->type == emc6d100 ) {
1489 /* Three more voltage sensors */ 1408 /* Three more voltage sensors */
1490 for (i = 5; i <= 7; ++i) { 1409 for (i = 5; i <= 7; ++i) {
1491 data->in[i] = 1410 data->in[i] = lm85_read_value(client,
1492 lm85_read_value(client, EMC6D100_REG_IN(i)); 1411 EMC6D100_REG_IN(i));
1493 } 1412 }
1494 /* More alarm bits */ 1413 /* More alarm bits */
1495 data->alarms |= 1414 data->alarms |= lm85_read_value(client,
1496 lm85_read_value(client, EMC6D100_REG_ALARM3) << 16; 1415 EMC6D100_REG_ALARM3) << 16;
1497 } else if (data->type == emc6d102 ) { 1416 } else if (data->type == emc6d102) {
1498 /* Have to read LSB bits after the MSB ones because 1417 /* Have to read LSB bits after the MSB ones because
1499 the reading of the MSB bits has frozen the 1418 the reading of the MSB bits has frozen the
1500 LSBs (backward from the ADM1027). 1419 LSBs (backward from the ADM1027).
@@ -1509,20 +1428,20 @@ static struct lm85_data *lm85_update_device(struct device *dev)
1509 EMC6D102_REG_EXTEND_ADC4); 1428 EMC6D102_REG_EXTEND_ADC4);
1510 data->in_ext[0] = ext3 & 0x0f; 1429 data->in_ext[0] = ext3 & 0x0f;
1511 data->in_ext[1] = ext4 & 0x0f; 1430 data->in_ext[1] = ext4 & 0x0f;
1512 data->in_ext[2] = (ext4 >> 4) & 0x0f; 1431 data->in_ext[2] = ext4 >> 4;
1513 data->in_ext[3] = (ext3 >> 4) & 0x0f; 1432 data->in_ext[3] = ext3 >> 4;
1514 data->in_ext[4] = (ext2 >> 4) & 0x0f; 1433 data->in_ext[4] = ext2 >> 4;
1515 1434
1516 data->temp_ext[0] = ext1 & 0x0f; 1435 data->temp_ext[0] = ext1 & 0x0f;
1517 data->temp_ext[1] = ext2 & 0x0f; 1436 data->temp_ext[1] = ext2 & 0x0f;
1518 data->temp_ext[2] = (ext1 >> 4) & 0x0f; 1437 data->temp_ext[2] = ext1 >> 4;
1519 } 1438 }
1520 1439
1521 data->last_reading = jiffies ; 1440 data->last_reading = jiffies;
1522 }; /* last_reading */ 1441 } /* last_reading */
1523 1442
1524 if ( !data->valid || 1443 if (!data->valid ||
1525 time_after(jiffies, data->last_config + LM85_CONFIG_INTERVAL) ) { 1444 time_after(jiffies, data->last_config + LM85_CONFIG_INTERVAL)) {
1526 /* Things that don't change often */ 1445 /* Things that don't change often */
1527 dev_dbg(&client->dev, "Reading config values\n"); 1446 dev_dbg(&client->dev, "Reading config values\n");
1528 1447
@@ -1531,6 +1450,8 @@ static struct lm85_data *lm85_update_device(struct device *dev)
1531 lm85_read_value(client, LM85_REG_IN_MIN(i)); 1450 lm85_read_value(client, LM85_REG_IN_MIN(i));
1532 data->in_max[i] = 1451 data->in_max[i] =
1533 lm85_read_value(client, LM85_REG_IN_MAX(i)); 1452 lm85_read_value(client, LM85_REG_IN_MAX(i));
1453 data->fan_min[i] =
1454 lm85_read_value(client, LM85_REG_FAN_MIN(i));
1534 } 1455 }
1535 1456
1536 if (!(data->type == adt7463 && (data->vid & 0x80))) { 1457 if (!(data->type == adt7463 && (data->vid & 0x80))) {
@@ -1540,34 +1461,28 @@ static struct lm85_data *lm85_update_device(struct device *dev)
1540 LM85_REG_IN_MAX(4)); 1461 LM85_REG_IN_MAX(4));
1541 } 1462 }
1542 1463
1543 if ( data->type == emc6d100 ) { 1464 if (data->type == emc6d100) {
1544 for (i = 5; i <= 7; ++i) { 1465 for (i = 5; i <= 7; ++i) {
1545 data->in_min[i] = 1466 data->in_min[i] = lm85_read_value(client,
1546 lm85_read_value(client, EMC6D100_REG_IN_MIN(i)); 1467 EMC6D100_REG_IN_MIN(i));
1547 data->in_max[i] = 1468 data->in_max[i] = lm85_read_value(client,
1548 lm85_read_value(client, EMC6D100_REG_IN_MAX(i)); 1469 EMC6D100_REG_IN_MAX(i));
1549 } 1470 }
1550 } 1471 }
1551 1472
1552 for (i = 0; i <= 3; ++i) {
1553 data->fan_min[i] =
1554 lm85_read_value(client, LM85_REG_FAN_MIN(i));
1555 }
1556
1557 for (i = 0; i <= 2; ++i) { 1473 for (i = 0; i <= 2; ++i) {
1474 int val;
1475
1558 data->temp_min[i] = 1476 data->temp_min[i] =
1559 lm85_read_value(client, LM85_REG_TEMP_MIN(i)); 1477 lm85_read_value(client, LM85_REG_TEMP_MIN(i));
1560 data->temp_max[i] = 1478 data->temp_max[i] =
1561 lm85_read_value(client, LM85_REG_TEMP_MAX(i)); 1479 lm85_read_value(client, LM85_REG_TEMP_MAX(i));
1562 }
1563 1480
1564 for (i = 0; i <= 2; ++i) {
1565 int val ;
1566 data->autofan[i].config = 1481 data->autofan[i].config =
1567 lm85_read_value(client, LM85_REG_AFAN_CONFIG(i)); 1482 lm85_read_value(client, LM85_REG_AFAN_CONFIG(i));
1568 val = lm85_read_value(client, LM85_REG_AFAN_RANGE(i)); 1483 val = lm85_read_value(client, LM85_REG_AFAN_RANGE(i));
1569 data->autofan[i].freq = val & 0x07 ; 1484 data->autofan[i].freq = val & 0x07;
1570 data->zone[i].range = (val >> 4) & 0x0f ; 1485 data->zone[i].range = val >> 4;
1571 data->autofan[i].min_pwm = 1486 data->autofan[i].min_pwm =
1572 lm85_read_value(client, LM85_REG_AFAN_MINPWM(i)); 1487 lm85_read_value(client, LM85_REG_AFAN_MINPWM(i));
1573 data->zone[i].limit = 1488 data->zone[i].limit =
@@ -1577,50 +1492,19 @@ static struct lm85_data *lm85_update_device(struct device *dev)
1577 } 1492 }
1578 1493
1579 i = lm85_read_value(client, LM85_REG_AFAN_SPIKE1); 1494 i = lm85_read_value(client, LM85_REG_AFAN_SPIKE1);
1580 data->smooth[0] = i & 0x0f ; 1495 data->autofan[0].min_off = (i & 0x20) != 0;
1581 data->syncpwm3 = i & 0x10 ; /* Save PWM3 config */ 1496 data->autofan[1].min_off = (i & 0x40) != 0;
1582 data->autofan[0].min_off = (i & 0x20) != 0 ; 1497 data->autofan[2].min_off = (i & 0x80) != 0;
1583 data->autofan[1].min_off = (i & 0x40) != 0 ;
1584 data->autofan[2].min_off = (i & 0x80) != 0 ;
1585 i = lm85_read_value(client, LM85_REG_AFAN_SPIKE2);
1586 data->smooth[1] = (i>>4) & 0x0f ;
1587 data->smooth[2] = i & 0x0f ;
1588 1498
1589 i = lm85_read_value(client, LM85_REG_AFAN_HYST1); 1499 i = lm85_read_value(client, LM85_REG_AFAN_HYST1);
1590 data->zone[0].hyst = (i>>4) & 0x0f ; 1500 data->zone[0].hyst = i >> 4;
1591 data->zone[1].hyst = i & 0x0f ; 1501 data->zone[1].hyst = i & 0x0f;
1592 1502
1593 i = lm85_read_value(client, LM85_REG_AFAN_HYST2); 1503 i = lm85_read_value(client, LM85_REG_AFAN_HYST2);
1594 data->zone[2].hyst = (i>>4) & 0x0f ; 1504 data->zone[2].hyst = i >> 4;
1595 1505
1596 if ( (data->type == lm85b) || (data->type == lm85c) ) {
1597 data->tach_mode = lm85_read_value(client,
1598 LM85_REG_TACH_MODE );
1599 data->spinup_ctl = lm85_read_value(client,
1600 LM85_REG_SPINUP_CTL );
1601 } else if ( (data->type == adt7463) || (data->type == adm1027) ) {
1602 if ( data->type == adt7463 ) {
1603 for (i = 0; i <= 2; ++i) {
1604 data->oppoint[i] = lm85_read_value(client,
1605 ADT7463_REG_OPPOINT(i) );
1606 }
1607 data->tmin_ctl = lm85_read_value(client,
1608 ADT7463_REG_TMIN_CTL1 );
1609 data->therm_limit = lm85_read_value(client,
1610 ADT7463_REG_THERM_LIMIT );
1611 }
1612 for (i = 0; i <= 2; ++i) {
1613 data->temp_offset[i] = lm85_read_value(client,
1614 ADM1027_REG_TEMP_OFFSET(i) );
1615 }
1616 data->tach_mode = lm85_read_value(client,
1617 ADM1027_REG_CONFIG3 );
1618 data->fan_ppr = lm85_read_value(client,
1619 ADM1027_REG_FAN_PPR );
1620 }
1621
1622 data->last_config = jiffies; 1506 data->last_config = jiffies;
1623 }; /* last_config */ 1507 } /* last_config */
1624 1508
1625 data->valid = 1; 1509 data->valid = 1;
1626 1510
@@ -1635,17 +1519,15 @@ static int __init sm_lm85_init(void)
1635 return i2c_add_driver(&lm85_driver); 1519 return i2c_add_driver(&lm85_driver);
1636} 1520}
1637 1521
1638static void __exit sm_lm85_exit(void) 1522static void __exit sm_lm85_exit(void)
1639{ 1523{
1640 i2c_del_driver(&lm85_driver); 1524 i2c_del_driver(&lm85_driver);
1641} 1525}
1642 1526
1643/* Thanks to Richard Barrington for adding the LM85 to sensors-detect.
1644 * Thanks to Margit Schubert-While <margitsw@t-online.de> for help with
1645 * post 2.7.0 CVS changes.
1646 */
1647MODULE_LICENSE("GPL"); 1527MODULE_LICENSE("GPL");
1648MODULE_AUTHOR("Philip Pokorny <ppokorny@penguincomputing.com>, Margit Schubert-While <margitsw@t-online.de>, Justin Thiessen <jthiessen@penguincomputing.com"); 1528MODULE_AUTHOR("Philip Pokorny <ppokorny@penguincomputing.com>, "
1529 "Margit Schubert-While <margitsw@t-online.de>, "
1530 "Justin Thiessen <jthiessen@penguincomputing.com>");
1649MODULE_DESCRIPTION("LM85-B, LM85-C driver"); 1531MODULE_DESCRIPTION("LM85-B, LM85-C driver");
1650 1532
1651module_init(sm_lm85_init); 1533module_init(sm_lm85_init);
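
[Editorial aside] The lm85 hunk above packs the two ADM1027/ADT7463 extension registers into a single 16-bit word and then slices out two extra resolution bits per channel. Below is a rough standalone sketch of just that bit manipulation, with invented register values; it is not the driver code and omits all of the I2C plumbing.

/*
 * Illustrative sketch only: how the two ADM1027 "extended ADC"
 * registers are combined and sliced into per-channel 2-bit extensions,
 * mirroring the lm85_update_device() hunk above.  Register contents
 * are made up for the demo.
 */
#include <stdio.h>

int main(void)
{
        unsigned int ext1 = 0xA5;       /* pretend ADM1027_REG_EXTEND_ADC1 */
        unsigned int ext2 = 0x3C;       /* pretend ADM1027_REG_EXTEND_ADC2 */
        unsigned int val = (ext1 << 8) + ext2;
        unsigned int in_ext[5], temp_ext[3];
        int i;

        /* in0..in4: 2 bits each from the low ten bits; shifted left two
         * because the driver leaves room for 4 extension bits */
        for (i = 0; i <= 4; i++)
                in_ext[i] = ((val >> (i * 2)) & 0x03) << 2;

        /* temp0..temp2: 2 bits each from the upper half, which the
         * 0x0c mask leaves sitting in bit positions 2-3 */
        for (i = 0; i <= 2; i++)
                temp_ext[i] = (val >> ((i + 4) * 2)) & 0x0c;

        for (i = 0; i <= 4; i++)
                printf("in_ext[%d]   = %u\n", i, in_ext[i]);
        for (i = 0; i <= 2; i++)
                printf("temp_ext[%d] = %u\n", i, temp_ext[i]);
        return 0;
}
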
diff --git a/drivers/hwmon/thmc50.c b/drivers/hwmon/thmc50.c
index 3b01001108c1..7d97431e132f 100644
--- a/drivers/hwmon/thmc50.c
+++ b/drivers/hwmon/thmc50.c
@@ -55,8 +55,11 @@ I2C_CLIENT_MODULE_PARM(adm1022_temp3, "List of adapter,address pairs "
55static const u8 THMC50_REG_TEMP[] = { 0x27, 0x26, 0x20 }; 55static const u8 THMC50_REG_TEMP[] = { 0x27, 0x26, 0x20 };
56static const u8 THMC50_REG_TEMP_MIN[] = { 0x3A, 0x38, 0x2C }; 56static const u8 THMC50_REG_TEMP_MIN[] = { 0x3A, 0x38, 0x2C };
57static const u8 THMC50_REG_TEMP_MAX[] = { 0x39, 0x37, 0x2B }; 57static const u8 THMC50_REG_TEMP_MAX[] = { 0x39, 0x37, 0x2B };
58static const u8 THMC50_REG_TEMP_CRITICAL[] = { 0x13, 0x14, 0x14 };
59static const u8 THMC50_REG_TEMP_DEFAULT[] = { 0x17, 0x18, 0x18 };
58 60
59#define THMC50_REG_CONF_nFANOFF 0x20 61#define THMC50_REG_CONF_nFANOFF 0x20
62#define THMC50_REG_CONF_PROGRAMMED 0x08
60 63
61/* Each client has this additional data */ 64/* Each client has this additional data */
62struct thmc50_data { 65struct thmc50_data {
@@ -72,6 +75,7 @@ struct thmc50_data {
72 s8 temp_input[3]; 75 s8 temp_input[3];
73 s8 temp_max[3]; 76 s8 temp_max[3];
74 s8 temp_min[3]; 77 s8 temp_min[3];
78 s8 temp_critical[3];
75 u8 analog_out; 79 u8 analog_out;
76 u8 alarms; 80 u8 alarms;
77}; 81};
@@ -199,6 +203,15 @@ static ssize_t set_temp_max(struct device *dev, struct device_attribute *attr,
199 return count; 203 return count;
200} 204}
201 205
206static ssize_t show_temp_critical(struct device *dev,
207 struct device_attribute *attr,
208 char *buf)
209{
210 int nr = to_sensor_dev_attr(attr)->index;
211 struct thmc50_data *data = thmc50_update_device(dev);
212 return sprintf(buf, "%d\n", data->temp_critical[nr] * 1000);
213}
214
202static ssize_t show_alarm(struct device *dev, struct device_attribute *attr, 215static ssize_t show_alarm(struct device *dev, struct device_attribute *attr,
203 char *buf) 216 char *buf)
204{ 217{
@@ -214,7 +227,9 @@ static SENSOR_DEVICE_ATTR(temp##offset##_input, S_IRUGO, show_temp, \
214static SENSOR_DEVICE_ATTR(temp##offset##_min, S_IRUGO | S_IWUSR, \ 227static SENSOR_DEVICE_ATTR(temp##offset##_min, S_IRUGO | S_IWUSR, \
215 show_temp_min, set_temp_min, offset - 1); \ 228 show_temp_min, set_temp_min, offset - 1); \
216static SENSOR_DEVICE_ATTR(temp##offset##_max, S_IRUGO | S_IWUSR, \ 229static SENSOR_DEVICE_ATTR(temp##offset##_max, S_IRUGO | S_IWUSR, \
217 show_temp_max, set_temp_max, offset - 1); 230 show_temp_max, set_temp_max, offset - 1); \
231static SENSOR_DEVICE_ATTR(temp##offset##_crit, S_IRUGO, \
232 show_temp_critical, NULL, offset - 1);
218 233
219temp_reg(1); 234temp_reg(1);
220temp_reg(2); 235temp_reg(2);
@@ -234,10 +249,12 @@ static struct attribute *thmc50_attributes[] = {
234 &sensor_dev_attr_temp1_max.dev_attr.attr, 249 &sensor_dev_attr_temp1_max.dev_attr.attr,
235 &sensor_dev_attr_temp1_min.dev_attr.attr, 250 &sensor_dev_attr_temp1_min.dev_attr.attr,
236 &sensor_dev_attr_temp1_input.dev_attr.attr, 251 &sensor_dev_attr_temp1_input.dev_attr.attr,
252 &sensor_dev_attr_temp1_crit.dev_attr.attr,
237 &sensor_dev_attr_temp1_alarm.dev_attr.attr, 253 &sensor_dev_attr_temp1_alarm.dev_attr.attr,
238 &sensor_dev_attr_temp2_max.dev_attr.attr, 254 &sensor_dev_attr_temp2_max.dev_attr.attr,
239 &sensor_dev_attr_temp2_min.dev_attr.attr, 255 &sensor_dev_attr_temp2_min.dev_attr.attr,
240 &sensor_dev_attr_temp2_input.dev_attr.attr, 256 &sensor_dev_attr_temp2_input.dev_attr.attr,
257 &sensor_dev_attr_temp2_crit.dev_attr.attr,
241 &sensor_dev_attr_temp2_alarm.dev_attr.attr, 258 &sensor_dev_attr_temp2_alarm.dev_attr.attr,
242 &sensor_dev_attr_temp2_fault.dev_attr.attr, 259 &sensor_dev_attr_temp2_fault.dev_attr.attr,
243 &sensor_dev_attr_pwm1.dev_attr.attr, 260 &sensor_dev_attr_pwm1.dev_attr.attr,
@@ -254,6 +271,7 @@ static struct attribute *temp3_attributes[] = {
254 &sensor_dev_attr_temp3_max.dev_attr.attr, 271 &sensor_dev_attr_temp3_max.dev_attr.attr,
255 &sensor_dev_attr_temp3_min.dev_attr.attr, 272 &sensor_dev_attr_temp3_min.dev_attr.attr,
256 &sensor_dev_attr_temp3_input.dev_attr.attr, 273 &sensor_dev_attr_temp3_input.dev_attr.attr,
274 &sensor_dev_attr_temp3_crit.dev_attr.attr,
257 &sensor_dev_attr_temp3_alarm.dev_attr.attr, 275 &sensor_dev_attr_temp3_alarm.dev_attr.attr,
258 &sensor_dev_attr_temp3_fault.dev_attr.attr, 276 &sensor_dev_attr_temp3_fault.dev_attr.attr,
259 NULL 277 NULL
@@ -429,6 +447,10 @@ static struct thmc50_data *thmc50_update_device(struct device *dev)
429 447
430 int temps = data->has_temp3 ? 3 : 2; 448 int temps = data->has_temp3 ? 3 : 2;
431 int i; 449 int i;
450 int prog = i2c_smbus_read_byte_data(client, THMC50_REG_CONF);
451
452 prog &= THMC50_REG_CONF_PROGRAMMED;
453
432 for (i = 0; i < temps; i++) { 454 for (i = 0; i < temps; i++) {
433 data->temp_input[i] = i2c_smbus_read_byte_data(client, 455 data->temp_input[i] = i2c_smbus_read_byte_data(client,
434 THMC50_REG_TEMP[i]); 456 THMC50_REG_TEMP[i]);
@@ -436,6 +458,10 @@ static struct thmc50_data *thmc50_update_device(struct device *dev)
436 THMC50_REG_TEMP_MAX[i]); 458 THMC50_REG_TEMP_MAX[i]);
437 data->temp_min[i] = i2c_smbus_read_byte_data(client, 459 data->temp_min[i] = i2c_smbus_read_byte_data(client,
438 THMC50_REG_TEMP_MIN[i]); 460 THMC50_REG_TEMP_MIN[i]);
461 data->temp_critical[i] =
462 i2c_smbus_read_byte_data(client,
463 prog ? THMC50_REG_TEMP_CRITICAL[i]
464 : THMC50_REG_TEMP_DEFAULT[i]);
439 } 465 }
440 data->analog_out = 466 data->analog_out =
441 i2c_smbus_read_byte_data(client, THMC50_REG_ANALOG_OUT); 467 i2c_smbus_read_byte_data(client, THMC50_REG_ANALOG_OUT);
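
[Editorial aside] The thmc50 change adds temp*_crit attributes that read either the critical-limit registers or the power-on default registers, depending on the "programmed" bit in the configuration register, and report the result in millidegrees. A minimal userspace sketch of that selection and scaling follows, with a stubbed register read and made-up values; it is not the driver itself.

/*
 * Standalone sketch: register selection and millidegree scaling for
 * temp*_crit, as in the thmc50 hunks above.  fake_read() stands in for
 * i2c_smbus_read_byte_data() and the CONF readback is invented.
 */
#include <stdio.h>

#define THMC50_REG_CONF_PROGRAMMED 0x08

static const unsigned char reg_temp_critical[] = { 0x13, 0x14, 0x14 };
static const unsigned char reg_temp_default[]  = { 0x17, 0x18, 0x18 };

/* stand-in for the SMBus byte read: always answers 85 degrees C */
static int fake_read(unsigned char reg)
{
        (void)reg;
        return 0x55;
}

int main(void)
{
        int conf = 0x2b | THMC50_REG_CONF_PROGRAMMED;   /* pretend CONF readback */
        int prog = conf & THMC50_REG_CONF_PROGRAMMED;
        int nr;

        for (nr = 0; nr < 3; nr++) {
                signed char raw = fake_read(prog ? reg_temp_critical[nr]
                                                 : reg_temp_default[nr]);
                printf("temp%d_crit = %d\n", nr + 1, raw * 1000);
        }
        return 0;
}
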
diff --git a/drivers/hwmon/w83627hf.c b/drivers/hwmon/w83627hf.c
index 9564fb069957..b30e5796cb26 100644
--- a/drivers/hwmon/w83627hf.c
+++ b/drivers/hwmon/w83627hf.c
@@ -67,10 +67,6 @@ module_param(force_i2c, byte, 0);
67MODULE_PARM_DESC(force_i2c, 67MODULE_PARM_DESC(force_i2c,
68 "Initialize the i2c address of the sensors"); 68 "Initialize the i2c address of the sensors");
69 69
70static int reset;
71module_param(reset, bool, 0);
72MODULE_PARM_DESC(reset, "Set to one to reset chip on load");
73
74static int init = 1; 70static int init = 1;
75module_param(init, bool, 0); 71module_param(init, bool, 0);
76MODULE_PARM_DESC(init, "Set to zero to bypass chip initialization"); 72MODULE_PARM_DESC(init, "Set to zero to bypass chip initialization");
@@ -209,6 +205,13 @@ static const u16 w83627hf_reg_temp_over[] = { 0x39, 0x155, 0x255 };
209#define W83627HF_REG_PWM1 0x5A 205#define W83627HF_REG_PWM1 0x5A
210#define W83627HF_REG_PWM2 0x5B 206#define W83627HF_REG_PWM2 0x5B
211 207
208static const u8 W83627THF_REG_PWM_ENABLE[] = {
209 0x04, /* FAN 1 mode */
210 0x04, /* FAN 2 mode */
211 0x12, /* FAN AUX mode */
212};
213static const u8 W83627THF_PWM_ENABLE_SHIFT[] = { 2, 4, 1 };
214
212#define W83627THF_REG_PWM1 0x01 /* 697HF/637HF/687THF too */ 215#define W83627THF_REG_PWM1 0x01 /* 697HF/637HF/687THF too */
213#define W83627THF_REG_PWM2 0x03 /* 697HF/637HF/687THF too */ 216#define W83627THF_REG_PWM2 0x03 /* 697HF/637HF/687THF too */
214#define W83627THF_REG_PWM3 0x11 /* 637HF/687THF too */ 217#define W83627THF_REG_PWM3 0x11 /* 637HF/687THF too */
@@ -366,6 +369,9 @@ struct w83627hf_data {
366 u32 alarms; /* Register encoding, combined */ 369 u32 alarms; /* Register encoding, combined */
367 u32 beep_mask; /* Register encoding, combined */ 370 u32 beep_mask; /* Register encoding, combined */
368 u8 pwm[3]; /* Register value */ 371 u8 pwm[3]; /* Register value */
372 u8 pwm_enable[3]; /* 1 = manual
373 2 = thermal cruise (also called SmartFan I)
374 3 = fan speed cruise */
369 u8 pwm_freq[3]; /* Register value */ 375 u8 pwm_freq[3]; /* Register value */
370 u16 sens[3]; /* 1 = pentium diode; 2 = 3904 diode; 376 u16 sens[3]; /* 1 = pentium diode; 2 = 3904 diode;
371 4 = thermistor */ 377 4 = thermistor */
@@ -957,6 +963,42 @@ static SENSOR_DEVICE_ATTR(pwm2, S_IRUGO|S_IWUSR, show_pwm, store_pwm, 1);
957static SENSOR_DEVICE_ATTR(pwm3, S_IRUGO|S_IWUSR, show_pwm, store_pwm, 2); 963static SENSOR_DEVICE_ATTR(pwm3, S_IRUGO|S_IWUSR, show_pwm, store_pwm, 2);
958 964
959static ssize_t 965static ssize_t
966show_pwm_enable(struct device *dev, struct device_attribute *devattr, char *buf)
967{
968 int nr = to_sensor_dev_attr(devattr)->index;
969 struct w83627hf_data *data = w83627hf_update_device(dev);
970 return sprintf(buf, "%d\n", data->pwm_enable[nr]);
971}
972
973static ssize_t
974store_pwm_enable(struct device *dev, struct device_attribute *devattr,
975 const char *buf, size_t count)
976{
977 int nr = to_sensor_dev_attr(devattr)->index;
978 struct w83627hf_data *data = dev_get_drvdata(dev);
979 unsigned long val = simple_strtoul(buf, NULL, 10);
980 u8 reg;
981
982 if (!val || (val > 3)) /* modes 1, 2 and 3 are supported */
983 return -EINVAL;
984 mutex_lock(&data->update_lock);
985 data->pwm_enable[nr] = val;
986 reg = w83627hf_read_value(data, W83627THF_REG_PWM_ENABLE[nr]);
987 reg &= ~(0x03 << W83627THF_PWM_ENABLE_SHIFT[nr]);
988 reg |= (val - 1) << W83627THF_PWM_ENABLE_SHIFT[nr];
989 w83627hf_write_value(data, W83627THF_REG_PWM_ENABLE[nr], reg);
990 mutex_unlock(&data->update_lock);
991 return count;
992}
993
994static SENSOR_DEVICE_ATTR(pwm1_enable, S_IRUGO|S_IWUSR, show_pwm_enable,
995 store_pwm_enable, 0);
996static SENSOR_DEVICE_ATTR(pwm2_enable, S_IRUGO|S_IWUSR, show_pwm_enable,
997 store_pwm_enable, 1);
998static SENSOR_DEVICE_ATTR(pwm3_enable, S_IRUGO|S_IWUSR, show_pwm_enable,
999 store_pwm_enable, 2);
1000
1001static ssize_t
960show_pwm_freq(struct device *dev, struct device_attribute *devattr, char *buf) 1002show_pwm_freq(struct device *dev, struct device_attribute *devattr, char *buf)
961{ 1003{
962 int nr = to_sensor_dev_attr(devattr)->index; 1004 int nr = to_sensor_dev_attr(devattr)->index;
@@ -1223,6 +1265,11 @@ static struct attribute *w83627hf_attributes_opt[] = {
1223 &sensor_dev_attr_pwm1_freq.dev_attr.attr, 1265 &sensor_dev_attr_pwm1_freq.dev_attr.attr,
1224 &sensor_dev_attr_pwm2_freq.dev_attr.attr, 1266 &sensor_dev_attr_pwm2_freq.dev_attr.attr,
1225 &sensor_dev_attr_pwm3_freq.dev_attr.attr, 1267 &sensor_dev_attr_pwm3_freq.dev_attr.attr,
1268
1269 &sensor_dev_attr_pwm1_enable.dev_attr.attr,
1270 &sensor_dev_attr_pwm2_enable.dev_attr.attr,
1271 &sensor_dev_attr_pwm3_enable.dev_attr.attr,
1272
1226 NULL 1273 NULL
1227}; 1274};
1228 1275
@@ -1366,6 +1413,19 @@ static int __devinit w83627hf_probe(struct platform_device *pdev)
1366 &sensor_dev_attr_pwm3_freq.dev_attr))) 1413 &sensor_dev_attr_pwm3_freq.dev_attr)))
1367 goto ERROR4; 1414 goto ERROR4;
1368 1415
1416 if (data->type != w83627hf)
1417 if ((err = device_create_file(dev,
1418 &sensor_dev_attr_pwm1_enable.dev_attr))
1419 || (err = device_create_file(dev,
1420 &sensor_dev_attr_pwm2_enable.dev_attr)))
1421 goto ERROR4;
1422
1423 if (data->type == w83627thf || data->type == w83637hf
1424 || data->type == w83687thf)
1425 if ((err = device_create_file(dev,
1426 &sensor_dev_attr_pwm3_enable.dev_attr)))
1427 goto ERROR4;
1428
1369 data->hwmon_dev = hwmon_device_register(dev); 1429 data->hwmon_dev = hwmon_device_register(dev);
1370 if (IS_ERR(data->hwmon_dev)) { 1430 if (IS_ERR(data->hwmon_dev)) {
1371 err = PTR_ERR(data->hwmon_dev); 1431 err = PTR_ERR(data->hwmon_dev);
@@ -1536,29 +1596,6 @@ static void __devinit w83627hf_init_device(struct platform_device *pdev)
1536 enum chips type = data->type; 1596 enum chips type = data->type;
1537 u8 tmp; 1597 u8 tmp;
1538 1598
1539 if (reset) {
1540 /* Resetting the chip has been the default for a long time,
1541 but repeatedly caused problems (fans going to full
1542 speed...) so it is now optional. It might even go away if
1543 nobody reports it as being useful, as I see very little
1544 reason why this would be needed at all. */
1545 dev_info(&pdev->dev, "If reset=1 solved a problem you were "
1546 "having, please report!\n");
1547
1548 /* save this register */
1549 i = w83627hf_read_value(data, W83781D_REG_BEEP_CONFIG);
1550 /* Reset all except Watchdog values and last conversion values
1551 This sets fan-divs to 2, among others */
1552 w83627hf_write_value(data, W83781D_REG_CONFIG, 0x80);
1553 /* Restore the register and disable power-on abnormal beep.
1554 This saves FAN 1/2/3 input/output values set by BIOS. */
1555 w83627hf_write_value(data, W83781D_REG_BEEP_CONFIG, i | 0x80);
1556 /* Disable master beep-enable (reset turns it on).
1557 Individual beeps should be reset to off but for some reason
1558 disabling this bit helps some people not get beeped */
1559 w83627hf_write_value(data, W83781D_REG_BEEP_INTS2, 0);
1560 }
1561
1562 /* Minimize conflicts with other winbond i2c-only clients... */ 1599 /* Minimize conflicts with other winbond i2c-only clients... */
1563 /* disable i2c subclients... how to disable main i2c client?? */ 1600 /* disable i2c subclients... how to disable main i2c client?? */
1564 /* force i2c address to relatively uncommon address */ 1601 /* force i2c address to relatively uncommon address */
@@ -1655,6 +1692,7 @@ static struct w83627hf_data *w83627hf_update_device(struct device *dev)
1655{ 1692{
1656 struct w83627hf_data *data = dev_get_drvdata(dev); 1693 struct w83627hf_data *data = dev_get_drvdata(dev);
1657 int i, num_temps = (data->type == w83697hf) ? 2 : 3; 1694 int i, num_temps = (data->type == w83697hf) ? 2 : 3;
1695 int num_pwms = (data->type == w83697hf) ? 2 : 3;
1658 1696
1659 mutex_lock(&data->update_lock); 1697 mutex_lock(&data->update_lock);
1660 1698
@@ -1707,6 +1745,15 @@ static struct w83627hf_data *w83627hf_update_device(struct device *dev)
1707 break; 1745 break;
1708 } 1746 }
1709 } 1747 }
1748 if (data->type != w83627hf) {
1749 for (i = 0; i < num_pwms; i++) {
1750 u8 tmp = w83627hf_read_value(data,
1751 W83627THF_REG_PWM_ENABLE[i]);
1752 data->pwm_enable[i] =
1753 ((tmp >> W83627THF_PWM_ENABLE_SHIFT[i])
1754 & 0x03) + 1;
1755 }
1756 }
1710 for (i = 0; i < num_temps; i++) { 1757 for (i = 0; i < num_temps; i++) {
1711 data->temp[i] = w83627hf_read_value( 1758 data->temp[i] = w83627hf_read_value(
1712 data, w83627hf_reg_temp[i]); 1759 data, w83627hf_reg_temp[i]);
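
[Editorial aside] The new pwmN_enable attributes in the w83627hf diff store a 2-bit mode field at a per-fan shift inside a chip register, mapping sysfs values 1-3 onto field values 0-2 and back. A small standalone sketch of that read-modify-write, using invented register contents, follows.

/*
 * Sketch of the pwmN_enable encode/decode from the w83627hf hunks
 * above; register contents are invented and no Super-I/O access is
 * performed.
 */
#include <stdio.h>

static const unsigned char pwm_enable_shift[] = { 2, 4, 1 };

int main(void)
{
        unsigned char reg = 0x5a;       /* pretend current register contents */
        int nr = 1;                     /* fan 2 */
        unsigned long val = 3;          /* requested mode: fan speed cruise */

        if (!val || val > 3)
                return 1;               /* only modes 1..3 are accepted */

        /* clear the old 2-bit field, then store val-1 in its place */
        reg &= ~(0x03 << pwm_enable_shift[nr]);
        reg |= (val - 1) << pwm_enable_shift[nr];

        /* reading it back: field value + 1 is what pwmN_enable shows */
        printf("register now 0x%02x, pwm%d_enable = %d\n", reg, nr + 1,
               ((reg >> pwm_enable_shift[nr]) & 0x03) + 1);
        return 0;
}
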
diff --git a/drivers/hwmon/w83791d.c b/drivers/hwmon/w83791d.c
index e4e91c9d480a..daa7d121483b 100644
--- a/drivers/hwmon/w83791d.c
+++ b/drivers/hwmon/w83791d.c
@@ -233,11 +233,9 @@ static u8 fan_to_reg(long rpm, int div)
233static u8 div_to_reg(int nr, long val) 233static u8 div_to_reg(int nr, long val)
234{ 234{
235 int i; 235 int i;
236 int max;
237 236
238 /* first three fan's divisor max out at 8, rest max out at 128 */ 237 /* fan divisors max out at 128 */
239 max = (nr < 3) ? 8 : 128; 238 val = SENSORS_LIMIT(val, 1, 128) >> 1;
240 val = SENSORS_LIMIT(val, 1, max) >> 1;
241 for (i = 0; i < 7; i++) { 239 for (i = 0; i < 7; i++) {
242 if (val == 0) 240 if (val == 0)
243 break; 241 break;
@@ -530,6 +528,7 @@ static ssize_t store_fan_div(struct device *dev, struct device_attribute *attr,
530 unsigned long min; 528 unsigned long min;
531 u8 tmp_fan_div; 529 u8 tmp_fan_div;
532 u8 fan_div_reg; 530 u8 fan_div_reg;
531 u8 vbat_reg;
533 int indx = 0; 532 int indx = 0;
534 u8 keep_mask = 0; 533 u8 keep_mask = 0;
535 u8 new_shift = 0; 534 u8 new_shift = 0;
@@ -581,6 +580,16 @@ static ssize_t store_fan_div(struct device *dev, struct device_attribute *attr,
581 w83791d_write(client, W83791D_REG_FAN_DIV[indx], 580 w83791d_write(client, W83791D_REG_FAN_DIV[indx],
582 fan_div_reg | tmp_fan_div); 581 fan_div_reg | tmp_fan_div);
583 582
583 /* Bit 2 of fans 0-2 is stored in the vbat register (bits 5-7) */
584 if (nr < 3) {
585 keep_mask = ~(1 << (nr + 5));
586 vbat_reg = w83791d_read(client, W83791D_REG_VBAT)
587 & keep_mask;
588 tmp_fan_div = (data->fan_div[nr] << (3 + nr)) & ~keep_mask;
589 w83791d_write(client, W83791D_REG_VBAT,
590 vbat_reg | tmp_fan_div);
591 }
592
584 /* Restore fan_min */ 593 /* Restore fan_min */
585 data->fan_min[nr] = fan_to_reg(min, DIV_FROM_REG(data->fan_div[nr])); 594 data->fan_min[nr] = fan_to_reg(min, DIV_FROM_REG(data->fan_div[nr]));
586 w83791d_write(client, W83791D_REG_FAN_MIN[nr], data->fan_min[nr]); 595 w83791d_write(client, W83791D_REG_FAN_MIN[nr], data->fan_min[nr]);
@@ -1182,6 +1191,7 @@ static struct w83791d_data *w83791d_update_device(struct device *dev)
1182 struct w83791d_data *data = i2c_get_clientdata(client); 1191 struct w83791d_data *data = i2c_get_clientdata(client);
1183 int i, j; 1192 int i, j;
1184 u8 reg_array_tmp[3]; 1193 u8 reg_array_tmp[3];
1194 u8 vbat_reg;
1185 1195
1186 mutex_lock(&data->update_lock); 1196 mutex_lock(&data->update_lock);
1187 1197
@@ -1219,6 +1229,12 @@ static struct w83791d_data *w83791d_update_device(struct device *dev)
1219 data->fan_div[3] = reg_array_tmp[2] & 0x07; 1229 data->fan_div[3] = reg_array_tmp[2] & 0x07;
1220 data->fan_div[4] = (reg_array_tmp[2] >> 4) & 0x07; 1230 data->fan_div[4] = (reg_array_tmp[2] >> 4) & 0x07;
1221 1231
1232 /* The fan divisor for fans 0-2 get bit 2 from
1233 bits 5-7 respectively of vbat register */
1234 vbat_reg = w83791d_read(client, W83791D_REG_VBAT);
1235 for (i = 0; i < 3; i++)
1236 data->fan_div[i] |= (vbat_reg >> (3 + i)) & 0x04;
1237
1222 /* Update the first temperature sensor */ 1238 /* Update the first temperature sensor */
1223 for (i = 0; i < 3; i++) { 1239 for (i = 0; i < 3; i++) {
1224 data->temp1[i] = w83791d_read(client, 1240 data->temp1[i] = w83791d_read(client,
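
[Editorial aside] The w83791d change gives the first three fans an extra divisor bit kept in bits 5-7 of the VBAT register, folded in on update and written back on store. Below is a self-contained sketch of that decode/encode with made-up register values; the real driver naturally goes through w83791d_read/w83791d_write under the update lock.

/*
 * Sketch: bit 2 of the fan divisor for fans 1-3 lives in VBAT bits 5-7,
 * matching the decode/encode added in the w83791d hunks above.
 */
#include <stdio.h>

int main(void)
{
        unsigned char vbat = 0xA0;      /* pretend VBAT register: bits 5 and 7 set */
        unsigned char fan_div[3] = { 0x03, 0x01, 0x02 };  /* low 3 bits from FAN_DIV */
        int i;

        /* decode: fold VBAT bit (5 + i) in as bit 2 of fan_div[i] */
        for (i = 0; i < 3; i++)
                fan_div[i] |= (vbat >> (3 + i)) & 0x04;

        for (i = 0; i < 3; i++)
                printf("fan%d divisor = %u (exponent %d)\n",
                       i + 1, 1u << fan_div[i], fan_div[i]);

        /* encode: write fan_div[i] bit 2 back into VBAT bit (5 + i) */
        for (i = 0; i < 3; i++) {
                unsigned char keep_mask = ~(1 << (i + 5));
                vbat = (vbat & keep_mask) |
                       ((fan_div[i] << (3 + i)) & ~keep_mask);
        }
        printf("VBAT written back as 0x%02x\n", vbat);
        return 0;
}
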
diff --git a/drivers/i2c/busses/i2c-acorn.c b/drivers/i2c/busses/i2c-acorn.c
index 7c2be3558a24..75089febbc13 100644
--- a/drivers/i2c/busses/i2c-acorn.c
+++ b/drivers/i2c/busses/i2c-acorn.c
@@ -16,7 +16,7 @@
16#include <linux/i2c.h> 16#include <linux/i2c.h>
17#include <linux/i2c-algo-bit.h> 17#include <linux/i2c-algo-bit.h>
18 18
19#include <asm/hardware.h> 19#include <mach/hardware.h>
20#include <asm/io.h> 20#include <asm/io.h>
21#include <asm/hardware/ioc.h> 21#include <asm/hardware/ioc.h>
22#include <asm/system.h> 22#include <asm/system.h>
diff --git a/drivers/i2c/busses/i2c-at91.c b/drivers/i2c/busses/i2c-at91.c
index 73d61946a534..c1adcdbf7979 100644
--- a/drivers/i2c/busses/i2c-at91.c
+++ b/drivers/i2c/busses/i2c-at91.c
@@ -27,9 +27,9 @@
27 27
28#include <asm/io.h> 28#include <asm/io.h>
29 29
30#include <asm/arch/at91_twi.h> 30#include <mach/at91_twi.h>
31#include <asm/arch/board.h> 31#include <mach/board.h>
32#include <asm/arch/cpu.h> 32#include <mach/cpu.h>
33 33
34#define TWI_CLOCK 100000 /* Hz. max 400 Kbits/sec */ 34#define TWI_CLOCK 100000 /* Hz. max 400 Kbits/sec */
35 35
diff --git a/drivers/i2c/busses/i2c-davinci.c b/drivers/i2c/busses/i2c-davinci.c
index af3846eda985..5d7789834b95 100644
--- a/drivers/i2c/busses/i2c-davinci.c
+++ b/drivers/i2c/busses/i2c-davinci.c
@@ -36,10 +36,9 @@
36#include <linux/platform_device.h> 36#include <linux/platform_device.h>
37#include <linux/io.h> 37#include <linux/io.h>
38 38
39#include <asm/hardware.h> 39#include <mach/hardware.h>
40#include <asm/mach-types.h>
41 40
42#include <asm/arch/i2c.h> 41#include <mach/i2c.h>
43 42
44/* ----- global defines ----------------------------------------------- */ 43/* ----- global defines ----------------------------------------------- */
45 44
diff --git a/drivers/i2c/busses/i2c-ixp2000.c b/drivers/i2c/busses/i2c-ixp2000.c
index 5af9e6521e6c..05d72e981353 100644
--- a/drivers/i2c/busses/i2c-ixp2000.c
+++ b/drivers/i2c/busses/i2c-ixp2000.c
@@ -33,8 +33,8 @@
33#include <linux/i2c.h> 33#include <linux/i2c.h>
34#include <linux/i2c-algo-bit.h> 34#include <linux/i2c-algo-bit.h>
35 35
36#include <asm/hardware.h> /* Pick up IXP2000-specific bits */ 36#include <mach/hardware.h> /* Pick up IXP2000-specific bits */
37#include <asm/arch/gpio.h> 37#include <mach/gpio.h>
38 38
39static inline int ixp2000_scl_pin(void *data) 39static inline int ixp2000_scl_pin(void *data)
40{ 40{
diff --git a/drivers/i2c/busses/i2c-pnx.c b/drivers/i2c/busses/i2c-pnx.c
index 1ca21084ffcf..ec15cff556b9 100644
--- a/drivers/i2c/busses/i2c-pnx.c
+++ b/drivers/i2c/busses/i2c-pnx.c
@@ -19,7 +19,7 @@
19#include <linux/completion.h> 19#include <linux/completion.h>
20#include <linux/platform_device.h> 20#include <linux/platform_device.h>
21#include <linux/i2c-pnx.h> 21#include <linux/i2c-pnx.h>
22#include <asm/hardware.h> 22#include <mach/hardware.h>
23#include <asm/irq.h> 23#include <asm/irq.h>
24#include <asm/uaccess.h> 24#include <asm/uaccess.h>
25 25
diff --git a/drivers/i2c/busses/i2c-pxa.c b/drivers/i2c/busses/i2c-pxa.c
index af9e6034d7fb..44d838410f15 100644
--- a/drivers/i2c/busses/i2c-pxa.c
+++ b/drivers/i2c/busses/i2c-pxa.c
@@ -34,11 +34,11 @@
34#include <linux/err.h> 34#include <linux/err.h>
35#include <linux/clk.h> 35#include <linux/clk.h>
36 36
37#include <asm/hardware.h> 37#include <mach/hardware.h>
38#include <asm/irq.h> 38#include <asm/irq.h>
39#include <asm/io.h> 39#include <asm/io.h>
40#include <asm/arch/i2c.h> 40#include <mach/i2c.h>
41#include <asm/arch/pxa-regs.h> 41#include <mach/pxa-regs.h>
42 42
43struct pxa_i2c { 43struct pxa_i2c {
44 spinlock_t lock; 44 spinlock_t lock;
diff --git a/drivers/i2c/busses/i2c-s3c2410.c b/drivers/i2c/busses/i2c-s3c2410.c
index 4864723c7425..c772e02c2803 100644
--- a/drivers/i2c/busses/i2c-s3c2410.c
+++ b/drivers/i2c/busses/i2c-s3c2410.c
@@ -35,11 +35,11 @@
35#include <linux/clk.h> 35#include <linux/clk.h>
36#include <linux/cpufreq.h> 36#include <linux/cpufreq.h>
37 37
38#include <asm/hardware.h> 38#include <mach/hardware.h>
39#include <asm/irq.h> 39#include <asm/irq.h>
40#include <asm/io.h> 40#include <asm/io.h>
41 41
42#include <asm/arch/regs-gpio.h> 42#include <mach/regs-gpio.h>
43#include <asm/plat-s3c/regs-iic.h> 43#include <asm/plat-s3c/regs-iic.h>
44#include <asm/plat-s3c/iic.h> 44#include <asm/plat-s3c/iic.h>
45 45
diff --git a/drivers/i2c/chips/isp1301_omap.c b/drivers/i2c/chips/isp1301_omap.c
index 03a33f1b9cd3..18355ae2155d 100644
--- a/drivers/i2c/chips/isp1301_omap.c
+++ b/drivers/i2c/chips/isp1301_omap.c
@@ -33,7 +33,7 @@
33#include <linux/workqueue.h> 33#include <linux/workqueue.h>
34 34
35#include <asm/irq.h> 35#include <asm/irq.h>
36#include <asm/arch/usb.h> 36#include <mach/usb.h>
37 37
38 38
39#ifndef DEBUG 39#ifndef DEBUG
@@ -94,7 +94,7 @@ struct isp1301 {
94/* board-specific PM hooks */ 94/* board-specific PM hooks */
95 95
96#include <asm/gpio.h> 96#include <asm/gpio.h>
97#include <asm/arch/mux.h> 97#include <mach/mux.h>
98#include <asm/mach-types.h> 98#include <asm/mach-types.h>
99 99
100 100
diff --git a/drivers/i2c/chips/menelaus.c b/drivers/i2c/chips/menelaus.c
index b36db1797c11..176126d3a01d 100644
--- a/drivers/i2c/chips/menelaus.c
+++ b/drivers/i2c/chips/menelaus.c
@@ -41,11 +41,10 @@
41#include <linux/rtc.h> 41#include <linux/rtc.h>
42#include <linux/bcd.h> 42#include <linux/bcd.h>
43 43
44#include <asm/mach-types.h>
45#include <asm/mach/irq.h> 44#include <asm/mach/irq.h>
46 45
47#include <asm/arch/gpio.h> 46#include <mach/gpio.h>
48#include <asm/arch/menelaus.h> 47#include <mach/menelaus.h>
49 48
50#define DRIVER_NAME "menelaus" 49#define DRIVER_NAME "menelaus"
51 50
diff --git a/drivers/ide/Kconfig b/drivers/ide/Kconfig
index 130ef64b44f7..a34758d29516 100644
--- a/drivers/ide/Kconfig
+++ b/drivers/ide/Kconfig
@@ -54,16 +54,6 @@ menuconfig IDE
54 54
55if IDE 55if IDE
56 56
57config IDE_MAX_HWIFS
58 int "Max IDE interfaces"
59 depends on ALPHA || SUPERH || IA64 || EMBEDDED
60 range 1 10
61 default 4
62 help
63 This is the maximum number of IDE hardware interfaces that will
64 be supported by the driver. Make sure it is at least as high as
65 the number of IDE interfaces in your system.
66
67config BLK_DEV_IDE 57config BLK_DEV_IDE
68 tristate "Enhanced IDE/MFM/RLL disk/cdrom/tape/floppy support" 58 tristate "Enhanced IDE/MFM/RLL disk/cdrom/tape/floppy support"
69 ---help--- 59 ---help---
diff --git a/drivers/ide/arm/ide_arm.c b/drivers/ide/arm/ide_arm.c
index 176532ffae0e..f728f2927b5a 100644
--- a/drivers/ide/arm/ide_arm.c
+++ b/drivers/ide/arm/ide_arm.c
@@ -11,13 +11,12 @@
11#include <linux/init.h> 11#include <linux/init.h>
12#include <linux/ide.h> 12#include <linux/ide.h>
13 13
14#include <asm/mach-types.h>
15#include <asm/irq.h> 14#include <asm/irq.h>
16 15
17#define DRV_NAME "ide_arm" 16#define DRV_NAME "ide_arm"
18 17
19#ifdef CONFIG_ARCH_CLPS7500 18#ifdef CONFIG_ARCH_CLPS7500
20# include <asm/arch/hardware.h> 19# include <mach/hardware.h>
21# 20#
22# define IDE_ARM_IO (ISASLOT_IO + 0x1f0) 21# define IDE_ARM_IO (ISASLOT_IO + 0x1f0)
23# define IDE_ARM_IRQ IRQ_ISA_14 22# define IDE_ARM_IRQ IRQ_ISA_14
diff --git a/drivers/ide/arm/palm_bk3710.c b/drivers/ide/arm/palm_bk3710.c
index 3e842d60eae9..f788fa5a977b 100644
--- a/drivers/ide/arm/palm_bk3710.c
+++ b/drivers/ide/arm/palm_bk3710.c
@@ -309,7 +309,7 @@ static void __devinit palm_bk3710_chipinit(void __iomem *base)
309 palm_bk3710_setpiomode(base, NULL, 1, 600, 0); 309 palm_bk3710_setpiomode(base, NULL, 1, 600, 0);
310} 310}
311 311
312static u8 __devinit palm_bk3710_cable_detect(ide_hwif_t *hwif) 312static u8 palm_bk3710_cable_detect(ide_hwif_t *hwif)
313{ 313{
314 return ATA_CBL_PATA80; 314 return ATA_CBL_PATA80;
315} 315}
diff --git a/drivers/ide/ide-cd.c b/drivers/ide/ide-cd.c
index e617cf08aef6..89a112d513ad 100644
--- a/drivers/ide/ide-cd.c
+++ b/drivers/ide/ide-cd.c
@@ -66,11 +66,11 @@ static struct cdrom_info *ide_cd_get(struct gendisk *disk)
66 mutex_lock(&idecd_ref_mutex); 66 mutex_lock(&idecd_ref_mutex);
67 cd = ide_cd_g(disk); 67 cd = ide_cd_g(disk);
68 if (cd) { 68 if (cd) {
69 kref_get(&cd->kref); 69 if (ide_device_get(cd->drive))
70 if (ide_device_get(cd->drive)) {
71 kref_put(&cd->kref, ide_cd_release);
72 cd = NULL; 70 cd = NULL;
73 } 71 else
72 kref_get(&cd->kref);
73
74 } 74 }
75 mutex_unlock(&idecd_ref_mutex); 75 mutex_unlock(&idecd_ref_mutex);
76 return cd; 76 return cd;
@@ -78,9 +78,11 @@ static struct cdrom_info *ide_cd_get(struct gendisk *disk)
78 78
79static void ide_cd_put(struct cdrom_info *cd) 79static void ide_cd_put(struct cdrom_info *cd)
80{ 80{
81 ide_drive_t *drive = cd->drive;
82
81 mutex_lock(&idecd_ref_mutex); 83 mutex_lock(&idecd_ref_mutex);
82 ide_device_put(cd->drive);
83 kref_put(&cd->kref, ide_cd_release); 84 kref_put(&cd->kref, ide_cd_release);
85 ide_device_put(drive);
84 mutex_unlock(&idecd_ref_mutex); 86 mutex_unlock(&idecd_ref_mutex);
85} 87}
86 88
@@ -1305,6 +1307,7 @@ static int cdrom_read_capacity(ide_drive_t *drive, unsigned long *capacity,
1305 int stat; 1307 int stat;
1306 unsigned char cmd[BLK_MAX_CDB]; 1308 unsigned char cmd[BLK_MAX_CDB];
1307 unsigned len = sizeof(capbuf); 1309 unsigned len = sizeof(capbuf);
1310 u32 blocklen;
1308 1311
1309 memset(cmd, 0, BLK_MAX_CDB); 1312 memset(cmd, 0, BLK_MAX_CDB);
1310 cmd[0] = GPCMD_READ_CDVD_CAPACITY; 1313 cmd[0] = GPCMD_READ_CDVD_CAPACITY;
@@ -1317,23 +1320,24 @@ static int cdrom_read_capacity(ide_drive_t *drive, unsigned long *capacity,
1317 /* 1320 /*
1318 * Sanity check the given block size 1321 * Sanity check the given block size
1319 */ 1322 */
1320 switch (capbuf.blocklen) { 1323 blocklen = be32_to_cpu(capbuf.blocklen);
1321 case __constant_cpu_to_be32(512): 1324 switch (blocklen) {
1322 case __constant_cpu_to_be32(1024): 1325 case 512:
1323 case __constant_cpu_to_be32(2048): 1326 case 1024:
1324 case __constant_cpu_to_be32(4096): 1327 case 2048:
1328 case 4096:
1325 break; 1329 break;
1326 default: 1330 default:
1327 printk(KERN_ERR "%s: weird block size %u\n", 1331 printk(KERN_ERR "%s: weird block size %u\n",
1328 drive->name, capbuf.blocklen); 1332 drive->name, blocklen);
1329 printk(KERN_ERR "%s: default to 2kb block size\n", 1333 printk(KERN_ERR "%s: default to 2kb block size\n",
1330 drive->name); 1334 drive->name);
1331 capbuf.blocklen = __constant_cpu_to_be32(2048); 1335 blocklen = 2048;
1332 break; 1336 break;
1333 } 1337 }
1334 1338
1335 *capacity = 1 + be32_to_cpu(capbuf.lba); 1339 *capacity = 1 + be32_to_cpu(capbuf.lba);
1336 *sectors_per_frame = be32_to_cpu(capbuf.blocklen) >> SECTOR_BITS; 1340 *sectors_per_frame = blocklen >> SECTOR_BITS;
1337 return 0; 1341 return 0;
1338} 1342}
1339 1343
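
[Editorial aside] The ide-cd capacity hunk converts the big-endian block-length field to host order once and then compares plain integers, instead of using __constant_cpu_to_be32() case labels. A userspace sketch of the same check follows; be32_to_host() stands in for the kernel's be32_to_cpu(), the capacity buffer is hand-made, and SECTOR_BITS is assumed to be 9 as in the IDE layer.

/*
 * Sketch of the READ CAPACITY block-length sanity check after the
 * change above: convert once, switch on ordinary integers.
 */
#include <stdio.h>

struct cd_capacity {
        unsigned char lba[4];           /* big-endian last LBA */
        unsigned char blocklen[4];      /* big-endian block length */
};

static unsigned int be32_to_host(const unsigned char *p)
{
        return ((unsigned int)p[0] << 24) | (p[1] << 16) | (p[2] << 8) | p[3];
}

int main(void)
{
        /* pretend the drive answered: last LBA 0x0003ffff, block size 2048 */
        struct cd_capacity capbuf = {
                { 0x00, 0x03, 0xff, 0xff },
                { 0x00, 0x00, 0x08, 0x00 },
        };
        unsigned int blocklen = be32_to_host(capbuf.blocklen);
        unsigned long capacity, sectors_per_frame;

        switch (blocklen) {
        case 512: case 1024: case 2048: case 4096:
                break;
        default:
                fprintf(stderr, "weird block size %u, defaulting to 2kb\n",
                        blocklen);
                blocklen = 2048;
                break;
        }

        capacity = 1 + be32_to_host(capbuf.lba);
        sectors_per_frame = blocklen >> 9;      /* assuming SECTOR_BITS == 9 */
        printf("capacity %lu frames, %lu sectors per frame\n",
               capacity, sectors_per_frame);
        return 0;
}
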
diff --git a/drivers/ide/ide-disk.c b/drivers/ide/ide-disk.c
index 28d85b410f7c..68b9cf0138b0 100644
--- a/drivers/ide/ide-disk.c
+++ b/drivers/ide/ide-disk.c
@@ -65,11 +65,10 @@ static struct ide_disk_obj *ide_disk_get(struct gendisk *disk)
65 mutex_lock(&idedisk_ref_mutex); 65 mutex_lock(&idedisk_ref_mutex);
66 idkp = ide_disk_g(disk); 66 idkp = ide_disk_g(disk);
67 if (idkp) { 67 if (idkp) {
68 kref_get(&idkp->kref); 68 if (ide_device_get(idkp->drive))
69 if (ide_device_get(idkp->drive)) {
70 kref_put(&idkp->kref, ide_disk_release);
71 idkp = NULL; 69 idkp = NULL;
72 } 70 else
71 kref_get(&idkp->kref);
73 } 72 }
74 mutex_unlock(&idedisk_ref_mutex); 73 mutex_unlock(&idedisk_ref_mutex);
75 return idkp; 74 return idkp;
@@ -77,9 +76,11 @@ static struct ide_disk_obj *ide_disk_get(struct gendisk *disk)
77 76
78static void ide_disk_put(struct ide_disk_obj *idkp) 77static void ide_disk_put(struct ide_disk_obj *idkp)
79{ 78{
79 ide_drive_t *drive = idkp->drive;
80
80 mutex_lock(&idedisk_ref_mutex); 81 mutex_lock(&idedisk_ref_mutex);
81 ide_device_put(idkp->drive);
82 kref_put(&idkp->kref, ide_disk_release); 82 kref_put(&idkp->kref, ide_disk_release);
83 ide_device_put(drive);
83 mutex_unlock(&idedisk_ref_mutex); 84 mutex_unlock(&idedisk_ref_mutex);
84} 85}
85 86
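
[Editorial aside] The same reordering shows up in the ide-cd, ide-disk, ide-floppy and ide-tape get/put helpers: take the device reference before the object kref on get, and on put cache the drive pointer, drop the kref (which may free the object) and only then release the device. The toy sketch below shows just that ordering with invented types and stand-in refcount helpers; it is nothing like the real kref/ide_device_get API.

#include <stdio.h>
#include <stdlib.h>

struct fake_drive { int busy; };

struct disk_obj {
        struct fake_drive *drive;
        int kref;
};

static int device_get(struct fake_drive *d) { d->busy++; return 0; }
static void device_put(struct fake_drive *d) { d->busy--; }

static struct disk_obj *obj_get(struct disk_obj *obj)
{
        if (device_get(obj->drive))
                return NULL;            /* device gone: no kref was ever taken */
        obj->kref++;
        return obj;
}

static void obj_put(struct disk_obj *obj)
{
        struct fake_drive *drive = obj->drive;  /* save before the possible free */

        if (--obj->kref == 0)
                free(obj);              /* obj must not be touched past this point */
        device_put(drive);
}

int main(void)
{
        static struct fake_drive drive;
        struct disk_obj *obj = malloc(sizeof(*obj));

        obj->drive = &drive;
        obj->kref = 1;
        device_get(&drive);             /* the object's own reference on the device */

        obj_get(obj);                   /* open */
        printf("busy=%d kref=%d\n", drive.busy, obj->kref);
        obj_put(obj);                   /* close */
        obj_put(obj);                   /* teardown: last kref frees obj, then drops the device */
        printf("busy=%d\n", drive.busy);
        return 0;
}

Saving drive before the final put matters because the release callback frees the containing object, so dereferencing it afterwards would be a use-after-free.
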
diff --git a/drivers/ide/ide-dma.c b/drivers/ide/ide-dma.c
index 71c377a7bcf2..adc682755857 100644
--- a/drivers/ide/ide-dma.c
+++ b/drivers/ide/ide-dma.c
@@ -649,11 +649,7 @@ static unsigned int ide_get_mode_mask(ide_drive_t *drive, u8 base, u8 req_mode)
649 if (id->field_valid & 2) { 649 if (id->field_valid & 2) {
650 mask = id->dma_1word & hwif->swdma_mask; 650 mask = id->dma_1word & hwif->swdma_mask;
651 } else if (id->tDMA) { 651 } else if (id->tDMA) {
652 /* 652 u8 mode = id->tDMA;
653 * ide_fix_driveid() doesn't convert ->tDMA to the
654 * CPU endianness so we need to do it here
655 */
656 u8 mode = le16_to_cpu(id->tDMA);
657 653
658 /* 654 /*
659 * if the mode is valid convert it to the mask 655 * if the mode is valid convert it to the mask
diff --git a/drivers/ide/ide-floppy.c b/drivers/ide/ide-floppy.c
index ca11a26746f1..e9034c0125f3 100644
--- a/drivers/ide/ide-floppy.c
+++ b/drivers/ide/ide-floppy.c
@@ -167,11 +167,10 @@ static struct ide_floppy_obj *ide_floppy_get(struct gendisk *disk)
167 mutex_lock(&idefloppy_ref_mutex); 167 mutex_lock(&idefloppy_ref_mutex);
168 floppy = ide_floppy_g(disk); 168 floppy = ide_floppy_g(disk);
169 if (floppy) { 169 if (floppy) {
170 kref_get(&floppy->kref); 170 if (ide_device_get(floppy->drive))
171 if (ide_device_get(floppy->drive)) {
172 kref_put(&floppy->kref, idefloppy_cleanup_obj);
173 floppy = NULL; 171 floppy = NULL;
174 } 172 else
173 kref_get(&floppy->kref);
175 } 174 }
176 mutex_unlock(&idefloppy_ref_mutex); 175 mutex_unlock(&idefloppy_ref_mutex);
177 return floppy; 176 return floppy;
@@ -179,9 +178,11 @@ static struct ide_floppy_obj *ide_floppy_get(struct gendisk *disk)
179 178
180static void ide_floppy_put(struct ide_floppy_obj *floppy) 179static void ide_floppy_put(struct ide_floppy_obj *floppy)
181{ 180{
181 ide_drive_t *drive = floppy->drive;
182
182 mutex_lock(&idefloppy_ref_mutex); 183 mutex_lock(&idefloppy_ref_mutex);
183 ide_device_put(floppy->drive);
184 kref_put(&floppy->kref, idefloppy_cleanup_obj); 184 kref_put(&floppy->kref, idefloppy_cleanup_obj);
185 ide_device_put(drive);
185 mutex_unlock(&idefloppy_ref_mutex); 186 mutex_unlock(&idefloppy_ref_mutex);
186} 187}
187 188
diff --git a/drivers/ide/ide-iops.c b/drivers/ide/ide-iops.c
index 8aae91764513..2cbadffe922e 100644
--- a/drivers/ide/ide-iops.c
+++ b/drivers/ide/ide-iops.c
@@ -484,11 +484,11 @@ void ide_fix_driveid (struct hd_driveid *id)
484 for (i = 0; i < 3; i++) 484 for (i = 0; i < 3; i++)
485 id->words157_159[i] = __le16_to_cpu(id->words157_159[i]); 485 id->words157_159[i] = __le16_to_cpu(id->words157_159[i]);
486 id->cfa_power = __le16_to_cpu(id->cfa_power); 486 id->cfa_power = __le16_to_cpu(id->cfa_power);
487 for (i = 0; i < 14; i++) 487 for (i = 0; i < 15; i++)
488 id->words161_175[i] = __le16_to_cpu(id->words161_175[i]); 488 id->words161_175[i] = __le16_to_cpu(id->words161_175[i]);
489 for (i = 0; i < 31; i++) 489 for (i = 0; i < 30; i++)
490 id->words176_205[i] = __le16_to_cpu(id->words176_205[i]); 490 id->words176_205[i] = __le16_to_cpu(id->words176_205[i]);
491 for (i = 0; i < 48; i++) 491 for (i = 0; i < 49; i++)
492 id->words206_254[i] = __le16_to_cpu(id->words206_254[i]); 492 id->words206_254[i] = __le16_to_cpu(id->words206_254[i]);
493 id->integrity_word = __le16_to_cpu(id->integrity_word); 493 id->integrity_word = __le16_to_cpu(id->integrity_word);
494# else 494# else
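
[Editorial aside] The ide_fix_driveid() hunk fixes the loop bounds for three identify-data arrays whose names give inclusive word ranges. A trivial check of the arithmetic, just to make the off-by-one visible:

#include <stdio.h>

static int count(int first, int last)
{
        return last - first + 1;        /* inclusive range */
}

int main(void)
{
        printf("words161_175: %d words\n", count(161, 175));    /* 15, not 14 */
        printf("words176_205: %d words\n", count(176, 205));    /* 30, not 31 */
        printf("words206_254: %d words\n", count(206, 254));    /* 49, not 48 */
        return 0;
}
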
diff --git a/drivers/ide/ide-tape.c b/drivers/ide/ide-tape.c
index 82c2afe4d28a..1bce84b56630 100644
--- a/drivers/ide/ide-tape.c
+++ b/drivers/ide/ide-tape.c
@@ -331,11 +331,10 @@ static struct ide_tape_obj *ide_tape_get(struct gendisk *disk)
331 mutex_lock(&idetape_ref_mutex); 331 mutex_lock(&idetape_ref_mutex);
332 tape = ide_tape_g(disk); 332 tape = ide_tape_g(disk);
333 if (tape) { 333 if (tape) {
334 kref_get(&tape->kref); 334 if (ide_device_get(tape->drive))
335 if (ide_device_get(tape->drive)) {
336 kref_put(&tape->kref, ide_tape_release);
337 tape = NULL; 335 tape = NULL;
338 } 336 else
337 kref_get(&tape->kref);
339 } 338 }
340 mutex_unlock(&idetape_ref_mutex); 339 mutex_unlock(&idetape_ref_mutex);
341 return tape; 340 return tape;
@@ -343,9 +342,11 @@ static struct ide_tape_obj *ide_tape_get(struct gendisk *disk)
343 342
344static void ide_tape_put(struct ide_tape_obj *tape) 343static void ide_tape_put(struct ide_tape_obj *tape)
345{ 344{
345 ide_drive_t *drive = tape->drive;
346
346 mutex_lock(&idetape_ref_mutex); 347 mutex_lock(&idetape_ref_mutex);
347 ide_device_put(tape->drive);
348 kref_put(&tape->kref, ide_tape_release); 348 kref_put(&tape->kref, ide_tape_release);
349 ide_device_put(drive);
349 mutex_unlock(&idetape_ref_mutex); 350 mutex_unlock(&idetape_ref_mutex);
350} 351}
351 352
diff --git a/drivers/ide/pci/aec62xx.c b/drivers/ide/pci/aec62xx.c
index e0c8fe7d9fea..40644b6f1c00 100644
--- a/drivers/ide/pci/aec62xx.c
+++ b/drivers/ide/pci/aec62xx.c
@@ -160,7 +160,7 @@ static unsigned int __devinit init_chipset_aec62xx(struct pci_dev *dev)
160 return dev->irq; 160 return dev->irq;
161} 161}
162 162
163static u8 __devinit atp86x_cable_detect(ide_hwif_t *hwif) 163static u8 atp86x_cable_detect(ide_hwif_t *hwif)
164{ 164{
165 struct pci_dev *dev = to_pci_dev(hwif->dev); 165 struct pci_dev *dev = to_pci_dev(hwif->dev);
166 u8 ata66 = 0, mask = hwif->channel ? 0x02 : 0x01; 166 u8 ata66 = 0, mask = hwif->channel ? 0x02 : 0x01;
diff --git a/drivers/ide/pci/alim15x3.c b/drivers/ide/pci/alim15x3.c
index b582687e0cd4..d647526af557 100644
--- a/drivers/ide/pci/alim15x3.c
+++ b/drivers/ide/pci/alim15x3.c
@@ -371,7 +371,7 @@ static int ali_cable_override(struct pci_dev *pdev)
371 * FIXME: frobs bits that are not defined on newer ALi devicea 371 * FIXME: frobs bits that are not defined on newer ALi devicea
372 */ 372 */
373 373
374static u8 __devinit ali_cable_detect(ide_hwif_t *hwif) 374static u8 ali_cable_detect(ide_hwif_t *hwif)
375{ 375{
376 struct pci_dev *dev = to_pci_dev(hwif->dev); 376 struct pci_dev *dev = to_pci_dev(hwif->dev);
377 unsigned long flags; 377 unsigned long flags;
diff --git a/drivers/ide/pci/amd74xx.c b/drivers/ide/pci/amd74xx.c
index 2cea7bf51a0f..1e66a960a96a 100644
--- a/drivers/ide/pci/amd74xx.c
+++ b/drivers/ide/pci/amd74xx.c
@@ -175,7 +175,7 @@ static unsigned int __devinit init_chipset_amd74xx(struct pci_dev *dev)
175 return dev->irq; 175 return dev->irq;
176} 176}
177 177
178static u8 __devinit amd_cable_detect(ide_hwif_t *hwif) 178static u8 amd_cable_detect(ide_hwif_t *hwif)
179{ 179{
180 if ((amd_80w >> hwif->channel) & 1) 180 if ((amd_80w >> hwif->channel) & 1)
181 return ATA_CBL_PATA80; 181 return ATA_CBL_PATA80;
diff --git a/drivers/ide/pci/atiixp.c b/drivers/ide/pci/atiixp.c
index 332f08f43b56..41f6cb6c163a 100644
--- a/drivers/ide/pci/atiixp.c
+++ b/drivers/ide/pci/atiixp.c
@@ -119,7 +119,7 @@ static void atiixp_set_dma_mode(ide_drive_t *drive, const u8 speed)
119 spin_unlock_irqrestore(&atiixp_lock, flags); 119 spin_unlock_irqrestore(&atiixp_lock, flags);
120} 120}
121 121
122static u8 __devinit atiixp_cable_detect(ide_hwif_t *hwif) 122static u8 atiixp_cable_detect(ide_hwif_t *hwif)
123{ 123{
124 struct pci_dev *pdev = to_pci_dev(hwif->dev); 124 struct pci_dev *pdev = to_pci_dev(hwif->dev);
125 u8 udma_mode = 0, ch = hwif->channel; 125 u8 udma_mode = 0, ch = hwif->channel;
diff --git a/drivers/ide/pci/cmd64x.c b/drivers/ide/pci/cmd64x.c
index 1360b4fa9fd3..e064398e03b4 100644
--- a/drivers/ide/pci/cmd64x.c
+++ b/drivers/ide/pci/cmd64x.c
@@ -354,7 +354,7 @@ static unsigned int __devinit init_chipset_cmd64x(struct pci_dev *dev)
354 return 0; 354 return 0;
355} 355}
356 356
357static u8 __devinit cmd64x_cable_detect(ide_hwif_t *hwif) 357static u8 cmd64x_cable_detect(ide_hwif_t *hwif)
358{ 358{
359 struct pci_dev *dev = to_pci_dev(hwif->dev); 359 struct pci_dev *dev = to_pci_dev(hwif->dev);
360 u8 bmidecsr = 0, mask = hwif->channel ? 0x02 : 0x01; 360 u8 bmidecsr = 0, mask = hwif->channel ? 0x02 : 0x01;
diff --git a/drivers/ide/pci/cs5520.c b/drivers/ide/pci/cs5520.c
index c0364b287f17..151844fcbb07 100644
--- a/drivers/ide/pci/cs5520.c
+++ b/drivers/ide/pci/cs5520.c
@@ -96,6 +96,7 @@ static const struct ide_port_ops cs5520_port_ops = {
96 96
97static const struct ide_port_info cyrix_chipset __devinitdata = { 97static const struct ide_port_info cyrix_chipset __devinitdata = {
98 .name = DRV_NAME, 98 .name = DRV_NAME,
99 .enablebits = { { 0x60, 0x01, 0x01 }, { 0x60, 0x02, 0x02 } },
99 .port_ops = &cs5520_port_ops, 100 .port_ops = &cs5520_port_ops,
100 .host_flags = IDE_HFLAG_ISA_PORTS | IDE_HFLAG_CS5520, 101 .host_flags = IDE_HFLAG_ISA_PORTS | IDE_HFLAG_CS5520,
101 .pio_mask = ATA_PIO4, 102 .pio_mask = ATA_PIO4,
diff --git a/drivers/ide/pci/cs5535.c b/drivers/ide/pci/cs5535.c
index f7b50cdeefa6..dd3dc23af995 100644
--- a/drivers/ide/pci/cs5535.c
+++ b/drivers/ide/pci/cs5535.c
@@ -153,7 +153,7 @@ static void cs5535_set_pio_mode(ide_drive_t *drive, const u8 pio)
153 cs5535_set_speed(drive, XFER_PIO_0 + pio); 153 cs5535_set_speed(drive, XFER_PIO_0 + pio);
154} 154}
155 155
156static u8 __devinit cs5535_cable_detect(ide_hwif_t *hwif) 156static u8 cs5535_cable_detect(ide_hwif_t *hwif)
157{ 157{
158 struct pci_dev *dev = to_pci_dev(hwif->dev); 158 struct pci_dev *dev = to_pci_dev(hwif->dev);
159 u8 bit; 159 u8 bit;
diff --git a/drivers/ide/pci/hpt366.c b/drivers/ide/pci/hpt366.c
index 5271b246b88c..748793a413ab 100644
--- a/drivers/ide/pci/hpt366.c
+++ b/drivers/ide/pci/hpt366.c
@@ -1214,7 +1214,7 @@ static unsigned int __devinit init_chipset_hpt366(struct pci_dev *dev)
1214 return dev->irq; 1214 return dev->irq;
1215} 1215}
1216 1216
1217static u8 __devinit hpt3xx_cable_detect(ide_hwif_t *hwif) 1217static u8 hpt3xx_cable_detect(ide_hwif_t *hwif)
1218{ 1218{
1219 struct pci_dev *dev = to_pci_dev(hwif->dev); 1219 struct pci_dev *dev = to_pci_dev(hwif->dev);
1220 struct ide_host *host = pci_get_drvdata(dev); 1220 struct ide_host *host = pci_get_drvdata(dev);
diff --git a/drivers/ide/pci/it8213.c b/drivers/ide/pci/it8213.c
index 6eba8f188264..652e47dd7e89 100644
--- a/drivers/ide/pci/it8213.c
+++ b/drivers/ide/pci/it8213.c
@@ -141,7 +141,7 @@ static void it8213_set_dma_mode(ide_drive_t *drive, const u8 speed)
141 } 141 }
142} 142}
143 143
144static u8 __devinit it8213_cable_detect(ide_hwif_t *hwif) 144static u8 it8213_cable_detect(ide_hwif_t *hwif)
145{ 145{
146 struct pci_dev *dev = to_pci_dev(hwif->dev); 146 struct pci_dev *dev = to_pci_dev(hwif->dev);
147 u8 reg42h = 0; 147 u8 reg42h = 0;
diff --git a/drivers/ide/pci/it821x.c b/drivers/ide/pci/it821x.c
index e16a1d113a2a..b6dc723de702 100644
--- a/drivers/ide/pci/it821x.c
+++ b/drivers/ide/pci/it821x.c
@@ -428,7 +428,7 @@ static void it821x_set_dma_mode(ide_drive_t *drive, const u8 speed)
428 * the needed logic onboard. 428 * the needed logic onboard.
429 */ 429 */
430 430
431static u8 __devinit it821x_cable_detect(ide_hwif_t *hwif) 431static u8 it821x_cable_detect(ide_hwif_t *hwif)
432{ 432{
433 /* The reference driver also only does disk side */ 433 /* The reference driver also only does disk side */
434 return ATA_CBL_PATA80; 434 return ATA_CBL_PATA80;
@@ -443,7 +443,7 @@ static u8 __devinit it821x_cable_detect(ide_hwif_t *hwif)
443 * final tuning that is needed, or fixups to work around bugs. 443 * final tuning that is needed, or fixups to work around bugs.
444 */ 444 */
445 445
446static void __devinit it821x_quirkproc(ide_drive_t *drive) 446static void it821x_quirkproc(ide_drive_t *drive)
447{ 447{
448 struct it821x_dev *itdev = ide_get_hwifdata(drive->hwif); 448 struct it821x_dev *itdev = ide_get_hwifdata(drive->hwif);
449 struct hd_driveid *id = drive->id; 449 struct hd_driveid *id = drive->id;
diff --git a/drivers/ide/pci/jmicron.c b/drivers/ide/pci/jmicron.c
index 545b6e172d9b..bb9d09d8f196 100644
--- a/drivers/ide/pci/jmicron.c
+++ b/drivers/ide/pci/jmicron.c
@@ -27,7 +27,7 @@ typedef enum {
27 * Returns the cable type. 27 * Returns the cable type.
28 */ 28 */
29 29
30static u8 __devinit jmicron_cable_detect(ide_hwif_t *hwif) 30static u8 jmicron_cable_detect(ide_hwif_t *hwif)
31{ 31{
32 struct pci_dev *pdev = to_pci_dev(hwif->dev); 32 struct pci_dev *pdev = to_pci_dev(hwif->dev);
33 33
diff --git a/drivers/ide/pci/pdc202xx_new.c b/drivers/ide/pci/pdc202xx_new.c
index 998615fa285f..0f609b72f470 100644
--- a/drivers/ide/pci/pdc202xx_new.c
+++ b/drivers/ide/pci/pdc202xx_new.c
@@ -193,7 +193,7 @@ static void pdcnew_set_pio_mode(ide_drive_t *drive, const u8 pio)
193 } 193 }
194} 194}
195 195
196static u8 __devinit pdcnew_cable_detect(ide_hwif_t *hwif) 196static u8 pdcnew_cable_detect(ide_hwif_t *hwif)
197{ 197{
198 if (get_indexed_reg(hwif, 0x0b) & 0x04) 198 if (get_indexed_reg(hwif, 0x0b) & 0x04)
199 return ATA_CBL_PATA40; 199 return ATA_CBL_PATA40;
diff --git a/drivers/ide/pci/pdc202xx_old.c b/drivers/ide/pci/pdc202xx_old.c
index 6ff2def58da0..de9a27400462 100644
--- a/drivers/ide/pci/pdc202xx_old.c
+++ b/drivers/ide/pci/pdc202xx_old.c
@@ -117,7 +117,7 @@ static void pdc202xx_set_pio_mode(ide_drive_t *drive, const u8 pio)
117 pdc202xx_set_mode(drive, XFER_PIO_0 + pio); 117 pdc202xx_set_mode(drive, XFER_PIO_0 + pio);
118} 118}
119 119
120static u8 __devinit pdc2026x_cable_detect(ide_hwif_t *hwif) 120static u8 pdc2026x_cable_detect(ide_hwif_t *hwif)
121{ 121{
122 struct pci_dev *dev = to_pci_dev(hwif->dev); 122 struct pci_dev *dev = to_pci_dev(hwif->dev);
123 u16 CIS, mask = hwif->channel ? (1 << 11) : (1 << 10); 123 u16 CIS, mask = hwif->channel ? (1 << 11) : (1 << 10);
diff --git a/drivers/ide/pci/piix.c b/drivers/ide/pci/piix.c
index 7fc3022dcf68..30cfc815fe31 100644
--- a/drivers/ide/pci/piix.c
+++ b/drivers/ide/pci/piix.c
@@ -256,7 +256,7 @@ static const struct ich_laptop ich_laptop[] = {
256 { 0, } 256 { 0, }
257}; 257};
258 258
259static u8 __devinit piix_cable_detect(ide_hwif_t *hwif) 259static u8 piix_cable_detect(ide_hwif_t *hwif)
260{ 260{
261 struct pci_dev *pdev = to_pci_dev(hwif->dev); 261 struct pci_dev *pdev = to_pci_dev(hwif->dev);
262 const struct ich_laptop *lap = &ich_laptop[0]; 262 const struct ich_laptop *lap = &ich_laptop[0];
diff --git a/drivers/ide/pci/scc_pata.c b/drivers/ide/pci/scc_pata.c
index 94a7ab864236..6cde48bba6f8 100644
--- a/drivers/ide/pci/scc_pata.c
+++ b/drivers/ide/pci/scc_pata.c
@@ -827,7 +827,7 @@ static void __devinit init_iops_scc(ide_hwif_t *hwif)
827 init_mmio_iops_scc(hwif); 827 init_mmio_iops_scc(hwif);
828} 828}
829 829
830static u8 __devinit scc_cable_detect(ide_hwif_t *hwif) 830static u8 scc_cable_detect(ide_hwif_t *hwif)
831{ 831{
832 return ATA_CBL_PATA80; 832 return ATA_CBL_PATA80;
833} 833}
diff --git a/drivers/ide/pci/serverworks.c b/drivers/ide/pci/serverworks.c
index d173f2937722..c3bdc6e51a48 100644
--- a/drivers/ide/pci/serverworks.c
+++ b/drivers/ide/pci/serverworks.c
@@ -272,7 +272,7 @@ static unsigned int __devinit init_chipset_svwks(struct pci_dev *dev)
272 return dev->irq; 272 return dev->irq;
273} 273}
274 274
275static u8 __devinit ata66_svwks_svwks(ide_hwif_t *hwif) 275static u8 ata66_svwks_svwks(ide_hwif_t *hwif)
276{ 276{
277 return ATA_CBL_PATA80; 277 return ATA_CBL_PATA80;
278} 278}
@@ -284,7 +284,7 @@ static u8 __devinit ata66_svwks_svwks(ide_hwif_t *hwif)
284 * Bit 14 clear = primary IDE channel does not have 80-pin cable. 284 * Bit 14 clear = primary IDE channel does not have 80-pin cable.
285 * Bit 14 set = primary IDE channel has 80-pin cable. 285 * Bit 14 set = primary IDE channel has 80-pin cable.
286 */ 286 */
287static u8 __devinit ata66_svwks_dell(ide_hwif_t *hwif) 287static u8 ata66_svwks_dell(ide_hwif_t *hwif)
288{ 288{
289 struct pci_dev *dev = to_pci_dev(hwif->dev); 289 struct pci_dev *dev = to_pci_dev(hwif->dev);
290 290
@@ -303,7 +303,7 @@ static u8 __devinit ata66_svwks_dell(ide_hwif_t *hwif)
303 * 303 *
304 * WARNING: this only works on Alpine hardware! 304 * WARNING: this only works on Alpine hardware!
305 */ 305 */
306static u8 __devinit ata66_svwks_cobalt(ide_hwif_t *hwif) 306static u8 ata66_svwks_cobalt(ide_hwif_t *hwif)
307{ 307{
308 struct pci_dev *dev = to_pci_dev(hwif->dev); 308 struct pci_dev *dev = to_pci_dev(hwif->dev);
309 309
@@ -315,7 +315,7 @@ static u8 __devinit ata66_svwks_cobalt(ide_hwif_t *hwif)
315 return ATA_CBL_PATA40; 315 return ATA_CBL_PATA40;
316} 316}
317 317
318static u8 __devinit svwks_cable_detect(ide_hwif_t *hwif) 318static u8 svwks_cable_detect(ide_hwif_t *hwif)
319{ 319{
320 struct pci_dev *dev = to_pci_dev(hwif->dev); 320 struct pci_dev *dev = to_pci_dev(hwif->dev);
321 321
diff --git a/drivers/ide/pci/siimage.c b/drivers/ide/pci/siimage.c
index b8ad9ad6cf0d..445ce6fbea33 100644
--- a/drivers/ide/pci/siimage.c
+++ b/drivers/ide/pci/siimage.c
@@ -639,7 +639,7 @@ static int is_dev_seagate_sata(ide_drive_t *drive)
639 * that can occur before we know what drives are present. 639 * that can occur before we know what drives are present.
640 */ 640 */
641 641
642static void __devinit sil_quirkproc(ide_drive_t *drive) 642static void sil_quirkproc(ide_drive_t *drive)
643{ 643{
644 ide_hwif_t *hwif = drive->hwif; 644 ide_hwif_t *hwif = drive->hwif;
645 645
@@ -679,7 +679,7 @@ static void __devinit init_iops_siimage(ide_hwif_t *hwif)
679 * Check for the presence of an ATA66 capable cable on the interface. 679 * Check for the presence of an ATA66 capable cable on the interface.
680 */ 680 */
681 681
682static u8 __devinit sil_cable_detect(ide_hwif_t *hwif) 682static u8 sil_cable_detect(ide_hwif_t *hwif)
683{ 683{
684 struct pci_dev *dev = to_pci_dev(hwif->dev); 684 struct pci_dev *dev = to_pci_dev(hwif->dev);
685 unsigned long addr = siimage_selreg(hwif, 0); 685 unsigned long addr = siimage_selreg(hwif, 0);
diff --git a/drivers/ide/pci/sis5513.c b/drivers/ide/pci/sis5513.c
index cc95f90b53b7..e5a4b42b4e33 100644
--- a/drivers/ide/pci/sis5513.c
+++ b/drivers/ide/pci/sis5513.c
@@ -518,7 +518,7 @@ static const struct sis_laptop sis_laptop[] = {
518 { 0, } 518 { 0, }
519}; 519};
520 520
521static u8 __devinit sis_cable_detect(ide_hwif_t *hwif) 521static u8 sis_cable_detect(ide_hwif_t *hwif)
522{ 522{
523 struct pci_dev *pdev = to_pci_dev(hwif->dev); 523 struct pci_dev *pdev = to_pci_dev(hwif->dev);
524 const struct sis_laptop *lap = &sis_laptop[0]; 524 const struct sis_laptop *lap = &sis_laptop[0];
diff --git a/drivers/ide/pci/slc90e66.c b/drivers/ide/pci/slc90e66.c
index 13d1fa491f26..866d6c65e3a0 100644
--- a/drivers/ide/pci/slc90e66.c
+++ b/drivers/ide/pci/slc90e66.c
@@ -116,7 +116,7 @@ static void slc90e66_set_dma_mode(ide_drive_t *drive, const u8 speed)
116 } 116 }
117} 117}
118 118
119static u8 __devinit slc90e66_cable_detect(ide_hwif_t *hwif) 119static u8 slc90e66_cable_detect(ide_hwif_t *hwif)
120{ 120{
121 struct pci_dev *dev = to_pci_dev(hwif->dev); 121 struct pci_dev *dev = to_pci_dev(hwif->dev);
122 u8 reg47 = 0, mask = hwif->channel ? 0x01 : 0x02; 122 u8 reg47 = 0, mask = hwif->channel ? 0x01 : 0x02;
diff --git a/drivers/ide/pci/tc86c001.c b/drivers/ide/pci/tc86c001.c
index b1cb8a9ce5a9..7fc88c375e5d 100644
--- a/drivers/ide/pci/tc86c001.c
+++ b/drivers/ide/pci/tc86c001.c
@@ -131,7 +131,7 @@ static void tc86c001_dma_start(ide_drive_t *drive)
131 ide_dma_start(drive); 131 ide_dma_start(drive);
132} 132}
133 133
134static u8 __devinit tc86c001_cable_detect(ide_hwif_t *hwif) 134static u8 tc86c001_cable_detect(ide_hwif_t *hwif)
135{ 135{
136 struct pci_dev *dev = to_pci_dev(hwif->dev); 136 struct pci_dev *dev = to_pci_dev(hwif->dev);
137 unsigned long sc_base = pci_resource_start(dev, 5); 137 unsigned long sc_base = pci_resource_start(dev, 5);
diff --git a/drivers/ide/pci/via82cxxx.c b/drivers/ide/pci/via82cxxx.c
index 454d2bf62dce..a6b2cc83f293 100644
--- a/drivers/ide/pci/via82cxxx.c
+++ b/drivers/ide/pci/via82cxxx.c
@@ -352,7 +352,7 @@ static int via_cable_override(struct pci_dev *pdev)
352 return 0; 352 return 0;
353} 353}
354 354
355static u8 __devinit via82cxxx_cable_detect(ide_hwif_t *hwif) 355static u8 via82cxxx_cable_detect(ide_hwif_t *hwif)
356{ 356{
357 struct pci_dev *pdev = to_pci_dev(hwif->dev); 357 struct pci_dev *pdev = to_pci_dev(hwif->dev);
358 struct ide_host *host = pci_get_drvdata(pdev); 358 struct ide_host *host = pci_get_drvdata(pdev);
diff --git a/drivers/ide/ppc/pmac.c b/drivers/ide/ppc/pmac.c
index c521bf6e1bf2..fa2be26272d5 100644
--- a/drivers/ide/ppc/pmac.c
+++ b/drivers/ide/ppc/pmac.c
@@ -1086,6 +1086,11 @@ static int __devinit pmac_ide_setup_device(pmac_ide_hwif_t *pmif, hw_regs_t *hw)
1086 /* Make sure we have sane timings */ 1086 /* Make sure we have sane timings */
1087 sanitize_timings(pmif); 1087 sanitize_timings(pmif);
1088 1088
1089 host = ide_host_alloc(&d, hws);
1090 if (host == NULL)
1091 return -ENOMEM;
1092 hwif = host->ports[0];
1093
1089#ifndef CONFIG_PPC64 1094#ifndef CONFIG_PPC64
1090 /* XXX FIXME: Media bay stuff need re-organizing */ 1095 /* XXX FIXME: Media bay stuff need re-organizing */
1091 if (np->parent && np->parent->name 1096 if (np->parent && np->parent->name
@@ -1119,11 +1124,11 @@ static int __devinit pmac_ide_setup_device(pmac_ide_hwif_t *pmif, hw_regs_t *hw)
1119 pmif->mdev ? "macio" : "PCI", pmif->aapl_bus_id, 1124 pmif->mdev ? "macio" : "PCI", pmif->aapl_bus_id,
1120 pmif->mediabay ? " (mediabay)" : "", hw->irq); 1125 pmif->mediabay ? " (mediabay)" : "", hw->irq);
1121 1126
1122 rc = ide_host_add(&d, hws, &host); 1127 rc = ide_host_register(host, &d, hws);
1123 if (rc) 1128 if (rc) {
1129 ide_host_free(host);
1124 return rc; 1130 return rc;
1125 1131 }
1126 hwif = host->ports[0];
1127 1132
1128 return 0; 1133 return 0;
1129} 1134}
diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c
index e980ff3335db..d951896ff7fc 100644
--- a/drivers/infiniband/core/cma.c
+++ b/drivers/infiniband/core/cma.c
@@ -155,9 +155,7 @@ struct cma_multicast {
155 } multicast; 155 } multicast;
156 struct list_head list; 156 struct list_head list;
157 void *context; 157 void *context;
158 struct sockaddr addr; 158 struct sockaddr_storage addr;
159 u8 pad[sizeof(struct sockaddr_in6) -
160 sizeof(struct sockaddr)];
161}; 159};
162 160
163struct cma_work { 161struct cma_work {
@@ -786,8 +784,8 @@ static void cma_cancel_operation(struct rdma_id_private *id_priv,
786 cma_cancel_route(id_priv); 784 cma_cancel_route(id_priv);
787 break; 785 break;
788 case CMA_LISTEN: 786 case CMA_LISTEN:
789 if (cma_any_addr(&id_priv->id.route.addr.src_addr) && 787 if (cma_any_addr((struct sockaddr *) &id_priv->id.route.addr.src_addr)
790 !id_priv->cma_dev) 788 && !id_priv->cma_dev)
791 cma_cancel_listens(id_priv); 789 cma_cancel_listens(id_priv);
792 break; 790 break;
793 default: 791 default:
@@ -1026,7 +1024,7 @@ static struct rdma_id_private *cma_new_conn_id(struct rdma_cm_id *listen_id,
1026 rt->path_rec[1] = *ib_event->param.req_rcvd.alternate_path; 1024 rt->path_rec[1] = *ib_event->param.req_rcvd.alternate_path;
1027 1025
1028 ib_addr_set_dgid(&rt->addr.dev_addr, &rt->path_rec[0].dgid); 1026 ib_addr_set_dgid(&rt->addr.dev_addr, &rt->path_rec[0].dgid);
1029 ret = rdma_translate_ip(&id->route.addr.src_addr, 1027 ret = rdma_translate_ip((struct sockaddr *) &id->route.addr.src_addr,
1030 &id->route.addr.dev_addr); 1028 &id->route.addr.dev_addr);
1031 if (ret) 1029 if (ret)
1032 goto destroy_id; 1030 goto destroy_id;
@@ -1064,7 +1062,7 @@ static struct rdma_id_private *cma_new_udp_id(struct rdma_cm_id *listen_id,
1064 cma_save_net_info(&id->route.addr, &listen_id->route.addr, 1062 cma_save_net_info(&id->route.addr, &listen_id->route.addr,
1065 ip_ver, port, src, dst); 1063 ip_ver, port, src, dst);
1066 1064
1067 ret = rdma_translate_ip(&id->route.addr.src_addr, 1065 ret = rdma_translate_ip((struct sockaddr *) &id->route.addr.src_addr,
1068 &id->route.addr.dev_addr); 1066 &id->route.addr.dev_addr);
1069 if (ret) 1067 if (ret)
1070 goto err; 1068 goto err;
@@ -1377,7 +1375,7 @@ static int cma_ib_listen(struct rdma_id_private *id_priv)
1377 if (IS_ERR(id_priv->cm_id.ib)) 1375 if (IS_ERR(id_priv->cm_id.ib))
1378 return PTR_ERR(id_priv->cm_id.ib); 1376 return PTR_ERR(id_priv->cm_id.ib);
1379 1377
1380 addr = &id_priv->id.route.addr.src_addr; 1378 addr = (struct sockaddr *) &id_priv->id.route.addr.src_addr;
1381 svc_id = cma_get_service_id(id_priv->id.ps, addr); 1379 svc_id = cma_get_service_id(id_priv->id.ps, addr);
1382 if (cma_any_addr(addr)) 1380 if (cma_any_addr(addr))
1383 ret = ib_cm_listen(id_priv->cm_id.ib, svc_id, 0, NULL); 1381 ret = ib_cm_listen(id_priv->cm_id.ib, svc_id, 0, NULL);
@@ -1443,7 +1441,7 @@ static void cma_listen_on_dev(struct rdma_id_private *id_priv,
1443 1441
1444 dev_id_priv->state = CMA_ADDR_BOUND; 1442 dev_id_priv->state = CMA_ADDR_BOUND;
1445 memcpy(&id->route.addr.src_addr, &id_priv->id.route.addr.src_addr, 1443 memcpy(&id->route.addr.src_addr, &id_priv->id.route.addr.src_addr,
1446 ip_addr_size(&id_priv->id.route.addr.src_addr)); 1444 ip_addr_size((struct sockaddr *) &id_priv->id.route.addr.src_addr));
1447 1445
1448 cma_attach_to_dev(dev_id_priv, cma_dev); 1446 cma_attach_to_dev(dev_id_priv, cma_dev);
1449 list_add_tail(&dev_id_priv->listen_list, &id_priv->listen_list); 1447 list_add_tail(&dev_id_priv->listen_list, &id_priv->listen_list);
@@ -1563,13 +1561,14 @@ static int cma_query_ib_route(struct rdma_id_private *id_priv, int timeout_ms,
1563 path_rec.pkey = cpu_to_be16(ib_addr_get_pkey(&addr->dev_addr)); 1561 path_rec.pkey = cpu_to_be16(ib_addr_get_pkey(&addr->dev_addr));
1564 path_rec.numb_path = 1; 1562 path_rec.numb_path = 1;
1565 path_rec.reversible = 1; 1563 path_rec.reversible = 1;
1566 path_rec.service_id = cma_get_service_id(id_priv->id.ps, &addr->dst_addr); 1564 path_rec.service_id = cma_get_service_id(id_priv->id.ps,
1565 (struct sockaddr *) &addr->dst_addr);
1567 1566
1568 comp_mask = IB_SA_PATH_REC_DGID | IB_SA_PATH_REC_SGID | 1567 comp_mask = IB_SA_PATH_REC_DGID | IB_SA_PATH_REC_SGID |
1569 IB_SA_PATH_REC_PKEY | IB_SA_PATH_REC_NUMB_PATH | 1568 IB_SA_PATH_REC_PKEY | IB_SA_PATH_REC_NUMB_PATH |
1570 IB_SA_PATH_REC_REVERSIBLE | IB_SA_PATH_REC_SERVICE_ID; 1569 IB_SA_PATH_REC_REVERSIBLE | IB_SA_PATH_REC_SERVICE_ID;
1571 1570
1572 if (addr->src_addr.sa_family == AF_INET) { 1571 if (addr->src_addr.ss_family == AF_INET) {
1573 path_rec.qos_class = cpu_to_be16((u16) id_priv->tos); 1572 path_rec.qos_class = cpu_to_be16((u16) id_priv->tos);
1574 comp_mask |= IB_SA_PATH_REC_QOS_CLASS; 1573 comp_mask |= IB_SA_PATH_REC_QOS_CLASS;
1575 } else { 1574 } else {
@@ -1848,7 +1847,7 @@ static int cma_resolve_loopback(struct rdma_id_private *id_priv)
1848 ib_addr_get_sgid(&id_priv->id.route.addr.dev_addr, &gid); 1847 ib_addr_get_sgid(&id_priv->id.route.addr.dev_addr, &gid);
1849 ib_addr_set_dgid(&id_priv->id.route.addr.dev_addr, &gid); 1848 ib_addr_set_dgid(&id_priv->id.route.addr.dev_addr, &gid);
1850 1849
1851 if (cma_zero_addr(&id_priv->id.route.addr.src_addr)) { 1850 if (cma_zero_addr((struct sockaddr *) &id_priv->id.route.addr.src_addr)) {
1852 src_in = (struct sockaddr_in *)&id_priv->id.route.addr.src_addr; 1851 src_in = (struct sockaddr_in *)&id_priv->id.route.addr.src_addr;
1853 dst_in = (struct sockaddr_in *)&id_priv->id.route.addr.dst_addr; 1852 dst_in = (struct sockaddr_in *)&id_priv->id.route.addr.dst_addr;
1854 src_in->sin_family = dst_in->sin_family; 1853 src_in->sin_family = dst_in->sin_family;
@@ -1897,7 +1896,7 @@ int rdma_resolve_addr(struct rdma_cm_id *id, struct sockaddr *src_addr,
1897 if (cma_any_addr(dst_addr)) 1896 if (cma_any_addr(dst_addr))
1898 ret = cma_resolve_loopback(id_priv); 1897 ret = cma_resolve_loopback(id_priv);
1899 else 1898 else
1900 ret = rdma_resolve_ip(&addr_client, &id->route.addr.src_addr, 1899 ret = rdma_resolve_ip(&addr_client, (struct sockaddr *) &id->route.addr.src_addr,
1901 dst_addr, &id->route.addr.dev_addr, 1900 dst_addr, &id->route.addr.dev_addr,
1902 timeout_ms, addr_handler, id_priv); 1901 timeout_ms, addr_handler, id_priv);
1903 if (ret) 1902 if (ret)
@@ -2021,11 +2020,11 @@ static int cma_use_port(struct idr *ps, struct rdma_id_private *id_priv)
2021 * We don't support binding to any address if anyone is bound to 2020 * We don't support binding to any address if anyone is bound to
2022 * a specific address on the same port. 2021 * a specific address on the same port.
2023 */ 2022 */
2024 if (cma_any_addr(&id_priv->id.route.addr.src_addr)) 2023 if (cma_any_addr((struct sockaddr *) &id_priv->id.route.addr.src_addr))
2025 return -EADDRNOTAVAIL; 2024 return -EADDRNOTAVAIL;
2026 2025
2027 hlist_for_each_entry(cur_id, node, &bind_list->owners, node) { 2026 hlist_for_each_entry(cur_id, node, &bind_list->owners, node) {
2028 if (cma_any_addr(&cur_id->id.route.addr.src_addr)) 2027 if (cma_any_addr((struct sockaddr *) &cur_id->id.route.addr.src_addr))
2029 return -EADDRNOTAVAIL; 2028 return -EADDRNOTAVAIL;
2030 2029
2031 cur_sin = (struct sockaddr_in *) &cur_id->id.route.addr.src_addr; 2030 cur_sin = (struct sockaddr_in *) &cur_id->id.route.addr.src_addr;
@@ -2060,7 +2059,7 @@ static int cma_get_port(struct rdma_id_private *id_priv)
2060 } 2059 }
2061 2060
2062 mutex_lock(&lock); 2061 mutex_lock(&lock);
2063 if (cma_any_port(&id_priv->id.route.addr.src_addr)) 2062 if (cma_any_port((struct sockaddr *) &id_priv->id.route.addr.src_addr))
2064 ret = cma_alloc_any_port(ps, id_priv); 2063 ret = cma_alloc_any_port(ps, id_priv);
2065 else 2064 else
2066 ret = cma_use_port(ps, id_priv); 2065 ret = cma_use_port(ps, id_priv);
@@ -2232,7 +2231,7 @@ static int cma_resolve_ib_udp(struct rdma_id_private *id_priv,
2232 2231
2233 req.path = route->path_rec; 2232 req.path = route->path_rec;
2234 req.service_id = cma_get_service_id(id_priv->id.ps, 2233 req.service_id = cma_get_service_id(id_priv->id.ps,
2235 &route->addr.dst_addr); 2234 (struct sockaddr *) &route->addr.dst_addr);
2236 req.timeout_ms = 1 << (CMA_CM_RESPONSE_TIMEOUT - 8); 2235 req.timeout_ms = 1 << (CMA_CM_RESPONSE_TIMEOUT - 8);
2237 req.max_cm_retries = CMA_MAX_CM_RETRIES; 2236 req.max_cm_retries = CMA_MAX_CM_RETRIES;
2238 2237
@@ -2283,7 +2282,7 @@ static int cma_connect_ib(struct rdma_id_private *id_priv,
2283 req.alternate_path = &route->path_rec[1]; 2282 req.alternate_path = &route->path_rec[1];
2284 2283
2285 req.service_id = cma_get_service_id(id_priv->id.ps, 2284 req.service_id = cma_get_service_id(id_priv->id.ps,
2286 &route->addr.dst_addr); 2285 (struct sockaddr *) &route->addr.dst_addr);
2287 req.qp_num = id_priv->qp_num; 2286 req.qp_num = id_priv->qp_num;
2288 req.qp_type = IB_QPT_RC; 2287 req.qp_type = IB_QPT_RC;
2289 req.starting_psn = id_priv->seq_num; 2288 req.starting_psn = id_priv->seq_num;
@@ -2667,7 +2666,7 @@ static int cma_join_ib_multicast(struct rdma_id_private *id_priv,
2667 if (ret) 2666 if (ret)
2668 return ret; 2667 return ret;
2669 2668
2670 cma_set_mgid(id_priv, &mc->addr, &rec.mgid); 2669 cma_set_mgid(id_priv, (struct sockaddr *) &mc->addr, &rec.mgid);
2671 if (id_priv->id.ps == RDMA_PS_UDP) 2670 if (id_priv->id.ps == RDMA_PS_UDP)
2672 rec.qkey = cpu_to_be32(RDMA_UDP_QKEY); 2671 rec.qkey = cpu_to_be32(RDMA_UDP_QKEY);
2673 ib_addr_get_sgid(dev_addr, &rec.port_gid); 2672 ib_addr_get_sgid(dev_addr, &rec.port_gid);
diff --git a/drivers/infiniband/core/mad_rmpp.c b/drivers/infiniband/core/mad_rmpp.c
index d0ef7d61c037..3af2b84cd838 100644
--- a/drivers/infiniband/core/mad_rmpp.c
+++ b/drivers/infiniband/core/mad_rmpp.c
@@ -133,7 +133,7 @@ static void ack_recv(struct mad_rmpp_recv *rmpp_recv,
133 msg = ib_create_send_mad(&rmpp_recv->agent->agent, recv_wc->wc->src_qp, 133 msg = ib_create_send_mad(&rmpp_recv->agent->agent, recv_wc->wc->src_qp,
134 recv_wc->wc->pkey_index, 1, hdr_len, 134 recv_wc->wc->pkey_index, 1, hdr_len,
135 0, GFP_KERNEL); 135 0, GFP_KERNEL);
136 if (!msg) 136 if (IS_ERR(msg))
137 return; 137 return;
138 138
139 format_ack(msg, (struct ib_rmpp_mad *) recv_wc->recv_buf.mad, rmpp_recv); 139 format_ack(msg, (struct ib_rmpp_mad *) recv_wc->recv_buf.mad, rmpp_recv);
diff --git a/drivers/infiniband/core/ucma.c b/drivers/infiniband/core/ucma.c
index b41dd26bbfa1..3ddacf39b7ba 100644
--- a/drivers/infiniband/core/ucma.c
+++ b/drivers/infiniband/core/ucma.c
@@ -81,9 +81,7 @@ struct ucma_multicast {
81 81
82 u64 uid; 82 u64 uid;
83 struct list_head list; 83 struct list_head list;
84 struct sockaddr addr; 84 struct sockaddr_storage addr;
85 u8 pad[sizeof(struct sockaddr_in6) -
86 sizeof(struct sockaddr)];
87}; 85};
88 86
89struct ucma_event { 87struct ucma_event {
@@ -603,11 +601,11 @@ static ssize_t ucma_query_route(struct ucma_file *file,
603 return PTR_ERR(ctx); 601 return PTR_ERR(ctx);
604 602
605 memset(&resp, 0, sizeof resp); 603 memset(&resp, 0, sizeof resp);
606 addr = &ctx->cm_id->route.addr.src_addr; 604 addr = (struct sockaddr *) &ctx->cm_id->route.addr.src_addr;
607 memcpy(&resp.src_addr, addr, addr->sa_family == AF_INET ? 605 memcpy(&resp.src_addr, addr, addr->sa_family == AF_INET ?
608 sizeof(struct sockaddr_in) : 606 sizeof(struct sockaddr_in) :
609 sizeof(struct sockaddr_in6)); 607 sizeof(struct sockaddr_in6));
610 addr = &ctx->cm_id->route.addr.dst_addr; 608 addr = (struct sockaddr *) &ctx->cm_id->route.addr.dst_addr;
611 memcpy(&resp.dst_addr, addr, addr->sa_family == AF_INET ? 609 memcpy(&resp.dst_addr, addr, addr->sa_family == AF_INET ?
612 sizeof(struct sockaddr_in) : 610 sizeof(struct sockaddr_in) :
613 sizeof(struct sockaddr_in6)); 611 sizeof(struct sockaddr_in6));
@@ -913,7 +911,7 @@ static ssize_t ucma_join_multicast(struct ucma_file *file,
913 911
914 mc->uid = cmd.uid; 912 mc->uid = cmd.uid;
915 memcpy(&mc->addr, &cmd.addr, sizeof cmd.addr); 913 memcpy(&mc->addr, &cmd.addr, sizeof cmd.addr);
916 ret = rdma_join_multicast(ctx->cm_id, &mc->addr, mc); 914 ret = rdma_join_multicast(ctx->cm_id, (struct sockaddr *) &mc->addr, mc);
917 if (ret) 915 if (ret)
918 goto err2; 916 goto err2;
919 917
@@ -929,7 +927,7 @@ static ssize_t ucma_join_multicast(struct ucma_file *file,
929 return 0; 927 return 0;
930 928
931err3: 929err3:
932 rdma_leave_multicast(ctx->cm_id, &mc->addr); 930 rdma_leave_multicast(ctx->cm_id, (struct sockaddr *) &mc->addr);
933 ucma_cleanup_mc_events(mc); 931 ucma_cleanup_mc_events(mc);
934err2: 932err2:
935 mutex_lock(&mut); 933 mutex_lock(&mut);
@@ -975,7 +973,7 @@ static ssize_t ucma_leave_multicast(struct ucma_file *file,
975 goto out; 973 goto out;
976 } 974 }
977 975
978 rdma_leave_multicast(mc->ctx->cm_id, &mc->addr); 976 rdma_leave_multicast(mc->ctx->cm_id, (struct sockaddr *) &mc->addr);
979 mutex_lock(&mc->ctx->file->mut); 977 mutex_lock(&mc->ctx->file->mut);
980 ucma_cleanup_mc_events(mc); 978 ucma_cleanup_mc_events(mc);
981 list_del(&mc->list); 979 list_del(&mc->list);
diff --git a/drivers/infiniband/hw/cxgb3/cxio_hal.c b/drivers/infiniband/hw/cxgb3/cxio_hal.c
index f6d5747153a5..4dcf08b3fd83 100644
--- a/drivers/infiniband/hw/cxgb3/cxio_hal.c
+++ b/drivers/infiniband/hw/cxgb3/cxio_hal.c
@@ -725,9 +725,9 @@ static int __cxio_tpt_op(struct cxio_rdev *rdev_p, u32 reset_tpt_entry,
725 V_TPT_STAG_TYPE(type) | V_TPT_PDID(pdid)); 725 V_TPT_STAG_TYPE(type) | V_TPT_PDID(pdid));
726 BUG_ON(page_size >= 28); 726 BUG_ON(page_size >= 28);
727 tpt.flags_pagesize_qpid = cpu_to_be32(V_TPT_PERM(perm) | 727 tpt.flags_pagesize_qpid = cpu_to_be32(V_TPT_PERM(perm) |
728 F_TPT_MW_BIND_ENABLE | 728 ((perm & TPT_MW_BIND) ? F_TPT_MW_BIND_ENABLE : 0) |
729 V_TPT_ADDR_TYPE((zbva ? TPT_ZBTO : TPT_VATO)) | 729 V_TPT_ADDR_TYPE((zbva ? TPT_ZBTO : TPT_VATO)) |
730 V_TPT_PAGE_SIZE(page_size)); 730 V_TPT_PAGE_SIZE(page_size));
731 tpt.rsvd_pbl_addr = reset_tpt_entry ? 0 : 731 tpt.rsvd_pbl_addr = reset_tpt_entry ? 0 :
732 cpu_to_be32(V_TPT_PBL_ADDR(PBL_OFF(rdev_p, pbl_addr)>>3)); 732 cpu_to_be32(V_TPT_PBL_ADDR(PBL_OFF(rdev_p, pbl_addr)>>3));
733 tpt.len = cpu_to_be32(len); 733 tpt.len = cpu_to_be32(len);
diff --git a/drivers/infiniband/hw/cxgb3/iwch_provider.c b/drivers/infiniband/hw/cxgb3/iwch_provider.c
index b89640aa6e10..eb778bfd6f66 100644
--- a/drivers/infiniband/hw/cxgb3/iwch_provider.c
+++ b/drivers/infiniband/hw/cxgb3/iwch_provider.c
@@ -1187,28 +1187,6 @@ static ssize_t show_rev(struct device *dev, struct device_attribute *attr,
1187 return sprintf(buf, "%d\n", iwch_dev->rdev.t3cdev_p->type); 1187 return sprintf(buf, "%d\n", iwch_dev->rdev.t3cdev_p->type);
1188} 1188}
1189 1189
1190static int fw_supports_fastreg(struct iwch_dev *iwch_dev)
1191{
1192 struct ethtool_drvinfo info;
1193 struct net_device *lldev = iwch_dev->rdev.t3cdev_p->lldev;
1194 char *cp, *next;
1195 unsigned fw_maj, fw_min;
1196
1197 rtnl_lock();
1198 lldev->ethtool_ops->get_drvinfo(lldev, &info);
1199 rtnl_unlock();
1200
1201 next = info.fw_version+1;
1202 cp = strsep(&next, ".");
1203 sscanf(cp, "%i", &fw_maj);
1204 cp = strsep(&next, ".");
1205 sscanf(cp, "%i", &fw_min);
1206
1207 PDBG("%s maj %u min %u\n", __func__, fw_maj, fw_min);
1208
1209 return fw_maj > 6 || (fw_maj == 6 && fw_min > 0);
1210}
1211
1212static ssize_t show_fw_ver(struct device *dev, struct device_attribute *attr, char *buf) 1190static ssize_t show_fw_ver(struct device *dev, struct device_attribute *attr, char *buf)
1213{ 1191{
1214 struct iwch_dev *iwch_dev = container_of(dev, struct iwch_dev, 1192 struct iwch_dev *iwch_dev = container_of(dev, struct iwch_dev,
@@ -1325,12 +1303,12 @@ int iwch_register_device(struct iwch_dev *dev)
1325 memset(&dev->ibdev.node_guid, 0, sizeof(dev->ibdev.node_guid)); 1303 memset(&dev->ibdev.node_guid, 0, sizeof(dev->ibdev.node_guid));
1326 memcpy(&dev->ibdev.node_guid, dev->rdev.t3cdev_p->lldev->dev_addr, 6); 1304 memcpy(&dev->ibdev.node_guid, dev->rdev.t3cdev_p->lldev->dev_addr, 6);
1327 dev->ibdev.owner = THIS_MODULE; 1305 dev->ibdev.owner = THIS_MODULE;
1328 dev->device_cap_flags = IB_DEVICE_LOCAL_DMA_LKEY | IB_DEVICE_MEM_WINDOW; 1306 dev->device_cap_flags = IB_DEVICE_LOCAL_DMA_LKEY |
1307 IB_DEVICE_MEM_WINDOW |
1308 IB_DEVICE_MEM_MGT_EXTENSIONS;
1329 1309
1330 /* cxgb3 supports STag 0. */ 1310 /* cxgb3 supports STag 0. */
1331 dev->ibdev.local_dma_lkey = 0; 1311 dev->ibdev.local_dma_lkey = 0;
1332 if (fw_supports_fastreg(dev))
1333 dev->device_cap_flags |= IB_DEVICE_MEM_MGT_EXTENSIONS;
1334 1312
1335 dev->ibdev.uverbs_cmd_mask = 1313 dev->ibdev.uverbs_cmd_mask =
1336 (1ull << IB_USER_VERBS_CMD_GET_CONTEXT) | 1314 (1ull << IB_USER_VERBS_CMD_GET_CONTEXT) |
diff --git a/drivers/infiniband/hw/cxgb3/iwch_provider.h b/drivers/infiniband/hw/cxgb3/iwch_provider.h
index f5ceca05c435..a237d49bdcc9 100644
--- a/drivers/infiniband/hw/cxgb3/iwch_provider.h
+++ b/drivers/infiniband/hw/cxgb3/iwch_provider.h
@@ -293,9 +293,16 @@ static inline u32 iwch_ib_to_tpt_access(int acc)
293 return (acc & IB_ACCESS_REMOTE_WRITE ? TPT_REMOTE_WRITE : 0) | 293 return (acc & IB_ACCESS_REMOTE_WRITE ? TPT_REMOTE_WRITE : 0) |
294 (acc & IB_ACCESS_REMOTE_READ ? TPT_REMOTE_READ : 0) | 294 (acc & IB_ACCESS_REMOTE_READ ? TPT_REMOTE_READ : 0) |
295 (acc & IB_ACCESS_LOCAL_WRITE ? TPT_LOCAL_WRITE : 0) | 295 (acc & IB_ACCESS_LOCAL_WRITE ? TPT_LOCAL_WRITE : 0) |
296 (acc & IB_ACCESS_MW_BIND ? TPT_MW_BIND : 0) |
296 TPT_LOCAL_READ; 297 TPT_LOCAL_READ;
297} 298}
298 299
300static inline u32 iwch_ib_to_tpt_bind_access(int acc)
301{
302 return (acc & IB_ACCESS_REMOTE_WRITE ? TPT_REMOTE_WRITE : 0) |
303 (acc & IB_ACCESS_REMOTE_READ ? TPT_REMOTE_READ : 0);
304}
305
299enum iwch_mmid_state { 306enum iwch_mmid_state {
300 IWCH_STAG_STATE_VALID, 307 IWCH_STAG_STATE_VALID,
301 IWCH_STAG_STATE_INVALID 308 IWCH_STAG_STATE_INVALID
diff --git a/drivers/infiniband/hw/cxgb3/iwch_qp.c b/drivers/infiniband/hw/cxgb3/iwch_qp.c
index 9a3be3a9d5dc..3e4585c2318a 100644
--- a/drivers/infiniband/hw/cxgb3/iwch_qp.c
+++ b/drivers/infiniband/hw/cxgb3/iwch_qp.c
@@ -565,7 +565,7 @@ int iwch_bind_mw(struct ib_qp *qp,
565 wqe->bind.type = TPT_VATO; 565 wqe->bind.type = TPT_VATO;
566 566
567 /* TBD: check perms */ 567 /* TBD: check perms */
568 wqe->bind.perms = iwch_ib_to_tpt_access(mw_bind->mw_access_flags); 568 wqe->bind.perms = iwch_ib_to_tpt_bind_access(mw_bind->mw_access_flags);
569 wqe->bind.mr_stag = cpu_to_be32(mw_bind->mr->lkey); 569 wqe->bind.mr_stag = cpu_to_be32(mw_bind->mr->lkey);
570 wqe->bind.mw_stag = cpu_to_be32(mw->rkey); 570 wqe->bind.mw_stag = cpu_to_be32(mw->rkey);
571 wqe->bind.mw_len = cpu_to_be32(mw_bind->length); 571 wqe->bind.mw_len = cpu_to_be32(mw_bind->length);
@@ -879,20 +879,13 @@ static int rdma_init(struct iwch_dev *rhp, struct iwch_qp *qhp,
879 (qhp->attr.mpa_attr.xmit_marker_enabled << 1) | 879 (qhp->attr.mpa_attr.xmit_marker_enabled << 1) |
880 (qhp->attr.mpa_attr.crc_enabled << 2); 880 (qhp->attr.mpa_attr.crc_enabled << 2);
881 881
882 /* 882 init_attr.qpcaps = uP_RI_QP_RDMA_READ_ENABLE |
883 * XXX - The IWCM doesn't quite handle getting these 883 uP_RI_QP_RDMA_WRITE_ENABLE |
884 * attrs set before going into RTS. For now, just turn 884 uP_RI_QP_BIND_ENABLE;
885 * them on always... 885 if (!qhp->ibqp.uobject)
886 */ 886 init_attr.qpcaps |= uP_RI_QP_STAG0_ENABLE |
887#if 0 887 uP_RI_QP_FAST_REGISTER_ENABLE;
888 init_attr.qpcaps = qhp->attr.enableRdmaRead | 888
889 (qhp->attr.enableRdmaWrite << 1) |
890 (qhp->attr.enableBind << 2) |
891 (qhp->attr.enable_stag0_fastreg << 3) |
892 (qhp->attr.enable_stag0_fastreg << 4);
893#else
894 init_attr.qpcaps = 0x1f;
895#endif
896 init_attr.tcp_emss = qhp->ep->emss; 889 init_attr.tcp_emss = qhp->ep->emss;
897 init_attr.ord = qhp->attr.max_ord; 890 init_attr.ord = qhp->attr.max_ord;
898 init_attr.ird = qhp->attr.max_ird; 891 init_attr.ird = qhp->attr.max_ird;
@@ -900,8 +893,6 @@ static int rdma_init(struct iwch_dev *rhp, struct iwch_qp *qhp,
900 init_attr.qp_dma_size = (1UL << qhp->wq.size_log2); 893 init_attr.qp_dma_size = (1UL << qhp->wq.size_log2);
901 init_attr.rqe_count = iwch_rqes_posted(qhp); 894 init_attr.rqe_count = iwch_rqes_posted(qhp);
902 init_attr.flags = qhp->attr.mpa_attr.initiator ? MPA_INITIATOR : 0; 895 init_attr.flags = qhp->attr.mpa_attr.initiator ? MPA_INITIATOR : 0;
903 if (!qhp->ibqp.uobject)
904 init_attr.flags |= PRIV_QP;
905 if (peer2peer) { 896 if (peer2peer) {
906 init_attr.rtr_type = RTR_READ; 897 init_attr.rtr_type = RTR_READ;
907 if (init_attr.ord == 0 && qhp->attr.mpa_attr.initiator) 898 if (init_attr.ord == 0 && qhp->attr.mpa_attr.initiator)
diff --git a/drivers/infiniband/hw/ehca/ehca_reqs.c b/drivers/infiniband/hw/ehca/ehca_reqs.c
index dd9bc68f1c7b..898c8b5c38dd 100644
--- a/drivers/infiniband/hw/ehca/ehca_reqs.c
+++ b/drivers/infiniband/hw/ehca/ehca_reqs.c
@@ -42,7 +42,7 @@
42 */ 42 */
43 43
44 44
45#include <asm-powerpc/system.h> 45#include <asm/system.h>
46#include "ehca_classes.h" 46#include "ehca_classes.h"
47#include "ehca_tools.h" 47#include "ehca_tools.h"
48#include "ehca_qes.h" 48#include "ehca_qes.h"
diff --git a/drivers/infiniband/hw/ipath/ipath_driver.c b/drivers/infiniband/hw/ipath/ipath_driver.c
index daad09a45910..ad0aab60b051 100644
--- a/drivers/infiniband/hw/ipath/ipath_driver.c
+++ b/drivers/infiniband/hw/ipath/ipath_driver.c
@@ -1259,7 +1259,7 @@ reloop:
1259 */ 1259 */
1260 ipath_cdbg(ERRPKT, "Error Pkt, but no eflags! egrbuf" 1260 ipath_cdbg(ERRPKT, "Error Pkt, but no eflags! egrbuf"
1261 " %x, len %x hdrq+%x rhf: %Lx\n", 1261 " %x, len %x hdrq+%x rhf: %Lx\n",
1262 etail, tlen, l, 1262 etail, tlen, l, (unsigned long long)
1263 le64_to_cpu(*(__le64 *) rhf_addr)); 1263 le64_to_cpu(*(__le64 *) rhf_addr));
1264 if (ipath_debug & __IPATH_ERRPKTDBG) { 1264 if (ipath_debug & __IPATH_ERRPKTDBG) {
1265 u32 j, *d, dw = rsize-2; 1265 u32 j, *d, dw = rsize-2;
@@ -1457,7 +1457,8 @@ static void ipath_reset_availshadow(struct ipath_devdata *dd)
1457 0xaaaaaaaaaaaaaaaaULL); /* All BUSY bits in qword */ 1457 0xaaaaaaaaaaaaaaaaULL); /* All BUSY bits in qword */
1458 if (oldval != dd->ipath_pioavailshadow[i]) 1458 if (oldval != dd->ipath_pioavailshadow[i])
1459 ipath_dbg("shadow[%d] was %Lx, now %lx\n", 1459 ipath_dbg("shadow[%d] was %Lx, now %lx\n",
1460 i, oldval, dd->ipath_pioavailshadow[i]); 1460 i, (unsigned long long) oldval,
1461 dd->ipath_pioavailshadow[i]);
1461 } 1462 }
1462 spin_unlock_irqrestore(&ipath_pioavail_lock, flags); 1463 spin_unlock_irqrestore(&ipath_pioavail_lock, flags);
1463} 1464}
diff --git a/drivers/infiniband/hw/ipath/ipath_iba7220.c b/drivers/infiniband/hw/ipath/ipath_iba7220.c
index fb70712ac85c..d90f5e9a54fa 100644
--- a/drivers/infiniband/hw/ipath/ipath_iba7220.c
+++ b/drivers/infiniband/hw/ipath/ipath_iba7220.c
@@ -528,7 +528,7 @@ static const struct ipath_cregs ipath_7220_cregs = {
528 528
529static char int_type[16] = "auto"; 529static char int_type[16] = "auto";
530module_param_string(interrupt_type, int_type, sizeof(int_type), 0444); 530module_param_string(interrupt_type, int_type, sizeof(int_type), 0444);
531MODULE_PARM_DESC(int_type, " interrupt_type=auto|force_msi|force_intx\n"); 531MODULE_PARM_DESC(int_type, " interrupt_type=auto|force_msi|force_intx");
532 532
533/* packet rate matching delay; chip has support */ 533/* packet rate matching delay; chip has support */
534static u8 rate_to_delay[2][2] = { 534static u8 rate_to_delay[2][2] = {
@@ -1032,7 +1032,7 @@ static int ipath_7220_bringup_serdes(struct ipath_devdata *dd)
1032 ipath_cdbg(VERBOSE, "done: xgxs=%llx from %llx\n", 1032 ipath_cdbg(VERBOSE, "done: xgxs=%llx from %llx\n",
1033 (unsigned long long) 1033 (unsigned long long)
1034 ipath_read_kreg64(dd, dd->ipath_kregs->kr_xgxsconfig), 1034 ipath_read_kreg64(dd, dd->ipath_kregs->kr_xgxsconfig),
1035 prev_val); 1035 (unsigned long long) prev_val);
1036 1036
1037 guid = be64_to_cpu(dd->ipath_guid); 1037 guid = be64_to_cpu(dd->ipath_guid);
1038 1038
@@ -1042,7 +1042,8 @@ static int ipath_7220_bringup_serdes(struct ipath_devdata *dd)
1042 ipath_dbg("No GUID for heartbeat, faking %llx\n", 1042 ipath_dbg("No GUID for heartbeat, faking %llx\n",
1043 (unsigned long long)guid); 1043 (unsigned long long)guid);
1044 } else 1044 } else
1045 ipath_cdbg(VERBOSE, "Wrote %llX to HRTBT_GUID\n", guid); 1045 ipath_cdbg(VERBOSE, "Wrote %llX to HRTBT_GUID\n",
1046 (unsigned long long) guid);
1046 ipath_write_kreg(dd, dd->ipath_kregs->kr_hrtbt_guid, guid); 1047 ipath_write_kreg(dd, dd->ipath_kregs->kr_hrtbt_guid, guid);
1047 return ret; 1048 return ret;
1048} 1049}
@@ -2505,7 +2506,7 @@ done:
2505 if (dd->ipath_flags & IPATH_IB_AUTONEG_INPROG) { 2506 if (dd->ipath_flags & IPATH_IB_AUTONEG_INPROG) {
2506 ipath_dbg("Did not get to DDR INIT (%x) after %Lu msecs\n", 2507 ipath_dbg("Did not get to DDR INIT (%x) after %Lu msecs\n",
2507 ipath_ib_state(dd, dd->ipath_lastibcstat), 2508 ipath_ib_state(dd, dd->ipath_lastibcstat),
2508 jiffies_to_msecs(jiffies)-startms); 2509 (unsigned long long) jiffies_to_msecs(jiffies)-startms);
2509 dd->ipath_flags &= ~IPATH_IB_AUTONEG_INPROG; 2510 dd->ipath_flags &= ~IPATH_IB_AUTONEG_INPROG;
2510 if (dd->ipath_autoneg_tries == IPATH_AUTONEG_TRIES) { 2511 if (dd->ipath_autoneg_tries == IPATH_AUTONEG_TRIES) {
2511 dd->ipath_flags |= IPATH_IB_AUTONEG_FAILED; 2512 dd->ipath_flags |= IPATH_IB_AUTONEG_FAILED;
diff --git a/drivers/infiniband/hw/ipath/ipath_intr.c b/drivers/infiniband/hw/ipath/ipath_intr.c
index 26900b3b7a4e..6c21b4b5ec71 100644
--- a/drivers/infiniband/hw/ipath/ipath_intr.c
+++ b/drivers/infiniband/hw/ipath/ipath_intr.c
@@ -356,9 +356,10 @@ static void handle_e_ibstatuschanged(struct ipath_devdata *dd,
356 dd->ipath_cregs->cr_iblinkerrrecovcnt); 356 dd->ipath_cregs->cr_iblinkerrrecovcnt);
357 if (linkrecov != dd->ipath_lastlinkrecov) { 357 if (linkrecov != dd->ipath_lastlinkrecov) {
358 ipath_dbg("IB linkrecov up %Lx (%s %s) recov %Lu\n", 358 ipath_dbg("IB linkrecov up %Lx (%s %s) recov %Lu\n",
359 ibcs, ib_linkstate(dd, ibcs), 359 (unsigned long long) ibcs,
360 ib_linkstate(dd, ibcs),
360 ipath_ibcstatus_str[ltstate], 361 ipath_ibcstatus_str[ltstate],
361 linkrecov); 362 (unsigned long long) linkrecov);
362 /* and no more until active again */ 363 /* and no more until active again */
363 dd->ipath_lastlinkrecov = 0; 364 dd->ipath_lastlinkrecov = 0;
364 ipath_set_linkstate(dd, IPATH_IB_LINKDOWN); 365 ipath_set_linkstate(dd, IPATH_IB_LINKDOWN);
@@ -1118,9 +1119,11 @@ irqreturn_t ipath_intr(int irq, void *data)
1118 if (unlikely(istat & ~dd->ipath_i_bitsextant)) 1119 if (unlikely(istat & ~dd->ipath_i_bitsextant))
1119 ipath_dev_err(dd, 1120 ipath_dev_err(dd,
1120 "interrupt with unknown interrupts %Lx set\n", 1121 "interrupt with unknown interrupts %Lx set\n",
1122 (unsigned long long)
1121 istat & ~dd->ipath_i_bitsextant); 1123 istat & ~dd->ipath_i_bitsextant);
1122 else if (istat & ~INFINIPATH_I_ERROR) /* errors do own printing */ 1124 else if (istat & ~INFINIPATH_I_ERROR) /* errors do own printing */
1123 ipath_cdbg(VERBOSE, "intr stat=0x%Lx\n", istat); 1125 ipath_cdbg(VERBOSE, "intr stat=0x%Lx\n",
1126 (unsigned long long) istat);
1124 1127
1125 if (istat & INFINIPATH_I_ERROR) { 1128 if (istat & INFINIPATH_I_ERROR) {
1126 ipath_stats.sps_errints++; 1129 ipath_stats.sps_errints++;
@@ -1128,7 +1131,8 @@ irqreturn_t ipath_intr(int irq, void *data)
1128 dd->ipath_kregs->kr_errorstatus); 1131 dd->ipath_kregs->kr_errorstatus);
1129 if (!estat) 1132 if (!estat)
1130 dev_info(&dd->pcidev->dev, "error interrupt (%Lx), " 1133 dev_info(&dd->pcidev->dev, "error interrupt (%Lx), "
1131 "but no error bits set!\n", istat); 1134 "but no error bits set!\n",
1135 (unsigned long long) istat);
1132 else if (estat == -1LL) 1136 else if (estat == -1LL)
1133 /* 1137 /*
1134 * should we try clearing all, or hope next read 1138 * should we try clearing all, or hope next read
diff --git a/drivers/infiniband/hw/ipath/ipath_verbs.c b/drivers/infiniband/hw/ipath/ipath_verbs.c
index 55c718828826..b766e40e9ebf 100644
--- a/drivers/infiniband/hw/ipath/ipath_verbs.c
+++ b/drivers/infiniband/hw/ipath/ipath_verbs.c
@@ -1021,7 +1021,7 @@ static void sdma_complete(void *cookie, int status)
1021 struct ipath_verbs_txreq *tx = cookie; 1021 struct ipath_verbs_txreq *tx = cookie;
1022 struct ipath_qp *qp = tx->qp; 1022 struct ipath_qp *qp = tx->qp;
1023 struct ipath_ibdev *dev = to_idev(qp->ibqp.device); 1023 struct ipath_ibdev *dev = to_idev(qp->ibqp.device);
1024 unsigned int flags; 1024 unsigned long flags;
1025 enum ib_wc_status ibs = status == IPATH_SDMA_TXREQ_S_OK ? 1025 enum ib_wc_status ibs = status == IPATH_SDMA_TXREQ_S_OK ?
1026 IB_WC_SUCCESS : IB_WC_WR_FLUSH_ERR; 1026 IB_WC_SUCCESS : IB_WC_WR_FLUSH_ERR;
1027 1027
@@ -1051,7 +1051,7 @@ static void sdma_complete(void *cookie, int status)
1051 1051
1052static void decrement_dma_busy(struct ipath_qp *qp) 1052static void decrement_dma_busy(struct ipath_qp *qp)
1053{ 1053{
1054 unsigned int flags; 1054 unsigned long flags;
1055 1055
1056 if (atomic_dec_and_test(&qp->s_dma_busy)) { 1056 if (atomic_dec_and_test(&qp->s_dma_busy)) {
1057 spin_lock_irqsave(&qp->s_lock, flags); 1057 spin_lock_irqsave(&qp->s_lock, flags);
@@ -1221,7 +1221,7 @@ static int ipath_verbs_send_pio(struct ipath_qp *qp,
1221 unsigned flush_wc; 1221 unsigned flush_wc;
1222 u32 control; 1222 u32 control;
1223 int ret; 1223 int ret;
1224 unsigned int flags; 1224 unsigned long flags;
1225 1225
1226 piobuf = ipath_getpiobuf(dd, plen, NULL); 1226 piobuf = ipath_getpiobuf(dd, plen, NULL);
1227 if (unlikely(piobuf == NULL)) { 1227 if (unlikely(piobuf == NULL)) {
diff --git a/drivers/infiniband/hw/mlx4/cq.c b/drivers/infiniband/hw/mlx4/cq.c
index a1464574bfdd..d0866a3636e2 100644
--- a/drivers/infiniband/hw/mlx4/cq.c
+++ b/drivers/infiniband/hw/mlx4/cq.c
@@ -515,17 +515,17 @@ static void mlx4_ib_handle_error_cqe(struct mlx4_err_cqe *cqe,
515 wc->vendor_err = cqe->vendor_err_syndrome; 515 wc->vendor_err = cqe->vendor_err_syndrome;
516} 516}
517 517
518static int mlx4_ib_ipoib_csum_ok(__be32 status, __be16 checksum) 518static int mlx4_ib_ipoib_csum_ok(__be16 status, __be16 checksum)
519{ 519{
520 return ((status & cpu_to_be32(MLX4_CQE_IPOIB_STATUS_IPV4 | 520 return ((status & cpu_to_be16(MLX4_CQE_STATUS_IPV4 |
521 MLX4_CQE_IPOIB_STATUS_IPV4F | 521 MLX4_CQE_STATUS_IPV4F |
522 MLX4_CQE_IPOIB_STATUS_IPV4OPT | 522 MLX4_CQE_STATUS_IPV4OPT |
523 MLX4_CQE_IPOIB_STATUS_IPV6 | 523 MLX4_CQE_STATUS_IPV6 |
524 MLX4_CQE_IPOIB_STATUS_IPOK)) == 524 MLX4_CQE_STATUS_IPOK)) ==
525 cpu_to_be32(MLX4_CQE_IPOIB_STATUS_IPV4 | 525 cpu_to_be16(MLX4_CQE_STATUS_IPV4 |
526 MLX4_CQE_IPOIB_STATUS_IPOK)) && 526 MLX4_CQE_STATUS_IPOK)) &&
527 (status & cpu_to_be32(MLX4_CQE_IPOIB_STATUS_UDP | 527 (status & cpu_to_be16(MLX4_CQE_STATUS_UDP |
528 MLX4_CQE_IPOIB_STATUS_TCP)) && 528 MLX4_CQE_STATUS_TCP)) &&
529 checksum == cpu_to_be16(0xffff); 529 checksum == cpu_to_be16(0xffff);
530} 530}
531 531
@@ -582,17 +582,17 @@ repoll:
582 } 582 }
583 583
584 if (!*cur_qp || 584 if (!*cur_qp ||
585 (be32_to_cpu(cqe->my_qpn) & 0xffffff) != (*cur_qp)->mqp.qpn) { 585 (be32_to_cpu(cqe->vlan_my_qpn) & MLX4_CQE_QPN_MASK) != (*cur_qp)->mqp.qpn) {
586 /* 586 /*
587 * We do not have to take the QP table lock here, 587 * We do not have to take the QP table lock here,
588 * because CQs will be locked while QPs are removed 588 * because CQs will be locked while QPs are removed
589 * from the table. 589 * from the table.
590 */ 590 */
591 mqp = __mlx4_qp_lookup(to_mdev(cq->ibcq.device)->dev, 591 mqp = __mlx4_qp_lookup(to_mdev(cq->ibcq.device)->dev,
592 be32_to_cpu(cqe->my_qpn)); 592 be32_to_cpu(cqe->vlan_my_qpn));
593 if (unlikely(!mqp)) { 593 if (unlikely(!mqp)) {
594 printk(KERN_WARNING "CQ %06x with entry for unknown QPN %06x\n", 594 printk(KERN_WARNING "CQ %06x with entry for unknown QPN %06x\n",
595 cq->mcq.cqn, be32_to_cpu(cqe->my_qpn) & 0xffffff); 595 cq->mcq.cqn, be32_to_cpu(cqe->vlan_my_qpn) & MLX4_CQE_QPN_MASK);
596 return -EINVAL; 596 return -EINVAL;
597 } 597 }
598 598
@@ -692,14 +692,13 @@ repoll:
692 } 692 }
693 693
694 wc->slid = be16_to_cpu(cqe->rlid); 694 wc->slid = be16_to_cpu(cqe->rlid);
695 wc->sl = cqe->sl >> 4; 695 wc->sl = be16_to_cpu(cqe->sl_vid >> 12);
696 g_mlpath_rqpn = be32_to_cpu(cqe->g_mlpath_rqpn); 696 g_mlpath_rqpn = be32_to_cpu(cqe->g_mlpath_rqpn);
697 wc->src_qp = g_mlpath_rqpn & 0xffffff; 697 wc->src_qp = g_mlpath_rqpn & 0xffffff;
698 wc->dlid_path_bits = (g_mlpath_rqpn >> 24) & 0x7f; 698 wc->dlid_path_bits = (g_mlpath_rqpn >> 24) & 0x7f;
699 wc->wc_flags |= g_mlpath_rqpn & 0x80000000 ? IB_WC_GRH : 0; 699 wc->wc_flags |= g_mlpath_rqpn & 0x80000000 ? IB_WC_GRH : 0;
700 wc->pkey_index = be32_to_cpu(cqe->immed_rss_invalid) & 0x7f; 700 wc->pkey_index = be32_to_cpu(cqe->immed_rss_invalid) & 0x7f;
701 wc->csum_ok = mlx4_ib_ipoib_csum_ok(cqe->ipoib_status, 701 wc->csum_ok = mlx4_ib_ipoib_csum_ok(cqe->status, cqe->checksum);
702 cqe->checksum);
703 } 702 }
704 703
705 return 0; 704 return 0;
@@ -767,7 +766,7 @@ void __mlx4_ib_cq_clean(struct mlx4_ib_cq *cq, u32 qpn, struct mlx4_ib_srq *srq)
767 */ 766 */
768 while ((int) --prod_index - (int) cq->mcq.cons_index >= 0) { 767 while ((int) --prod_index - (int) cq->mcq.cons_index >= 0) {
769 cqe = get_cqe(cq, prod_index & cq->ibcq.cqe); 768 cqe = get_cqe(cq, prod_index & cq->ibcq.cqe);
770 if ((be32_to_cpu(cqe->my_qpn) & 0xffffff) == qpn) { 769 if ((be32_to_cpu(cqe->vlan_my_qpn) & MLX4_CQE_QPN_MASK) == qpn) {
771 if (srq && !(cqe->owner_sr_opcode & MLX4_CQE_IS_SEND_MASK)) 770 if (srq && !(cqe->owner_sr_opcode & MLX4_CQE_IS_SEND_MASK))
772 mlx4_ib_free_srq_wqe(srq, be16_to_cpu(cqe->wqe_index)); 771 mlx4_ib_free_srq_wqe(srq, be16_to_cpu(cqe->wqe_index));
773 ++nfreed; 772 ++nfreed;
diff --git a/drivers/infiniband/hw/mlx4/qp.c b/drivers/infiniband/hw/mlx4/qp.c
index f7bc7dd8578a..f29dbb767e87 100644
--- a/drivers/infiniband/hw/mlx4/qp.c
+++ b/drivers/infiniband/hw/mlx4/qp.c
@@ -902,7 +902,7 @@ static int __mlx4_ib_modify_qp(struct ib_qp *ibqp,
902 context->mtu_msgmax = (IB_MTU_4096 << 5) | 902 context->mtu_msgmax = (IB_MTU_4096 << 5) |
903 ilog2(dev->dev->caps.max_gso_sz); 903 ilog2(dev->dev->caps.max_gso_sz);
904 else 904 else
905 context->mtu_msgmax = (IB_MTU_4096 << 5) | 11; 905 context->mtu_msgmax = (IB_MTU_4096 << 5) | 12;
906 } else if (attr_mask & IB_QP_PATH_MTU) { 906 } else if (attr_mask & IB_QP_PATH_MTU) {
907 if (attr->path_mtu < IB_MTU_256 || attr->path_mtu > IB_MTU_4096) { 907 if (attr->path_mtu < IB_MTU_256 || attr->path_mtu > IB_MTU_4096) {
908 printk(KERN_ERR "path MTU (%u) is invalid\n", 908 printk(KERN_ERR "path MTU (%u) is invalid\n",
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_cm.c b/drivers/infiniband/ulp/ipoib/ipoib_cm.c
index 0f2d3045061a..7ebc400a4b3d 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_cm.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_cm.c
@@ -337,7 +337,7 @@ static void ipoib_cm_init_rx_wr(struct net_device *dev,
337 sge[i].length = PAGE_SIZE; 337 sge[i].length = PAGE_SIZE;
338 338
339 wr->next = NULL; 339 wr->next = NULL;
340 wr->sg_list = priv->cm.rx_sge; 340 wr->sg_list = sge;
341 wr->num_sge = priv->cm.num_frags; 341 wr->num_sge = priv->cm.num_frags;
342} 342}
343 343
diff --git a/drivers/input/keyboard/aaed2000_kbd.c b/drivers/input/keyboard/aaed2000_kbd.c
index 8a77bfcd05bc..18222a689a03 100644
--- a/drivers/input/keyboard/aaed2000_kbd.c
+++ b/drivers/input/keyboard/aaed2000_kbd.c
@@ -20,8 +20,8 @@
20#include <linux/module.h> 20#include <linux/module.h>
21#include <linux/slab.h> 21#include <linux/slab.h>
22 22
23#include <asm/arch/hardware.h> 23#include <mach/hardware.h>
24#include <asm/arch/aaed2000.h> 24#include <mach/aaed2000.h>
25 25
26#define KB_ROWS 12 26#define KB_ROWS 12
27#define KB_COLS 8 27#define KB_COLS 8
diff --git a/drivers/input/keyboard/corgikbd.c b/drivers/input/keyboard/corgikbd.c
index 1aa46ae12630..134e67bf6a90 100644
--- a/drivers/input/keyboard/corgikbd.c
+++ b/drivers/input/keyboard/corgikbd.c
@@ -20,10 +20,10 @@
20#include <linux/module.h> 20#include <linux/module.h>
21#include <linux/slab.h> 21#include <linux/slab.h>
22 22
23#include <asm/arch/corgi.h> 23#include <mach/corgi.h>
24#include <asm/arch/hardware.h> 24#include <mach/hardware.h>
25#include <asm/arch/pxa-regs.h> 25#include <mach/pxa-regs.h>
26#include <asm/arch/pxa2xx-gpio.h> 26#include <mach/pxa2xx-gpio.h>
27#include <asm/hardware/scoop.h> 27#include <asm/hardware/scoop.h>
28 28
29#define KB_ROWS 8 29#define KB_ROWS 8
diff --git a/drivers/input/keyboard/jornada720_kbd.c b/drivers/input/keyboard/jornada720_kbd.c
index ce650af6d649..4e016d823069 100644
--- a/drivers/input/keyboard/jornada720_kbd.c
+++ b/drivers/input/keyboard/jornada720_kbd.c
@@ -24,8 +24,8 @@
24#include <linux/module.h> 24#include <linux/module.h>
25#include <linux/platform_device.h> 25#include <linux/platform_device.h>
26 26
27#include <asm/arch/jornada720.h> 27#include <mach/jornada720.h>
28#include <asm/hardware.h> 28#include <mach/hardware.h>
29 29
30MODULE_AUTHOR("Kristoffer Ericson <Kristoffer.Ericson@gmail.com>"); 30MODULE_AUTHOR("Kristoffer Ericson <Kristoffer.Ericson@gmail.com>");
31MODULE_DESCRIPTION("HP Jornada 710/720/728 keyboard driver"); 31MODULE_DESCRIPTION("HP Jornada 710/720/728 keyboard driver");
diff --git a/drivers/input/keyboard/maple_keyb.c b/drivers/input/keyboard/maple_keyb.c
index 2b404284c28a..22f17a593be7 100644
--- a/drivers/input/keyboard/maple_keyb.c
+++ b/drivers/input/keyboard/maple_keyb.c
@@ -2,7 +2,7 @@
2 * SEGA Dreamcast keyboard driver 2 * SEGA Dreamcast keyboard driver
3 * Based on drivers/usb/usbkbd.c 3 * Based on drivers/usb/usbkbd.c
4 * Copyright YAEGASHI Takeshi, 2001 4 * Copyright YAEGASHI Takeshi, 2001
5 * Porting to 2.6 Copyright Adrian McMenamin, 2007 5 * Porting to 2.6 Copyright Adrian McMenamin, 2007, 2008
6 * 6 *
7 * This program is free software; you can redistribute it and/or modify 7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by 8 * it under the terms of the GNU General Public License as published by
@@ -27,7 +27,6 @@
27#include <linux/init.h> 27#include <linux/init.h>
28#include <linux/timer.h> 28#include <linux/timer.h>
29#include <linux/maple.h> 29#include <linux/maple.h>
30#include <asm/mach/maple.h>
31 30
32/* Very simple mutex to ensure proper cleanup */ 31/* Very simple mutex to ensure proper cleanup */
33static DEFINE_MUTEX(maple_keyb_mutex); 32static DEFINE_MUTEX(maple_keyb_mutex);
@@ -46,39 +45,51 @@ struct dc_kbd {
46}; 45};
47 46
48static const unsigned short dc_kbd_keycode[NR_SCANCODES] = { 47static const unsigned short dc_kbd_keycode[NR_SCANCODES] = {
49 KEY_RESERVED, KEY_RESERVED, KEY_RESERVED, KEY_RESERVED, KEY_A, KEY_B, KEY_C, KEY_D, 48 KEY_RESERVED, KEY_RESERVED, KEY_RESERVED, KEY_RESERVED, KEY_A, KEY_B,
50 KEY_E, KEY_F, KEY_G, KEY_H, KEY_I, KEY_J, KEY_K, KEY_L, 49 KEY_C, KEY_D, KEY_E, KEY_F, KEY_G, KEY_H, KEY_I, KEY_J, KEY_K, KEY_L,
51 KEY_M, KEY_N, KEY_O, KEY_P, KEY_Q, KEY_R, KEY_S, KEY_T, 50 KEY_M, KEY_N, KEY_O, KEY_P, KEY_Q, KEY_R, KEY_S, KEY_T, KEY_U, KEY_V,
52 KEY_U, KEY_V, KEY_W, KEY_X, KEY_Y, KEY_Z, KEY_1, KEY_2, 51 KEY_W, KEY_X, KEY_Y, KEY_Z, KEY_1, KEY_2, KEY_3, KEY_4, KEY_5, KEY_6,
53 KEY_3, KEY_4, KEY_5, KEY_6, KEY_7, KEY_8, KEY_9, KEY_0, 52 KEY_7, KEY_8, KEY_9, KEY_0, KEY_ENTER, KEY_ESC, KEY_BACKSPACE,
54 KEY_ENTER, KEY_ESC, KEY_BACKSPACE, KEY_TAB, KEY_SPACE, KEY_MINUS, KEY_EQUAL, KEY_LEFTBRACE, 53 KEY_TAB, KEY_SPACE, KEY_MINUS, KEY_EQUAL, KEY_LEFTBRACE,
55 KEY_RIGHTBRACE, KEY_BACKSLASH, KEY_BACKSLASH, KEY_SEMICOLON, KEY_APOSTROPHE, KEY_GRAVE, KEY_COMMA, 54 KEY_RIGHTBRACE, KEY_BACKSLASH, KEY_BACKSLASH, KEY_SEMICOLON,
56 KEY_DOT, KEY_SLASH, KEY_CAPSLOCK, KEY_F1, KEY_F2, KEY_F3, KEY_F4, KEY_F5, KEY_F6, 55 KEY_APOSTROPHE, KEY_GRAVE, KEY_COMMA, KEY_DOT, KEY_SLASH,
56 KEY_CAPSLOCK, KEY_F1, KEY_F2, KEY_F3, KEY_F4, KEY_F5, KEY_F6,
57 KEY_F7, KEY_F8, KEY_F9, KEY_F10, KEY_F11, KEY_F12, KEY_SYSRQ, 57 KEY_F7, KEY_F8, KEY_F9, KEY_F10, KEY_F11, KEY_F12, KEY_SYSRQ,
58 KEY_SCROLLLOCK, KEY_PAUSE, KEY_INSERT, KEY_HOME, KEY_PAGEUP, KEY_DELETE, 58 KEY_SCROLLLOCK, KEY_PAUSE, KEY_INSERT, KEY_HOME, KEY_PAGEUP,
59 KEY_END, KEY_PAGEDOWN, KEY_RIGHT, KEY_LEFT, KEY_DOWN, KEY_UP, 59 KEY_DELETE, KEY_END, KEY_PAGEDOWN, KEY_RIGHT, KEY_LEFT, KEY_DOWN,
60 KEY_NUMLOCK, KEY_KPSLASH, KEY_KPASTERISK, KEY_KPMINUS, KEY_KPPLUS, KEY_KPENTER, KEY_KP1, KEY_KP2, 60 KEY_UP, KEY_NUMLOCK, KEY_KPSLASH, KEY_KPASTERISK, KEY_KPMINUS,
61 KEY_KP3, KEY_KP4, KEY_KP5, KEY_KP6, KEY_KP7, KEY_KP8, KEY_KP9, KEY_KP0, KEY_KPDOT, 61 KEY_KPPLUS, KEY_KPENTER, KEY_KP1, KEY_KP2, KEY_KP3, KEY_KP4, KEY_KP5,
62 KEY_102ND, KEY_COMPOSE, KEY_POWER, KEY_KPEQUAL, KEY_F13, KEY_F14, KEY_F15, 62 KEY_KP6, KEY_KP7, KEY_KP8, KEY_KP9, KEY_KP0, KEY_KPDOT, KEY_102ND,
63 KEY_F16, KEY_F17, KEY_F18, KEY_F19, KEY_F20, 63 KEY_COMPOSE, KEY_POWER, KEY_KPEQUAL, KEY_F13, KEY_F14, KEY_F15,
64 KEY_F21, KEY_F22, KEY_F23, KEY_F24, KEY_OPEN, KEY_HELP, KEY_PROPS, KEY_FRONT, 64 KEY_F16, KEY_F17, KEY_F18, KEY_F19, KEY_F20, KEY_F21, KEY_F22,
65 KEY_STOP, KEY_AGAIN, KEY_UNDO, KEY_CUT, KEY_COPY, KEY_PASTE, KEY_FIND, KEY_MUTE, 65 KEY_F23, KEY_F24, KEY_OPEN, KEY_HELP, KEY_PROPS, KEY_FRONT, KEY_STOP,
66 KEY_VOLUMEUP, KEY_VOLUMEDOWN, KEY_RESERVED, KEY_RESERVED, KEY_RESERVED, KEY_KPCOMMA, KEY_RESERVED, KEY_RO, KEY_KATAKANAHIRAGANA , KEY_YEN, 66 KEY_AGAIN, KEY_UNDO, KEY_CUT, KEY_COPY, KEY_PASTE, KEY_FIND, KEY_MUTE,
67 KEY_HENKAN, KEY_MUHENKAN, KEY_KPJPCOMMA, KEY_RESERVED, KEY_RESERVED, KEY_RESERVED, 67 KEY_VOLUMEUP, KEY_VOLUMEDOWN, KEY_RESERVED, KEY_RESERVED, KEY_RESERVED,
68 KEY_HANGEUL, KEY_HANJA, KEY_KATAKANA, KEY_HIRAGANA, KEY_ZENKAKUHANKAKU, KEY_RESERVED, KEY_RESERVED, KEY_RESERVED, 68 KEY_KPCOMMA, KEY_RESERVED, KEY_RO, KEY_KATAKANAHIRAGANA , KEY_YEN,
69 KEY_RESERVED, KEY_RESERVED, KEY_RESERVED, KEY_RESERVED, KEY_RESERVED, KEY_RESERVED, KEY_RESERVED, KEY_RESERVED, 69 KEY_HENKAN, KEY_MUHENKAN, KEY_KPJPCOMMA, KEY_RESERVED, KEY_RESERVED,
70 KEY_RESERVED, KEY_RESERVED, KEY_RESERVED, KEY_RESERVED, KEY_RESERVED, KEY_RESERVED, KEY_RESERVED, KEY_RESERVED, 70 KEY_RESERVED, KEY_HANGEUL, KEY_HANJA, KEY_KATAKANA, KEY_HIRAGANA,
71 KEY_RESERVED, KEY_RESERVED, KEY_RESERVED, KEY_RESERVED, KEY_RESERVED, KEY_RESERVED, KEY_RESERVED, KEY_RESERVED, 71 KEY_ZENKAKUHANKAKU, KEY_RESERVED, KEY_RESERVED, KEY_RESERVED,
72 KEY_RESERVED, KEY_RESERVED, KEY_RESERVED, KEY_RESERVED, KEY_RESERVED, KEY_RESERVED, KEY_RESERVED, KEY_RESERVED, 72 KEY_RESERVED, KEY_RESERVED, KEY_RESERVED, KEY_RESERVED, KEY_RESERVED,
73 KEY_RESERVED, KEY_RESERVED, KEY_RESERVED, KEY_RESERVED, KEY_RESERVED, KEY_RESERVED, KEY_RESERVED, KEY_RESERVED, 73 KEY_RESERVED, KEY_RESERVED, KEY_RESERVED, KEY_RESERVED, KEY_RESERVED,
74 KEY_RESERVED, KEY_RESERVED, KEY_RESERVED, KEY_RESERVED, KEY_RESERVED, KEY_RESERVED, KEY_RESERVED, KEY_RESERVED, 74 KEY_RESERVED, KEY_RESERVED, KEY_RESERVED, KEY_RESERVED, KEY_RESERVED,
75 KEY_RESERVED, KEY_RESERVED, KEY_RESERVED, KEY_RESERVED, KEY_RESERVED, KEY_RESERVED, KEY_RESERVED, KEY_RESERVED, 75 KEY_RESERVED, KEY_RESERVED, KEY_RESERVED, KEY_RESERVED, KEY_RESERVED,
76 KEY_RESERVED, KEY_RESERVED, KEY_RESERVED, KEY_RESERVED, KEY_RESERVED, KEY_RESERVED, KEY_RESERVED, KEY_RESERVED, 76 KEY_RESERVED, KEY_RESERVED, KEY_RESERVED, KEY_RESERVED, KEY_RESERVED,
77 KEY_RESERVED, KEY_RESERVED, KEY_RESERVED, KEY_RESERVED, KEY_RESERVED, KEY_RESERVED, KEY_RESERVED, KEY_RESERVED, 77 KEY_RESERVED, KEY_RESERVED, KEY_RESERVED, KEY_RESERVED, KEY_RESERVED,
78 KEY_LEFTCTRL, KEY_LEFTSHIFT, KEY_LEFTALT, KEY_LEFTMETA, KEY_RIGHTCTRL, KEY_RIGHTSHIFT, KEY_RIGHTALT, KEY_RIGHTMETA, 78 KEY_RESERVED, KEY_RESERVED, KEY_RESERVED, KEY_RESERVED, KEY_RESERVED,
79 KEY_PLAYPAUSE, KEY_STOPCD, KEY_PREVIOUSSONG, KEY_NEXTSONG, KEY_EJECTCD, KEY_VOLUMEUP, KEY_VOLUMEDOWN, KEY_MUTE, 79 KEY_RESERVED, KEY_RESERVED, KEY_RESERVED, KEY_RESERVED, KEY_RESERVED,
80 KEY_WWW, KEY_BACK, KEY_FORWARD, KEY_STOP, KEY_FIND, KEY_SCROLLUP, KEY_SCROLLDOWN, KEY_EDIT, KEY_SLEEP, 80 KEY_RESERVED, KEY_RESERVED, KEY_RESERVED, KEY_RESERVED, KEY_RESERVED,
81 KEY_SCREENLOCK, KEY_REFRESH, KEY_CALC, KEY_RESERVED, KEY_RESERVED, KEY_RESERVED, KEY_RESERVED 81 KEY_RESERVED, KEY_RESERVED, KEY_RESERVED, KEY_RESERVED, KEY_RESERVED,
82 KEY_RESERVED, KEY_RESERVED, KEY_RESERVED, KEY_RESERVED, KEY_RESERVED,
83 KEY_RESERVED, KEY_RESERVED, KEY_RESERVED, KEY_RESERVED, KEY_RESERVED,
84 KEY_RESERVED, KEY_RESERVED, KEY_RESERVED, KEY_RESERVED, KEY_RESERVED,
85 KEY_RESERVED, KEY_RESERVED, KEY_RESERVED, KEY_RESERVED, KEY_RESERVED,
86 KEY_RESERVED, KEY_RESERVED, KEY_LEFTCTRL, KEY_LEFTSHIFT, KEY_LEFTALT,
87 KEY_LEFTMETA, KEY_RIGHTCTRL, KEY_RIGHTSHIFT, KEY_RIGHTALT,
88 KEY_RIGHTMETA, KEY_PLAYPAUSE, KEY_STOPCD, KEY_PREVIOUSSONG,
89 KEY_NEXTSONG, KEY_EJECTCD, KEY_VOLUMEUP, KEY_VOLUMEDOWN, KEY_MUTE,
90 KEY_WWW, KEY_BACK, KEY_FORWARD, KEY_STOP, KEY_FIND, KEY_SCROLLUP,
91 KEY_SCROLLDOWN, KEY_EDIT, KEY_SLEEP, KEY_SCREENLOCK, KEY_REFRESH,
92 KEY_CALC, KEY_RESERVED, KEY_RESERVED, KEY_RESERVED, KEY_RESERVED
82}; 93};
83 94
84static void dc_scan_kbd(struct dc_kbd *kbd) 95static void dc_scan_kbd(struct dc_kbd *kbd)
@@ -128,12 +139,12 @@ static void dc_scan_kbd(struct dc_kbd *kbd)
128static void dc_kbd_callback(struct mapleq *mq) 139static void dc_kbd_callback(struct mapleq *mq)
129{ 140{
130 struct maple_device *mapledev = mq->dev; 141 struct maple_device *mapledev = mq->dev;
131 struct dc_kbd *kbd = mapledev->private_data; 142 struct dc_kbd *kbd = maple_get_drvdata(mapledev);
132 unsigned long *buf = mq->recvbuf; 143 unsigned long *buf = mq->recvbuf;
133 144
134 /* 145 /*
135 * We should always be getting the lock because the only 146 * We should always get the lock because the only
136 * time it may be locked if driver is in cleanup phase. 147 * time it may be locked is if the driver is in the cleanup phase.
137 */ 148 */
138 if (likely(mutex_trylock(&maple_keyb_mutex))) { 149 if (likely(mutex_trylock(&maple_keyb_mutex))) {
139 150
@@ -146,106 +157,96 @@ static void dc_kbd_callback(struct mapleq *mq)
146 } 157 }
147} 158}
148 159
149static int dc_kbd_connect(struct maple_device *mdev) 160static int probe_maple_kbd(struct device *dev)
150{ 161{
162 struct maple_device *mdev = to_maple_dev(dev);
163 struct maple_driver *mdrv = to_maple_driver(dev->driver);
151 int i, error; 164 int i, error;
152 struct dc_kbd *kbd; 165 struct dc_kbd *kbd;
153 struct input_dev *dev; 166 struct input_dev *idev;
154 167
155 if (!(mdev->function & MAPLE_FUNC_KEYBOARD)) 168 if (!(mdev->function & MAPLE_FUNC_KEYBOARD))
156 return -EINVAL; 169 return -EINVAL;
157 170
158 kbd = kzalloc(sizeof(struct dc_kbd), GFP_KERNEL); 171 kbd = kzalloc(sizeof(struct dc_kbd), GFP_KERNEL);
159 dev = input_allocate_device(); 172 idev = input_allocate_device();
160 if (!kbd || !dev) { 173 if (!kbd || !idev) {
161 error = -ENOMEM; 174 error = -ENOMEM;
162 goto fail; 175 goto fail;
163 } 176 }
164 177
165 mdev->private_data = kbd; 178 kbd->dev = idev;
166
167 kbd->dev = dev;
168 memcpy(kbd->keycode, dc_kbd_keycode, sizeof(kbd->keycode)); 179 memcpy(kbd->keycode, dc_kbd_keycode, sizeof(kbd->keycode));
169 180
170 dev->name = mdev->product_name; 181 idev->name = mdev->product_name;
171 dev->evbit[0] = BIT(EV_KEY) | BIT(EV_REP); 182 idev->evbit[0] = BIT(EV_KEY) | BIT(EV_REP);
172 dev->keycode = kbd->keycode; 183 idev->keycode = kbd->keycode;
173 dev->keycodesize = sizeof (unsigned short); 184 idev->keycodesize = sizeof(unsigned short);
174 dev->keycodemax = ARRAY_SIZE(kbd->keycode); 185 idev->keycodemax = ARRAY_SIZE(kbd->keycode);
175 dev->id.bustype = BUS_HOST; 186 idev->id.bustype = BUS_HOST;
176 dev->dev.parent = &mdev->dev; 187 idev->dev.parent = &mdev->dev;
177 188
178 for (i = 0; i < NR_SCANCODES; i++) 189 for (i = 0; i < NR_SCANCODES; i++)
179 __set_bit(dc_kbd_keycode[i], dev->keybit); 190 __set_bit(dc_kbd_keycode[i], idev->keybit);
180 __clear_bit(KEY_RESERVED, dev->keybit); 191 __clear_bit(KEY_RESERVED, idev->keybit);
181 192
182 input_set_capability(dev, EV_MSC, MSC_SCAN); 193 input_set_capability(idev, EV_MSC, MSC_SCAN);
183 input_set_drvdata(dev, kbd); 194 input_set_drvdata(idev, kbd);
184 195
185 error = input_register_device(dev); 196 error = input_register_device(idev);
186 if (error) 197 if (error)
187 goto fail; 198 goto fail;
188 199
189 /* Maple polling is locked to VBLANK - which may be just 50/s */ 200 /* Maple polling is locked to VBLANK - which may be just 50/s */
190 maple_getcond_callback(mdev, dc_kbd_callback, HZ/50, MAPLE_FUNC_KEYBOARD); 201 maple_getcond_callback(mdev, dc_kbd_callback, HZ/50,
191 return 0; 202 MAPLE_FUNC_KEYBOARD);
192 203
193 fail: 204 mdev->driver = mdrv;
194 input_free_device(dev); 205
206 maple_set_drvdata(mdev, kbd);
207
208 return error;
209
210fail:
211 input_free_device(idev);
195 kfree(kbd); 212 kfree(kbd);
196 mdev->private_data = NULL; 213 maple_set_drvdata(mdev, NULL);
197 return error; 214 return error;
198} 215}
199 216
200static void dc_kbd_disconnect(struct maple_device *mdev) 217static int remove_maple_kbd(struct device *dev)
201{ 218{
202 struct dc_kbd *kbd; 219 struct maple_device *mdev = to_maple_dev(dev);
220 struct dc_kbd *kbd = maple_get_drvdata(mdev);
203 221
204 mutex_lock(&maple_keyb_mutex); 222 mutex_lock(&maple_keyb_mutex);
205 223
206 kbd = mdev->private_data;
207 mdev->private_data = NULL;
208 input_unregister_device(kbd->dev); 224 input_unregister_device(kbd->dev);
209 kfree(kbd); 225 kfree(kbd);
210 226
211 mutex_unlock(&maple_keyb_mutex); 227 maple_set_drvdata(mdev, NULL);
212}
213
214/* allow the keyboard to be used */
215static int probe_maple_kbd(struct device *dev)
216{
217 struct maple_device *mdev = to_maple_dev(dev);
218 struct maple_driver *mdrv = to_maple_driver(dev->driver);
219 int error;
220
221 error = dc_kbd_connect(mdev);
222 if (error)
223 return error;
224
225 mdev->driver = mdrv;
226 mdev->registered = 1;
227 228
229 mutex_unlock(&maple_keyb_mutex);
228 return 0; 230 return 0;
229} 231}
230 232
231static struct maple_driver dc_kbd_driver = { 233static struct maple_driver dc_kbd_driver = {
232 .function = MAPLE_FUNC_KEYBOARD, 234 .function = MAPLE_FUNC_KEYBOARD,
233 .connect = dc_kbd_connect,
234 .disconnect = dc_kbd_disconnect,
235 .drv = { 235 .drv = {
236 .name = "Dreamcast_keyboard", 236 .name = "Dreamcast_keyboard",
237 .probe = probe_maple_kbd, 237 .probe = probe_maple_kbd,
238 }, 238 .remove = remove_maple_kbd,
239 },
239}; 240};
240 241
241static int __init dc_kbd_init(void) 242static int __init dc_kbd_init(void)
242{ 243{
243 return maple_driver_register(&dc_kbd_driver.drv); 244 return maple_driver_register(&dc_kbd_driver);
244} 245}
245 246
246static void __exit dc_kbd_exit(void) 247static void __exit dc_kbd_exit(void)
247{ 248{
248 driver_unregister(&dc_kbd_driver.drv); 249 maple_driver_unregister(&dc_kbd_driver);
249} 250}
250 251
251module_init(dc_kbd_init); 252module_init(dc_kbd_init);
diff --git a/drivers/input/keyboard/omap-keypad.c b/drivers/input/keyboard/omap-keypad.c
index 10afd2068068..dcea87a0bc56 100644
--- a/drivers/input/keyboard/omap-keypad.c
+++ b/drivers/input/keyboard/omap-keypad.c
@@ -34,14 +34,13 @@
34#include <linux/platform_device.h> 34#include <linux/platform_device.h>
35#include <linux/mutex.h> 35#include <linux/mutex.h>
36#include <linux/errno.h> 36#include <linux/errno.h>
37#include <asm/arch/gpio.h> 37#include <mach/gpio.h>
38#include <asm/arch/keypad.h> 38#include <mach/keypad.h>
39#include <asm/arch/menelaus.h> 39#include <mach/menelaus.h>
40#include <asm/irq.h> 40#include <asm/irq.h>
41#include <asm/hardware.h> 41#include <mach/hardware.h>
42#include <asm/io.h> 42#include <asm/io.h>
43#include <asm/mach-types.h> 43#include <mach/mux.h>
44#include <asm/arch/mux.h>
45 44
46#undef NEW_BOARD_LEARNING_MODE 45#undef NEW_BOARD_LEARNING_MODE
47 46
diff --git a/drivers/input/keyboard/pxa27x_keypad.c b/drivers/input/keyboard/pxa27x_keypad.c
index 6f1516f50750..6d30c6d334c3 100644
--- a/drivers/input/keyboard/pxa27x_keypad.c
+++ b/drivers/input/keyboard/pxa27x_keypad.c
@@ -26,12 +26,11 @@
26#include <linux/clk.h> 26#include <linux/clk.h>
27#include <linux/err.h> 27#include <linux/err.h>
28 28
29#include <asm/mach-types.h>
30#include <asm/mach/arch.h> 29#include <asm/mach/arch.h>
31#include <asm/mach/map.h> 30#include <asm/mach/map.h>
32 31
33#include <asm/arch/hardware.h> 32#include <mach/hardware.h>
34#include <asm/arch/pxa27x_keypad.h> 33#include <mach/pxa27x_keypad.h>
35 34
36/* 35/*
37 * Keypad Controller registers 36 * Keypad Controller registers
diff --git a/drivers/input/keyboard/spitzkbd.c b/drivers/input/keyboard/spitzkbd.c
index 1aa37181c40f..de67b8e0a799 100644
--- a/drivers/input/keyboard/spitzkbd.c
+++ b/drivers/input/keyboard/spitzkbd.c
@@ -20,10 +20,10 @@
20#include <linux/module.h> 20#include <linux/module.h>
21#include <linux/slab.h> 21#include <linux/slab.h>
22 22
23#include <asm/arch/spitz.h> 23#include <mach/spitz.h>
24#include <asm/arch/hardware.h> 24#include <mach/hardware.h>
25#include <asm/arch/pxa-regs.h> 25#include <mach/pxa-regs.h>
26#include <asm/arch/pxa2xx-gpio.h> 26#include <mach/pxa2xx-gpio.h>
27 27
28#define KB_ROWS 7 28#define KB_ROWS 7
29#define KB_COLS 11 29#define KB_COLS 11
diff --git a/drivers/input/keyboard/tosakbd.c b/drivers/input/keyboard/tosakbd.c
index b12b7ee4b6aa..44cb50af3ce9 100644
--- a/drivers/input/keyboard/tosakbd.c
+++ b/drivers/input/keyboard/tosakbd.c
@@ -19,8 +19,8 @@
19#include <linux/delay.h> 19#include <linux/delay.h>
20#include <linux/interrupt.h> 20#include <linux/interrupt.h>
21 21
22#include <asm/arch/gpio.h> 22#include <mach/gpio.h>
23#include <asm/arch/tosa.h> 23#include <mach/tosa.h>
24 24
25#define KB_ROWMASK(r) (1 << (r)) 25#define KB_ROWMASK(r) (1 << (r))
26#define SCANCODE(r, c) (((r)<<4) + (c) + 1) 26#define SCANCODE(r, c) (((r)<<4) + (c) + 1)
diff --git a/drivers/input/misc/ixp4xx-beeper.c b/drivers/input/misc/ixp4xx-beeper.c
index 798d84c44d03..9946d73624b9 100644
--- a/drivers/input/misc/ixp4xx-beeper.c
+++ b/drivers/input/misc/ixp4xx-beeper.c
@@ -20,7 +20,7 @@
20#include <linux/delay.h> 20#include <linux/delay.h>
21#include <linux/platform_device.h> 21#include <linux/platform_device.h>
22#include <linux/interrupt.h> 22#include <linux/interrupt.h>
23#include <asm/hardware.h> 23#include <mach/hardware.h>
24 24
25MODULE_AUTHOR("Alessandro Zummo <a.zummo@towertech.it>"); 25MODULE_AUTHOR("Alessandro Zummo <a.zummo@towertech.it>");
26MODULE_DESCRIPTION("ixp4xx beeper driver"); 26MODULE_DESCRIPTION("ixp4xx beeper driver");
diff --git a/drivers/input/mouse/rpcmouse.c b/drivers/input/mouse/rpcmouse.c
index 18a48636ba4a..56c079ef5018 100644
--- a/drivers/input/mouse/rpcmouse.c
+++ b/drivers/input/mouse/rpcmouse.c
@@ -23,7 +23,7 @@
23#include <linux/init.h> 23#include <linux/init.h>
24#include <linux/input.h> 24#include <linux/input.h>
25 25
26#include <asm/hardware.h> 26#include <mach/hardware.h>
27#include <asm/irq.h> 27#include <asm/irq.h>
28#include <asm/io.h> 28#include <asm/io.h>
29#include <asm/hardware/iomd.h> 29#include <asm/hardware/iomd.h>
diff --git a/drivers/input/serio/i8042-sparcio.h b/drivers/input/serio/i8042-sparcio.h
index d9ca55891cd7..66bafe308b0c 100644
--- a/drivers/input/serio/i8042-sparcio.h
+++ b/drivers/input/serio/i8042-sparcio.h
@@ -41,6 +41,8 @@ static inline void i8042_write_command(int val)
41 writeb(val, kbd_iobase + 0x64UL); 41 writeb(val, kbd_iobase + 0x64UL);
42} 42}
43 43
44#ifdef CONFIG_PCI
45
44#define OBP_PS2KBD_NAME1 "kb_ps2" 46#define OBP_PS2KBD_NAME1 "kb_ps2"
45#define OBP_PS2KBD_NAME2 "keyboard" 47#define OBP_PS2KBD_NAME2 "keyboard"
46#define OBP_PS2MS_NAME1 "kdmouse" 48#define OBP_PS2MS_NAME1 "kdmouse"
@@ -101,9 +103,6 @@ static struct of_platform_driver sparc_i8042_driver = {
101 103
102static int __init i8042_platform_init(void) 104static int __init i8042_platform_init(void)
103{ 105{
104#ifndef CONFIG_PCI
105 return -ENODEV;
106#else
107 struct device_node *root = of_find_node_by_path("/"); 106 struct device_node *root = of_find_node_by_path("/");
108 107
109 if (!strcmp(root->name, "SUNW,JavaStation-1")) { 108 if (!strcmp(root->name, "SUNW,JavaStation-1")) {
@@ -131,17 +130,25 @@ static int __init i8042_platform_init(void)
131 i8042_reset = 1; 130 i8042_reset = 1;
132 131
133 return 0; 132 return 0;
134#endif /* CONFIG_PCI */
135} 133}
136 134
137static inline void i8042_platform_exit(void) 135static inline void i8042_platform_exit(void)
138{ 136{
139#ifdef CONFIG_PCI
140 struct device_node *root = of_find_node_by_path("/"); 137 struct device_node *root = of_find_node_by_path("/");
141 138
142 if (strcmp(root->name, "SUNW,JavaStation-1")) 139 if (strcmp(root->name, "SUNW,JavaStation-1"))
143 of_unregister_driver(&sparc_i8042_driver); 140 of_unregister_driver(&sparc_i8042_driver);
144#endif
145} 141}
146 142
143#else /* !CONFIG_PCI */
144static int __init i8042_platform_init(void)
145{
146 return -ENODEV;
147}
148
149static inline void i8042_platform_exit(void)
150{
151}
152#endif /* !CONFIG_PCI */
153
147#endif /* _I8042_SPARCIO_H */ 154#endif /* _I8042_SPARCIO_H */
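
Aside: the i8042-sparcio.h change above lifts the CONFIG_PCI checks out of the function bodies and instead compiles either the real i8042_platform_init()/i8042_platform_exit() or empty stubs, selected once at file scope. A minimal standalone sketch of that pattern, with HAVE_FEATURE standing in for the Kconfig symbol (illustration only, not part of the commit):

#include <stdio.h>

#define HAVE_FEATURE 1			/* flip to 0 to build the stubs instead */

#if HAVE_FEATURE
static int feature_init(void)
{
	printf("real init\n");
	return 0;
}

static void feature_exit(void)
{
	printf("real exit\n");
}
#else /* !HAVE_FEATURE */
static int feature_init(void)
{
	return -1;			/* mirrors the stub returning -ENODEV */
}

static void feature_exit(void)
{
}
#endif /* !HAVE_FEATURE */

int main(void)
{
	if (feature_init() == 0)
		feature_exit();
	return 0;
}
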
diff --git a/drivers/input/serio/rpckbd.c b/drivers/input/serio/rpckbd.c
index 1567b7782478..7f36edd34f8b 100644
--- a/drivers/input/serio/rpckbd.c
+++ b/drivers/input/serio/rpckbd.c
@@ -35,7 +35,7 @@
35#include <linux/platform_device.h> 35#include <linux/platform_device.h>
36 36
37#include <asm/irq.h> 37#include <asm/irq.h>
38#include <asm/hardware.h> 38#include <mach/hardware.h>
39#include <asm/io.h> 39#include <asm/io.h>
40#include <asm/hardware/iomd.h> 40#include <asm/hardware/iomd.h>
41#include <asm/system.h> 41#include <asm/system.h>
diff --git a/drivers/input/touchscreen/corgi_ts.c b/drivers/input/touchscreen/corgi_ts.c
index d0e13fc4a88c..65202c9f63ff 100644
--- a/drivers/input/touchscreen/corgi_ts.c
+++ b/drivers/input/touchscreen/corgi_ts.c
@@ -19,10 +19,10 @@
19#include <linux/slab.h> 19#include <linux/slab.h>
20#include <linux/irq.h> 20#include <linux/irq.h>
21 21
22#include <asm/arch/sharpsl.h> 22#include <mach/sharpsl.h>
23#include <asm/arch/hardware.h> 23#include <mach/hardware.h>
24#include <asm/arch/pxa-regs.h> 24#include <mach/pxa-regs.h>
25#include <asm/arch/pxa2xx-gpio.h> 25#include <mach/pxa2xx-gpio.h>
26 26
27 27
28#define PWR_MODE_ACTIVE 0 28#define PWR_MODE_ACTIVE 0
diff --git a/drivers/input/touchscreen/h3600_ts_input.c b/drivers/input/touchscreen/h3600_ts_input.c
index 4f86081dc7fc..4d3139e2099d 100644
--- a/drivers/input/touchscreen/h3600_ts_input.c
+++ b/drivers/input/touchscreen/h3600_ts_input.c
@@ -39,8 +39,8 @@
39#include <linux/delay.h> 39#include <linux/delay.h>
40 40
41/* SA1100 serial defines */ 41/* SA1100 serial defines */
42#include <asm/arch/hardware.h> 42#include <mach/hardware.h>
43#include <asm/arch/irqs.h> 43#include <mach/irqs.h>
44 44
45#define DRIVER_DESC "H3600 touchscreen driver" 45#define DRIVER_DESC "H3600 touchscreen driver"
46 46
diff --git a/drivers/input/touchscreen/jornada720_ts.c b/drivers/input/touchscreen/jornada720_ts.c
index 1aca108b1031..bf44f9d68342 100644
--- a/drivers/input/touchscreen/jornada720_ts.c
+++ b/drivers/input/touchscreen/jornada720_ts.c
@@ -19,8 +19,8 @@
19#include <linux/interrupt.h> 19#include <linux/interrupt.h>
20#include <linux/module.h> 20#include <linux/module.h>
21 21
22#include <asm/hardware.h> 22#include <mach/hardware.h>
23#include <asm/arch/jornada720.h> 23#include <mach/jornada720.h>
24 24
25MODULE_AUTHOR("Kristoffer Ericson <kristoffer.ericson@gmail.com>"); 25MODULE_AUTHOR("Kristoffer Ericson <kristoffer.ericson@gmail.com>");
26MODULE_DESCRIPTION("HP Jornada 710/720/728 touchscreen driver"); 26MODULE_DESCRIPTION("HP Jornada 710/720/728 touchscreen driver");
diff --git a/drivers/input/touchscreen/mainstone-wm97xx.c b/drivers/input/touchscreen/mainstone-wm97xx.c
index 590a1379aa32..283f93a0cee2 100644
--- a/drivers/input/touchscreen/mainstone-wm97xx.c
+++ b/drivers/input/touchscreen/mainstone-wm97xx.c
@@ -33,7 +33,7 @@
33#include <linux/interrupt.h> 33#include <linux/interrupt.h>
34#include <linux/wm97xx.h> 34#include <linux/wm97xx.h>
35#include <linux/io.h> 35#include <linux/io.h>
36#include <asm/arch/pxa-regs.h> 36#include <mach/pxa-regs.h>
37 37
38#define VERSION "0.13" 38#define VERSION "0.13"
39 39
diff --git a/drivers/isdn/Makefile b/drivers/isdn/Makefile
index 8380a4568d11..f1f777570e8e 100644
--- a/drivers/isdn/Makefile
+++ b/drivers/isdn/Makefile
@@ -5,7 +5,7 @@
5obj-$(CONFIG_ISDN_I4L) += i4l/ 5obj-$(CONFIG_ISDN_I4L) += i4l/
6obj-$(CONFIG_ISDN_CAPI) += capi/ 6obj-$(CONFIG_ISDN_CAPI) += capi/
7obj-$(CONFIG_MISDN) += mISDN/ 7obj-$(CONFIG_MISDN) += mISDN/
8obj-$(CONFIG_ISDN_CAPI) += hardware/ 8obj-$(CONFIG_ISDN) += hardware/
9obj-$(CONFIG_ISDN_DIVERSION) += divert/ 9obj-$(CONFIG_ISDN_DIVERSION) += divert/
10obj-$(CONFIG_ISDN_DRV_HISAX) += hisax/ 10obj-$(CONFIG_ISDN_DRV_HISAX) += hisax/
11obj-$(CONFIG_ISDN_DRV_ICN) += icn/ 11obj-$(CONFIG_ISDN_DRV_ICN) += icn/
diff --git a/drivers/isdn/gigaset/isocdata.c b/drivers/isdn/gigaset/isocdata.c
index e30a7773f93c..fbce5222d83c 100644
--- a/drivers/isdn/gigaset/isocdata.c
+++ b/drivers/isdn/gigaset/isocdata.c
@@ -247,7 +247,6 @@ static inline void dump_bytes(enum debuglevel level, const char *tag,
247#ifdef CONFIG_GIGASET_DEBUG 247#ifdef CONFIG_GIGASET_DEBUG
248 unsigned char c; 248 unsigned char c;
249 static char dbgline[3 * 32 + 1]; 249 static char dbgline[3 * 32 + 1];
250 static const char hexdigit[] = "0123456789abcdef";
251 int i = 0; 250 int i = 0;
252 while (count-- > 0) { 251 while (count-- > 0) {
253 if (i > sizeof(dbgline) - 4) { 252 if (i > sizeof(dbgline) - 4) {
@@ -258,8 +257,8 @@ static inline void dump_bytes(enum debuglevel level, const char *tag,
258 c = *bytes++; 257 c = *bytes++;
259 dbgline[i] = (i && !(i % 12)) ? '-' : ' '; 258 dbgline[i] = (i && !(i % 12)) ? '-' : ' ';
260 i++; 259 i++;
261 dbgline[i++] = hexdigit[(c >> 4) & 0x0f]; 260 dbgline[i++] = hex_asc_hi(c);
262 dbgline[i++] = hexdigit[c & 0x0f]; 261 dbgline[i++] = hex_asc_lo(c);
263 } 262 }
264 dbgline[i] = '\0'; 263 dbgline[i] = '\0';
265 gig_dbg(level, "%s:%s", tag, dbgline); 264 gig_dbg(level, "%s:%s", tag, dbgline);
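
Aside: the isocdata.c hunk above drops the driver's private hexdigit[] lookup table in favour of the kernel's hex_asc_hi()/hex_asc_lo() helpers, which return the lowercase ASCII hex digit for a byte's high and low nibble. A small userspace re-creation of the same idea (illustration only, not the kernel definitions):

#include <stdio.h>

static const char hex_asc[] = "0123456789abcdef";
#define hex_asc_lo(x)	hex_asc[(x) & 0x0f]
#define hex_asc_hi(x)	hex_asc[((x) & 0xf0) >> 4]

int main(void)
{
	unsigned char c = 0xa7;

	printf("%c%c\n", hex_asc_hi(c), hex_asc_lo(c));	/* prints "a7" */
	return 0;
}
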
diff --git a/drivers/isdn/hardware/mISDN/hfcmulti.c b/drivers/isdn/hardware/mISDN/hfcmulti.c
index 2649ea55a9e8..1eac03f39d00 100644
--- a/drivers/isdn/hardware/mISDN/hfcmulti.c
+++ b/drivers/isdn/hardware/mISDN/hfcmulti.c
@@ -140,7 +140,7 @@
140 * #define HFC_REGISTER_DEBUG 140 * #define HFC_REGISTER_DEBUG
141 */ 141 */
142 142
143static const char *hfcmulti_revision = "2.00"; 143static const char *hfcmulti_revision = "2.02";
144 144
145#include <linux/module.h> 145#include <linux/module.h>
146#include <linux/pci.h> 146#include <linux/pci.h>
@@ -427,12 +427,12 @@ write_fifo_regio(struct hfc_multi *hc, u_char *data, int len)
427{ 427{
428 outb(A_FIFO_DATA0, (hc->pci_iobase)+4); 428 outb(A_FIFO_DATA0, (hc->pci_iobase)+4);
429 while (len>>2) { 429 while (len>>2) {
430 outl(*(u32 *)data, hc->pci_iobase); 430 outl(cpu_to_le32(*(u32 *)data), hc->pci_iobase);
431 data += 4; 431 data += 4;
432 len -= 4; 432 len -= 4;
433 } 433 }
434 while (len>>1) { 434 while (len>>1) {
435 outw(*(u16 *)data, hc->pci_iobase); 435 outw(cpu_to_le16(*(u16 *)data), hc->pci_iobase);
436 data += 2; 436 data += 2;
437 len -= 2; 437 len -= 2;
438 } 438 }
@@ -447,17 +447,19 @@ void
447write_fifo_pcimem(struct hfc_multi *hc, u_char *data, int len) 447write_fifo_pcimem(struct hfc_multi *hc, u_char *data, int len)
448{ 448{
449 while (len>>2) { 449 while (len>>2) {
450 writel(*(u32 *)data, (hc->pci_membase)+A_FIFO_DATA0); 450 writel(cpu_to_le32(*(u32 *)data),
451 hc->pci_membase + A_FIFO_DATA0);
451 data += 4; 452 data += 4;
452 len -= 4; 453 len -= 4;
453 } 454 }
454 while (len>>1) { 455 while (len>>1) {
455 writew(*(u16 *)data, (hc->pci_membase)+A_FIFO_DATA0); 456 writew(cpu_to_le16(*(u16 *)data),
457 hc->pci_membase + A_FIFO_DATA0);
456 data += 2; 458 data += 2;
457 len -= 2; 459 len -= 2;
458 } 460 }
459 while (len) { 461 while (len) {
460 writeb(*data, (hc->pci_membase)+A_FIFO_DATA0); 462 writeb(*data, hc->pci_membase + A_FIFO_DATA0);
461 data++; 463 data++;
462 len--; 464 len--;
463 } 465 }
@@ -468,12 +470,12 @@ read_fifo_regio(struct hfc_multi *hc, u_char *data, int len)
468{ 470{
469 outb(A_FIFO_DATA0, (hc->pci_iobase)+4); 471 outb(A_FIFO_DATA0, (hc->pci_iobase)+4);
470 while (len>>2) { 472 while (len>>2) {
471 *(u32 *)data = inl(hc->pci_iobase); 473 *(u32 *)data = le32_to_cpu(inl(hc->pci_iobase));
472 data += 4; 474 data += 4;
473 len -= 4; 475 len -= 4;
474 } 476 }
475 while (len>>1) { 477 while (len>>1) {
476 *(u16 *)data = inw(hc->pci_iobase); 478 *(u16 *)data = le16_to_cpu(inw(hc->pci_iobase));
477 data += 2; 479 data += 2;
478 len -= 2; 480 len -= 2;
479 } 481 }
@@ -490,18 +492,18 @@ read_fifo_pcimem(struct hfc_multi *hc, u_char *data, int len)
490{ 492{
491 while (len>>2) { 493 while (len>>2) {
492 *(u32 *)data = 494 *(u32 *)data =
493 readl((hc->pci_membase)+A_FIFO_DATA0); 495 le32_to_cpu(readl(hc->pci_membase + A_FIFO_DATA0));
494 data += 4; 496 data += 4;
495 len -= 4; 497 len -= 4;
496 } 498 }
497 while (len>>1) { 499 while (len>>1) {
498 *(u16 *)data = 500 *(u16 *)data =
499 readw((hc->pci_membase)+A_FIFO_DATA0); 501 le16_to_cpu(readw(hc->pci_membase + A_FIFO_DATA0));
500 data += 2; 502 data += 2;
501 len -= 2; 503 len -= 2;
502 } 504 }
503 while (len) { 505 while (len) {
504 *data = readb((hc->pci_membase)+A_FIFO_DATA0); 506 *data = readb(hc->pci_membase + A_FIFO_DATA0);
505 data++; 507 data++;
506 len--; 508 len--;
507 } 509 }
@@ -3971,7 +3973,7 @@ open_bchannel(struct hfc_multi *hc, struct dchannel *dch,
3971 struct bchannel *bch; 3973 struct bchannel *bch;
3972 int ch; 3974 int ch;
3973 3975
3974 if (!test_bit(rq->adr.channel, &dch->dev.channelmap[0])) 3976 if (!test_channelmap(rq->adr.channel, dch->dev.channelmap))
3975 return -EINVAL; 3977 return -EINVAL;
3976 if (rq->protocol == ISDN_P_NONE) 3978 if (rq->protocol == ISDN_P_NONE)
3977 return -EINVAL; 3979 return -EINVAL;
@@ -4587,7 +4589,7 @@ init_e1_port(struct hfc_multi *hc, struct hm_map *m)
4587 list_add(&bch->ch.list, &dch->dev.bchannels); 4589 list_add(&bch->ch.list, &dch->dev.bchannels);
4588 hc->chan[ch].bch = bch; 4590 hc->chan[ch].bch = bch;
4589 hc->chan[ch].port = 0; 4591 hc->chan[ch].port = 0;
4590 test_and_set_bit(bch->nr, &dch->dev.channelmap[0]); 4592 set_channelmap(bch->nr, dch->dev.channelmap);
4591 } 4593 }
4592 /* set optical line type */ 4594 /* set optical line type */
4593 if (port[Port_cnt] & 0x001) { 4595 if (port[Port_cnt] & 0x001) {
@@ -4755,7 +4757,7 @@ init_multi_port(struct hfc_multi *hc, int pt)
4755 list_add(&bch->ch.list, &dch->dev.bchannels); 4757 list_add(&bch->ch.list, &dch->dev.bchannels);
4756 hc->chan[i + ch].bch = bch; 4758 hc->chan[i + ch].bch = bch;
4757 hc->chan[i + ch].port = pt; 4759 hc->chan[i + ch].port = pt;
4758 test_and_set_bit(bch->nr, &dch->dev.channelmap[0]); 4760 set_channelmap(bch->nr, dch->dev.channelmap);
4759 } 4761 }
4760 /* set master clock */ 4762 /* set master clock */
4761 if (port[Port_cnt] & 0x001) { 4763 if (port[Port_cnt] & 0x001) {
@@ -5050,12 +5052,12 @@ static void __devexit hfc_remove_pci(struct pci_dev *pdev)
5050 5052
5051static const struct hm_map hfcm_map[] = { 5053static const struct hm_map hfcm_map[] = {
5052/*0*/ {VENDOR_BN, "HFC-1S Card (mini PCI)", 4, 1, 1, 3, 0, DIP_4S, 0}, 5054/*0*/ {VENDOR_BN, "HFC-1S Card (mini PCI)", 4, 1, 1, 3, 0, DIP_4S, 0},
5053/*1*/ {VENDOR_BN, "HFC-2S Card", 4, 2, 1, 3, 0, DIP_4S}, 5055/*1*/ {VENDOR_BN, "HFC-2S Card", 4, 2, 1, 3, 0, DIP_4S, 0},
5054/*2*/ {VENDOR_BN, "HFC-2S Card (mini PCI)", 4, 2, 1, 3, 0, DIP_4S, 0}, 5056/*2*/ {VENDOR_BN, "HFC-2S Card (mini PCI)", 4, 2, 1, 3, 0, DIP_4S, 0},
5055/*3*/ {VENDOR_BN, "HFC-4S Card", 4, 4, 1, 2, 0, DIP_4S, 0}, 5057/*3*/ {VENDOR_BN, "HFC-4S Card", 4, 4, 1, 2, 0, DIP_4S, 0},
5056/*4*/ {VENDOR_BN, "HFC-4S Card (mini PCI)", 4, 4, 1, 2, 0, 0, 0}, 5058/*4*/ {VENDOR_BN, "HFC-4S Card (mini PCI)", 4, 4, 1, 2, 0, 0, 0},
5057/*5*/ {VENDOR_CCD, "HFC-4S Eval (old)", 4, 4, 0, 0, 0, 0, 0}, 5059/*5*/ {VENDOR_CCD, "HFC-4S Eval (old)", 4, 4, 0, 0, 0, 0, 0},
5058/*6*/ {VENDOR_CCD, "HFC-4S IOB4ST", 4, 4, 1, 2, 0, 0, 0}, 5060/*6*/ {VENDOR_CCD, "HFC-4S IOB4ST", 4, 4, 1, 2, 0, DIP_4S, 0},
5059/*7*/ {VENDOR_CCD, "HFC-4S", 4, 4, 1, 2, 0, 0, 0}, 5061/*7*/ {VENDOR_CCD, "HFC-4S", 4, 4, 1, 2, 0, 0, 0},
5060/*8*/ {VENDOR_DIG, "HFC-4S Card", 4, 4, 0, 2, 0, 0, HFC_IO_MODE_REGIO}, 5062/*8*/ {VENDOR_DIG, "HFC-4S Card", 4, 4, 0, 2, 0, 0, HFC_IO_MODE_REGIO},
5061/*9*/ {VENDOR_CCD, "HFC-4S Swyx 4xS0 SX2 QuadBri", 4, 4, 1, 2, 0, 0, 0}, 5063/*9*/ {VENDOR_CCD, "HFC-4S Swyx 4xS0 SX2 QuadBri", 4, 4, 1, 2, 0, 0, 0},
@@ -5251,9 +5253,6 @@ HFCmulti_init(void)
5251 if (debug & DEBUG_HFCMULTI_INIT) 5253 if (debug & DEBUG_HFCMULTI_INIT)
5252 printk(KERN_DEBUG "%s: init entered\n", __func__); 5254 printk(KERN_DEBUG "%s: init entered\n", __func__);
5253 5255
5254#ifdef __BIG_ENDIAN
5255#error "not running on big endian machines now"
5256#endif
5257 hfc_interrupt = symbol_get(ztdummy_extern_interrupt); 5256 hfc_interrupt = symbol_get(ztdummy_extern_interrupt);
5258 register_interrupt = symbol_get(ztdummy_register_interrupt); 5257 register_interrupt = symbol_get(ztdummy_register_interrupt);
5259 unregister_interrupt = symbol_get(ztdummy_unregister_interrupt); 5258 unregister_interrupt = symbol_get(ztdummy_unregister_interrupt);
diff --git a/drivers/isdn/hardware/mISDN/hfcpci.c b/drivers/isdn/hardware/mISDN/hfcpci.c
index 3231814e7efa..9cf5edbb1a9b 100644
--- a/drivers/isdn/hardware/mISDN/hfcpci.c
+++ b/drivers/isdn/hardware/mISDN/hfcpci.c
@@ -2056,7 +2056,7 @@ setup_card(struct hfc_pci *card)
2056 card->dch.dev.nrbchan = 2; 2056 card->dch.dev.nrbchan = 2;
2057 for (i = 0; i < 2; i++) { 2057 for (i = 0; i < 2; i++) {
2058 card->bch[i].nr = i + 1; 2058 card->bch[i].nr = i + 1;
2059 test_and_set_bit(i + 1, &card->dch.dev.channelmap[0]); 2059 set_channelmap(i + 1, card->dch.dev.channelmap);
2060 card->bch[i].debug = debug; 2060 card->bch[i].debug = debug;
2061 mISDN_initbchannel(&card->bch[i], MAX_DATA_MEM); 2061 mISDN_initbchannel(&card->bch[i], MAX_DATA_MEM);
2062 card->bch[i].hw = card; 2062 card->bch[i].hw = card;
diff --git a/drivers/isdn/hysdn/hysdn_pof.h b/drivers/isdn/hysdn/hysdn_pof.h
index a368d6caca0e..3a72b908900f 100644
--- a/drivers/isdn/hysdn/hysdn_pof.h
+++ b/drivers/isdn/hysdn/hysdn_pof.h
@@ -60,7 +60,7 @@ typedef struct PofRecHdr_tag { /* Pof record header */
60 60
61typedef struct PofTimeStamp_tag { 61typedef struct PofTimeStamp_tag {
62/*00 */ unsigned long UnixTime __attribute__((packed)); 62/*00 */ unsigned long UnixTime __attribute__((packed));
63 /*04 */ unsigned char DateTimeText[0x28] __attribute__((packed)); 63 /*04 */ unsigned char DateTimeText[0x28];
64 /* =40 */ 64 /* =40 */
65/*2C */ 65/*2C */
66} tPofTimeStamp; 66} tPofTimeStamp;
diff --git a/drivers/isdn/mISDN/l1oip_core.c b/drivers/isdn/mISDN/l1oip_core.c
index 155b99780c4f..e42150a57780 100644
--- a/drivers/isdn/mISDN/l1oip_core.c
+++ b/drivers/isdn/mISDN/l1oip_core.c
@@ -1006,8 +1006,7 @@ open_bchannel(struct l1oip *hc, struct dchannel *dch, struct channel_req *rq)
1006 struct bchannel *bch; 1006 struct bchannel *bch;
1007 int ch; 1007 int ch;
1008 1008
1009 if (!test_bit(rq->adr.channel & 0x1f, 1009 if (!test_channelmap(rq->adr.channel, dch->dev.channelmap))
1010 &dch->dev.channelmap[rq->adr.channel >> 5]))
1011 return -EINVAL; 1010 return -EINVAL;
1012 if (rq->protocol == ISDN_P_NONE) 1011 if (rq->protocol == ISDN_P_NONE)
1013 return -EINVAL; 1012 return -EINVAL;
@@ -1412,8 +1411,7 @@ init_card(struct l1oip *hc, int pri, int bundle)
1412 bch->ch.nr = i + ch; 1411 bch->ch.nr = i + ch;
1413 list_add(&bch->ch.list, &dch->dev.bchannels); 1412 list_add(&bch->ch.list, &dch->dev.bchannels);
1414 hc->chan[i + ch].bch = bch; 1413 hc->chan[i + ch].bch = bch;
1415 test_and_set_bit(bch->nr & 0x1f, 1414 set_channelmap(bch->nr, dch->dev.channelmap);
1416 &dch->dev.channelmap[bch->nr >> 5]);
1417 } 1415 }
1418 ret = mISDN_register_device(&dch->dev, hc->name); 1416 ret = mISDN_register_device(&dch->dev, hc->name);
1419 if (ret) 1417 if (ret)
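
Aside: the hfcmulti.c, hfcpci.c and l1oip_core.c hunks above replace open-coded bit operations on channelmap[0] (and, in l1oip, the manual "& 0x1f" / ">> 5" word arithmetic) with the set_channelmap()/test_channelmap() helpers, so the word-and-bit indexing lives in one place. A hypothetical standalone version showing the shape of such helpers, with the array size assumed for the sketch (illustration only, not the mISDN definitions):

#include <stdio.h>

#define CHMAP_WORDS 4				/* assumed size, sketch only */

static void set_channelmap_sketch(unsigned int nr, unsigned int *map)
{
	map[nr / 32] |= 1u << (nr % 32);
}

static int test_channelmap_sketch(unsigned int nr, const unsigned int *map)
{
	return (map[nr / 32] >> (nr % 32)) & 1;
}

int main(void)
{
	unsigned int map[CHMAP_WORDS] = { 0 };

	set_channelmap_sketch(37, map);		/* lands in word 1, bit 5 */
	printf("bit 37 set: %d, word 1 = 0x%08x\n",
	       test_channelmap_sketch(37, map), map[1]);
	return 0;
}
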
diff --git a/drivers/isdn/mISDN/socket.c b/drivers/isdn/mISDN/socket.c
index 4ba4cc364c9e..e5a20f9542d1 100644
--- a/drivers/isdn/mISDN/socket.c
+++ b/drivers/isdn/mISDN/socket.c
@@ -379,7 +379,7 @@ data_sock_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
379 di.Bprotocols = dev->Bprotocols | get_all_Bprotocols(); 379 di.Bprotocols = dev->Bprotocols | get_all_Bprotocols();
380 di.protocol = dev->D.protocol; 380 di.protocol = dev->D.protocol;
381 memcpy(di.channelmap, dev->channelmap, 381 memcpy(di.channelmap, dev->channelmap,
382 MISDN_CHMAP_SIZE * 4); 382 sizeof(di.channelmap));
383 di.nrbchan = dev->nrbchan; 383 di.nrbchan = dev->nrbchan;
384 strcpy(di.name, dev->name); 384 strcpy(di.name, dev->name);
385 if (copy_to_user((void __user *)arg, &di, sizeof(di))) 385 if (copy_to_user((void __user *)arg, &di, sizeof(di)))
@@ -637,7 +637,7 @@ base_sock_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
637 di.Bprotocols = dev->Bprotocols | get_all_Bprotocols(); 637 di.Bprotocols = dev->Bprotocols | get_all_Bprotocols();
638 di.protocol = dev->D.protocol; 638 di.protocol = dev->D.protocol;
639 memcpy(di.channelmap, dev->channelmap, 639 memcpy(di.channelmap, dev->channelmap,
640 MISDN_CHMAP_SIZE * 4); 640 sizeof(di.channelmap));
641 di.nrbchan = dev->nrbchan; 641 di.nrbchan = dev->nrbchan;
642 strcpy(di.name, dev->name); 642 strcpy(di.name, dev->name);
643 if (copy_to_user((void __user *)arg, &di, sizeof(di))) 643 if (copy_to_user((void __user *)arg, &di, sizeof(di)))
diff --git a/drivers/leds/leds-ams-delta.c b/drivers/leds/leds-ams-delta.c
index c37bb0d5a0c5..32c98b2efa3f 100644
--- a/drivers/leds/leds-ams-delta.c
+++ b/drivers/leds/leds-ams-delta.c
@@ -12,7 +12,7 @@
12#include <linux/init.h> 12#include <linux/init.h>
13#include <linux/platform_device.h> 13#include <linux/platform_device.h>
14#include <linux/leds.h> 14#include <linux/leds.h>
15#include <asm/arch/board-ams-delta.h> 15#include <mach/board-ams-delta.h>
16 16
17/* 17/*
18 * Our context 18 * Our context
diff --git a/drivers/leds/leds-cm-x270.c b/drivers/leds/leds-cm-x270.c
index accc7eddb788..836a43d776e6 100644
--- a/drivers/leds/leds-cm-x270.c
+++ b/drivers/leds/leds-cm-x270.c
@@ -18,8 +18,8 @@
18#include <linux/platform_device.h> 18#include <linux/platform_device.h>
19#include <linux/leds.h> 19#include <linux/leds.h>
20 20
21#include <asm/arch/hardware.h> 21#include <mach/hardware.h>
22#include <asm/arch/pxa-regs.h> 22#include <mach/pxa-regs.h>
23 23
24#define GPIO_RED_LED (93) 24#define GPIO_RED_LED (93)
25#define GPIO_GREEN_LED (94) 25#define GPIO_GREEN_LED (94)
diff --git a/drivers/leds/leds-corgi.c b/drivers/leds/leds-corgi.c
index a709704b9f93..bc2dcd89f635 100644
--- a/drivers/leds/leds-corgi.c
+++ b/drivers/leds/leds-corgi.c
@@ -15,10 +15,9 @@
15#include <linux/init.h> 15#include <linux/init.h>
16#include <linux/platform_device.h> 16#include <linux/platform_device.h>
17#include <linux/leds.h> 17#include <linux/leds.h>
18#include <asm/mach-types.h> 18#include <mach/corgi.h>
19#include <asm/arch/corgi.h> 19#include <mach/hardware.h>
20#include <asm/arch/hardware.h> 20#include <mach/pxa-regs.h>
21#include <asm/arch/pxa-regs.h>
22#include <asm/hardware/scoop.h> 21#include <asm/hardware/scoop.h>
23 22
24static void corgiled_amber_set(struct led_classdev *led_cdev, 23static void corgiled_amber_set(struct led_classdev *led_cdev,
diff --git a/drivers/leds/leds-fsg.c b/drivers/leds/leds-fsg.c
index a7421b8c47d8..be0e12144b8b 100644
--- a/drivers/leds/leds-fsg.c
+++ b/drivers/leds/leds-fsg.c
@@ -19,7 +19,7 @@
19#include <linux/init.h> 19#include <linux/init.h>
20#include <linux/platform_device.h> 20#include <linux/platform_device.h>
21#include <linux/leds.h> 21#include <linux/leds.h>
22#include <asm/arch/hardware.h> 22#include <mach/hardware.h>
23#include <asm/io.h> 23#include <asm/io.h>
24 24
25static short __iomem *latch_address; 25static short __iomem *latch_address;
diff --git a/drivers/leds/leds-h1940.c b/drivers/leds/leds-h1940.c
index 73c705021686..11b77a70bbcb 100644
--- a/drivers/leds/leds-h1940.c
+++ b/drivers/leds/leds-h1940.c
@@ -16,9 +16,9 @@
16#include <linux/string.h> 16#include <linux/string.h>
17#include <linux/ctype.h> 17#include <linux/ctype.h>
18#include <linux/leds.h> 18#include <linux/leds.h>
19#include <asm/arch/regs-gpio.h> 19#include <mach/regs-gpio.h>
20#include <asm/hardware.h> 20#include <mach/hardware.h>
21#include <asm/arch/h1940-latch.h> 21#include <mach/h1940-latch.h>
22 22
23/* 23/*
24 * Green led. 24 * Green led.
diff --git a/drivers/leds/leds-locomo.c b/drivers/leds/leds-locomo.c
index 7295f7f52185..5d91362e3066 100644
--- a/drivers/leds/leds-locomo.c
+++ b/drivers/leds/leds-locomo.c
@@ -13,7 +13,7 @@
13#include <linux/device.h> 13#include <linux/device.h>
14#include <linux/leds.h> 14#include <linux/leds.h>
15 15
16#include <asm/hardware.h> 16#include <mach/hardware.h>
17#include <asm/hardware/locomo.h> 17#include <asm/hardware/locomo.h>
18 18
19static void locomoled_brightness_set(struct led_classdev *led_cdev, 19static void locomoled_brightness_set(struct led_classdev *led_cdev,
diff --git a/drivers/leds/leds-s3c24xx.c b/drivers/leds/leds-s3c24xx.c
index d4f5021dccbf..25a07f2643ad 100644
--- a/drivers/leds/leds-s3c24xx.c
+++ b/drivers/leds/leds-s3c24xx.c
@@ -16,9 +16,9 @@
16#include <linux/platform_device.h> 16#include <linux/platform_device.h>
17#include <linux/leds.h> 17#include <linux/leds.h>
18 18
19#include <asm/hardware.h> 19#include <mach/hardware.h>
20#include <asm/arch/regs-gpio.h> 20#include <mach/regs-gpio.h>
21#include <asm/arch/leds-gpio.h> 21#include <mach/leds-gpio.h>
22 22
23/* our context */ 23/* our context */
24 24
diff --git a/drivers/leds/leds-spitz.c b/drivers/leds/leds-spitz.c
index e75e8543bc5a..178831c64bfb 100644
--- a/drivers/leds/leds-spitz.c
+++ b/drivers/leds/leds-spitz.c
@@ -17,9 +17,9 @@
17#include <linux/leds.h> 17#include <linux/leds.h>
18#include <asm/hardware/scoop.h> 18#include <asm/hardware/scoop.h>
19#include <asm/mach-types.h> 19#include <asm/mach-types.h>
20#include <asm/arch/hardware.h> 20#include <mach/hardware.h>
21#include <asm/arch/pxa-regs.h> 21#include <mach/pxa-regs.h>
22#include <asm/arch/spitz.h> 22#include <mach/spitz.h>
23 23
24static void spitzled_amber_set(struct led_classdev *led_cdev, 24static void spitzled_amber_set(struct led_classdev *led_cdev,
25 enum led_brightness value) 25 enum led_brightness value)
diff --git a/drivers/macintosh/mediabay.c b/drivers/macintosh/mediabay.c
index b1e5b4705250..d7e46d345d9e 100644
--- a/drivers/macintosh/mediabay.c
+++ b/drivers/macintosh/mediabay.c
@@ -16,7 +16,6 @@
16#include <linux/delay.h> 16#include <linux/delay.h>
17#include <linux/sched.h> 17#include <linux/sched.h>
18#include <linux/timer.h> 18#include <linux/timer.h>
19#include <linux/hdreg.h>
20#include <linux/stddef.h> 19#include <linux/stddef.h>
21#include <linux/init.h> 20#include <linux/init.h>
22#include <linux/ide.h> 21#include <linux/ide.h>
diff --git a/drivers/md/bitmap.c b/drivers/md/bitmap.c
index 621a272a2c74..7e65bad522cb 100644
--- a/drivers/md/bitmap.c
+++ b/drivers/md/bitmap.c
@@ -1234,7 +1234,7 @@ int bitmap_startwrite(struct bitmap *bitmap, sector_t offset, unsigned long sect
1234 case 0: 1234 case 0:
1235 bitmap_file_set_bit(bitmap, offset); 1235 bitmap_file_set_bit(bitmap, offset);
1236 bitmap_count_page(bitmap,offset, 1); 1236 bitmap_count_page(bitmap,offset, 1);
1237 blk_plug_device(bitmap->mddev->queue); 1237 blk_plug_device_unlocked(bitmap->mddev->queue);
1238 /* fall through */ 1238 /* fall through */
1239 case 1: 1239 case 1:
1240 *bmc = 2; 1240 *bmc = 2;
diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
index 798e468103b8..61f441409234 100644
--- a/drivers/md/dm-table.c
+++ b/drivers/md/dm-table.c
@@ -316,29 +316,12 @@ static inline int check_space(struct dm_table *t)
316 */ 316 */
317static int lookup_device(const char *path, dev_t *dev) 317static int lookup_device(const char *path, dev_t *dev)
318{ 318{
319 int r; 319 struct block_device *bdev = lookup_bdev(path);
320 struct nameidata nd; 320 if (IS_ERR(bdev))
321 struct inode *inode; 321 return PTR_ERR(bdev);
322 322 *dev = bdev->bd_dev;
323 if ((r = path_lookup(path, LOOKUP_FOLLOW, &nd))) 323 bdput(bdev);
324 return r; 324 return 0;
325
326 inode = nd.path.dentry->d_inode;
327 if (!inode) {
328 r = -ENOENT;
329 goto out;
330 }
331
332 if (!S_ISBLK(inode->i_mode)) {
333 r = -ENOTBLK;
334 goto out;
335 }
336
337 *dev = inode->i_rdev;
338
339 out:
340 path_put(&nd.path);
341 return r;
342} 325}
343 326
344/* 327/*
diff --git a/drivers/md/md.c b/drivers/md/md.c
index c2ff77ccec50..c7aae66c6f9b 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -3483,7 +3483,7 @@ static void md_safemode_timeout(unsigned long data)
3483 if (!atomic_read(&mddev->writes_pending)) { 3483 if (!atomic_read(&mddev->writes_pending)) {
3484 mddev->safemode = 1; 3484 mddev->safemode = 1;
3485 if (mddev->external) 3485 if (mddev->external)
3486 sysfs_notify(&mddev->kobj, NULL, "array_state"); 3486 set_bit(MD_NOTIFY_ARRAY_STATE, &mddev->flags);
3487 } 3487 }
3488 md_wakeup_thread(mddev->thread); 3488 md_wakeup_thread(mddev->thread);
3489} 3489}
@@ -5996,7 +5996,8 @@ static int remove_and_add_spares(mddev_t *mddev)
5996 if (mddev->degraded) { 5996 if (mddev->degraded) {
5997 rdev_for_each(rdev, rtmp, mddev) { 5997 rdev_for_each(rdev, rtmp, mddev) {
5998 if (rdev->raid_disk >= 0 && 5998 if (rdev->raid_disk >= 0 &&
5999 !test_bit(In_sync, &rdev->flags)) 5999 !test_bit(In_sync, &rdev->flags) &&
6000 !test_bit(Blocked, &rdev->flags))
6000 spares++; 6001 spares++;
6001 if (rdev->raid_disk < 0 6002 if (rdev->raid_disk < 0
6002 && !test_bit(Faulty, &rdev->flags)) { 6003 && !test_bit(Faulty, &rdev->flags)) {
@@ -6051,6 +6052,9 @@ void md_check_recovery(mddev_t *mddev)
6051 if (mddev->bitmap) 6052 if (mddev->bitmap)
6052 bitmap_daemon_work(mddev->bitmap); 6053 bitmap_daemon_work(mddev->bitmap);
6053 6054
6055 if (test_and_clear_bit(MD_NOTIFY_ARRAY_STATE, &mddev->flags))
6056 sysfs_notify(&mddev->kobj, NULL, "array_state");
6057
6054 if (mddev->ro) 6058 if (mddev->ro)
6055 return; 6059 return;
6056 6060
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
index 159535d73567..d41bebb6da0f 100644
--- a/drivers/md/raid10.c
+++ b/drivers/md/raid10.c
@@ -215,6 +215,9 @@ static void reschedule_retry(r10bio_t *r10_bio)
215 conf->nr_queued ++; 215 conf->nr_queued ++;
216 spin_unlock_irqrestore(&conf->device_lock, flags); 216 spin_unlock_irqrestore(&conf->device_lock, flags);
217 217
218 /* wake up frozen array... */
219 wake_up(&conf->wait_barrier);
220
218 md_wakeup_thread(mddev->thread); 221 md_wakeup_thread(mddev->thread);
219} 222}
220 223
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index 55e7c56045a0..40e939675657 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -2507,7 +2507,7 @@ static void handle_stripe_expansion(raid5_conf_t *conf, struct stripe_head *sh,
2507 * 2507 *
2508 */ 2508 */
2509 2509
2510static void handle_stripe5(struct stripe_head *sh) 2510static bool handle_stripe5(struct stripe_head *sh)
2511{ 2511{
2512 raid5_conf_t *conf = sh->raid_conf; 2512 raid5_conf_t *conf = sh->raid_conf;
2513 int disks = sh->disks, i; 2513 int disks = sh->disks, i;
@@ -2717,10 +2717,11 @@ static void handle_stripe5(struct stripe_head *sh)
2717 if (sh->reconstruct_state == reconstruct_state_result) { 2717 if (sh->reconstruct_state == reconstruct_state_result) {
2718 sh->reconstruct_state = reconstruct_state_idle; 2718 sh->reconstruct_state = reconstruct_state_idle;
2719 clear_bit(STRIPE_EXPANDING, &sh->state); 2719 clear_bit(STRIPE_EXPANDING, &sh->state);
2720 for (i = conf->raid_disks; i--; ) 2720 for (i = conf->raid_disks; i--; ) {
2721 set_bit(R5_Wantwrite, &sh->dev[i].flags); 2721 set_bit(R5_Wantwrite, &sh->dev[i].flags);
2722 set_bit(R5_LOCKED, &dev->flags); 2722 set_bit(R5_LOCKED, &sh->dev[i].flags);
2723 s.locked++; 2723 s.locked++;
2724 }
2724 } 2725 }
2725 2726
2726 if (s.expanded && test_bit(STRIPE_EXPANDING, &sh->state) && 2727 if (s.expanded && test_bit(STRIPE_EXPANDING, &sh->state) &&
@@ -2754,9 +2755,11 @@ static void handle_stripe5(struct stripe_head *sh)
2754 ops_run_io(sh, &s); 2755 ops_run_io(sh, &s);
2755 2756
2756 return_io(return_bi); 2757 return_io(return_bi);
2758
2759 return blocked_rdev == NULL;
2757} 2760}
2758 2761
2759static void handle_stripe6(struct stripe_head *sh, struct page *tmp_page) 2762static bool handle_stripe6(struct stripe_head *sh, struct page *tmp_page)
2760{ 2763{
2761 raid6_conf_t *conf = sh->raid_conf; 2764 raid6_conf_t *conf = sh->raid_conf;
2762 int disks = sh->disks; 2765 int disks = sh->disks;
@@ -2967,14 +2970,17 @@ static void handle_stripe6(struct stripe_head *sh, struct page *tmp_page)
2967 ops_run_io(sh, &s); 2970 ops_run_io(sh, &s);
2968 2971
2969 return_io(return_bi); 2972 return_io(return_bi);
2973
2974 return blocked_rdev == NULL;
2970} 2975}
2971 2976
2972static void handle_stripe(struct stripe_head *sh, struct page *tmp_page) 2977/* returns true if the stripe was handled */
2978static bool handle_stripe(struct stripe_head *sh, struct page *tmp_page)
2973{ 2979{
2974 if (sh->raid_conf->level == 6) 2980 if (sh->raid_conf->level == 6)
2975 handle_stripe6(sh, tmp_page); 2981 return handle_stripe6(sh, tmp_page);
2976 else 2982 else
2977 handle_stripe5(sh); 2983 return handle_stripe5(sh);
2978} 2984}
2979 2985
2980 2986
@@ -3692,7 +3698,9 @@ static inline sector_t sync_request(mddev_t *mddev, sector_t sector_nr, int *ski
3692 clear_bit(STRIPE_INSYNC, &sh->state); 3698 clear_bit(STRIPE_INSYNC, &sh->state);
3693 spin_unlock(&sh->lock); 3699 spin_unlock(&sh->lock);
3694 3700
3695 handle_stripe(sh, NULL); 3701 /* wait for any blocked device to be handled */
3702 while(unlikely(!handle_stripe(sh, NULL)))
3703 ;
3696 release_stripe(sh); 3704 release_stripe(sh);
3697 3705
3698 return STRIPE_SECTORS; 3706 return STRIPE_SECTORS;
@@ -3811,10 +3819,8 @@ static void raid5d(mddev_t *mddev)
3811 3819
3812 sh = __get_priority_stripe(conf); 3820 sh = __get_priority_stripe(conf);
3813 3821
3814 if (!sh) { 3822 if (!sh)
3815 async_tx_issue_pending_all();
3816 break; 3823 break;
3817 }
3818 spin_unlock_irq(&conf->device_lock); 3824 spin_unlock_irq(&conf->device_lock);
3819 3825
3820 handled++; 3826 handled++;
@@ -3827,6 +3833,7 @@ static void raid5d(mddev_t *mddev)
3827 3833
3828 spin_unlock_irq(&conf->device_lock); 3834 spin_unlock_irq(&conf->device_lock);
3829 3835
3836 async_tx_issue_pending_all();
3830 unplug_slaves(mddev); 3837 unplug_slaves(mddev);
3831 3838
3832 pr_debug("--- raid5d inactive\n"); 3839 pr_debug("--- raid5d inactive\n");
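
Aside: handle_stripe() and its level-specific variants above now report whether the stripe was actually handled (false when a blocked rdev got in the way), and the resync path in sync_request() retries until it succeeds. A tiny sketch of that retry shape, with hypothetical names (illustration only):

#include <stdbool.h>
#include <stdio.h>

static int blocked = 3;			/* pretend the device unblocks after 3 attempts */

static bool handle_one_unit(void)
{
	if (blocked > 0) {
		blocked--;
		return false;		/* could not be handled yet */
	}
	return true;			/* handled */
}

int main(void)
{
	int tries = 0;

	while (!handle_one_unit())	/* mirrors: while (!handle_stripe(sh, NULL)) ; */
		tries++;
	printf("handled after %d retries\n", tries);
	return 0;
}
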
diff --git a/drivers/media/dvb/dvb-usb/cxusb.c b/drivers/media/dvb/dvb-usb/cxusb.c
index 578afce6884c..aaa0b6f0b521 100644
--- a/drivers/media/dvb/dvb-usb/cxusb.c
+++ b/drivers/media/dvb/dvb-usb/cxusb.c
@@ -565,7 +565,8 @@ static int cxusb_lgh064f_tuner_attach(struct dvb_usb_adapter *adap)
565 565
566static int dvico_bluebird_xc2028_callback(void *ptr, int command, int arg) 566static int dvico_bluebird_xc2028_callback(void *ptr, int command, int arg)
567{ 567{
568 struct dvb_usb_device *d = ptr; 568 struct dvb_usb_adapter *adap = ptr;
569 struct dvb_usb_device *d = adap->dev;
569 570
570 switch (command) { 571 switch (command) {
571 case XC2028_TUNER_RESET: 572 case XC2028_TUNER_RESET:
@@ -593,9 +594,9 @@ static int cxusb_dvico_xc3028_tuner_attach(struct dvb_usb_adapter *adap)
593 .callback = dvico_bluebird_xc2028_callback, 594 .callback = dvico_bluebird_xc2028_callback,
594 }; 595 };
595 static struct xc2028_ctrl ctl = { 596 static struct xc2028_ctrl ctl = {
596 .fname = "xc3028-dvico-au-01.fw", 597 .fname = "xc3028-v27.fw",
597 .max_len = 64, 598 .max_len = 64,
598 .scode_table = XC3028_FE_ZARLINK456, 599 .demod = XC3028_FE_ZARLINK456,
599 }; 600 };
600 601
601 fe = dvb_attach(xc2028_attach, adap->fe, &cfg); 602 fe = dvb_attach(xc2028_attach, adap->fe, &cfg);
diff --git a/drivers/media/dvb/frontends/Kconfig b/drivers/media/dvb/frontends/Kconfig
index 574dffe91b68..7dbb4a223c99 100644
--- a/drivers/media/dvb/frontends/Kconfig
+++ b/drivers/media/dvb/frontends/Kconfig
@@ -135,9 +135,8 @@ config DVB_CX22702
135 135
136config DVB_DRX397XD 136config DVB_DRX397XD
137 tristate "Micronas DRX3975D/DRX3977D based" 137 tristate "Micronas DRX3975D/DRX3977D based"
138 depends on DVB_CORE && I2C && HOTPLUG 138 depends on DVB_CORE && I2C
139 default m if DVB_FE_CUSTOMISE 139 default m if DVB_FE_CUSTOMISE
140 select FW_LOADER
141 help 140 help
142 A DVB-T tuner module. Say Y when you want to support this frontend. 141 A DVB-T tuner module. Say Y when you want to support this frontend.
143 142
diff --git a/drivers/media/video/Kconfig b/drivers/media/video/Kconfig
index d4a6e56a7135..ecbfa1b39b70 100644
--- a/drivers/media/video/Kconfig
+++ b/drivers/media/video/Kconfig
@@ -630,7 +630,7 @@ config VIDEO_ZORAN_ZR36060
630 depends on VIDEO_ZORAN 630 depends on VIDEO_ZORAN
631 help 631 help
632 Say Y to support Zoran boards based on 36060 chips. 632 Say Y to support Zoran boards based on 36060 chips.
633 This includes Iomega Bus, Pinnacle DC10, Linux media Labs 33 633 This includes Iomega Buz, Pinnacle DC10, Linux media Labs 33
634 and 33 R10 and AverMedia 6 boards. 634 and 33 R10 and AverMedia 6 boards.
635 635
636config VIDEO_ZORAN_BUZ 636config VIDEO_ZORAN_BUZ
diff --git a/drivers/media/video/arv.c b/drivers/media/video/arv.c
index 56ebfd5ef6fa..9e436ad3d34b 100644
--- a/drivers/media/video/arv.c
+++ b/drivers/media/video/arv.c
@@ -29,6 +29,7 @@
29#include <linux/sched.h> 29#include <linux/sched.h>
30#include <linux/videodev.h> 30#include <linux/videodev.h>
31#include <media/v4l2-common.h> 31#include <media/v4l2-common.h>
32#include <media/v4l2-ioctl.h>
32#include <linux/mutex.h> 33#include <linux/mutex.h>
33 34
34#include <asm/uaccess.h> 35#include <asm/uaccess.h>
@@ -755,7 +756,6 @@ static const struct file_operations ar_fops = {
755 756
756static struct video_device ar_template = { 757static struct video_device ar_template = {
757 .name = "Colour AR VGA", 758 .name = "Colour AR VGA",
758 .type = VID_TYPE_CAPTURE,
759 .fops = &ar_fops, 759 .fops = &ar_fops,
760 .release = ar_release, 760 .release = ar_release,
761 .minor = -1, 761 .minor = -1,
diff --git a/drivers/media/video/cs5345.c b/drivers/media/video/cs5345.c
index 61d14d26686f..a662b15d5b90 100644
--- a/drivers/media/video/cs5345.c
+++ b/drivers/media/video/cs5345.c
@@ -35,7 +35,7 @@ static int debug;
35 35
36module_param(debug, bool, 0644); 36module_param(debug, bool, 0644);
37 37
38MODULE_PARM_DESC(debug, "Debugging messages\n\t\t\t0=Off (default), 1=On"); 38MODULE_PARM_DESC(debug, "Debugging messages, 0=Off (default), 1=On");
39 39
40 40
41/* ----------------------------------------------------------------------- */ 41/* ----------------------------------------------------------------------- */
diff --git a/drivers/media/video/cs53l32a.c b/drivers/media/video/cs53l32a.c
index e30a589c0e18..c4444500b330 100644
--- a/drivers/media/video/cs53l32a.c
+++ b/drivers/media/video/cs53l32a.c
@@ -39,7 +39,7 @@ static int debug;
39 39
40module_param(debug, bool, 0644); 40module_param(debug, bool, 0644);
41 41
42MODULE_PARM_DESC(debug, "Debugging messages\n\t\t\t0=Off (default), 1=On"); 42MODULE_PARM_DESC(debug, "Debugging messages, 0=Off (default), 1=On");
43 43
44static unsigned short normal_i2c[] = { 0x22 >> 1, I2C_CLIENT_END }; 44static unsigned short normal_i2c[] = { 0x22 >> 1, I2C_CLIENT_END };
45 45
diff --git a/drivers/media/video/em28xx/em28xx-cards.c b/drivers/media/video/em28xx/em28xx-cards.c
index 476ae44a62d2..452da70e719f 100644
--- a/drivers/media/video/em28xx/em28xx-cards.c
+++ b/drivers/media/video/em28xx/em28xx-cards.c
@@ -1015,6 +1015,7 @@ struct em28xx_board em28xx_boards[] = {
1015 .valid = EM28XX_BOARD_NOT_VALIDATED, 1015 .valid = EM28XX_BOARD_NOT_VALIDATED,
1016 .vchannels = 3, 1016 .vchannels = 3,
1017 .tuner_type = TUNER_XC2028, 1017 .tuner_type = TUNER_XC2028,
1018 .mts_firmware = 1,
1018 .decoder = EM28XX_TVP5150, 1019 .decoder = EM28XX_TVP5150,
1019 .input = { { 1020 .input = { {
1020 .type = EM28XX_VMUX_TELEVISION, 1021 .type = EM28XX_VMUX_TELEVISION,
diff --git a/drivers/media/video/gspca/conex.c b/drivers/media/video/gspca/conex.c
index 44b0bffeb20e..cd3a3f5829b2 100644
--- a/drivers/media/video/gspca/conex.c
+++ b/drivers/media/video/gspca/conex.c
@@ -123,7 +123,7 @@ static void reg_r(struct gspca_dev *gspca_dev,
123{ 123{
124 struct usb_device *dev = gspca_dev->dev; 124 struct usb_device *dev = gspca_dev->dev;
125 125
126#ifdef CONFIG_VIDEO_ADV_DEBUG 126#ifdef GSPCA_DEBUG
127 if (len > sizeof gspca_dev->usb_buf) { 127 if (len > sizeof gspca_dev->usb_buf) {
128 err("reg_r: buffer overflow"); 128 err("reg_r: buffer overflow");
129 return; 129 return;
@@ -163,7 +163,7 @@ static void reg_w(struct gspca_dev *gspca_dev,
163{ 163{
164 struct usb_device *dev = gspca_dev->dev; 164 struct usb_device *dev = gspca_dev->dev;
165 165
166#ifdef CONFIG_VIDEO_ADV_DEBUG 166#ifdef GSPCA_DEBUG
167 if (len > sizeof gspca_dev->usb_buf) { 167 if (len > sizeof gspca_dev->usb_buf) {
168 err("reg_w: buffer overflow"); 168 err("reg_w: buffer overflow");
169 return; 169 return;
diff --git a/drivers/media/video/gspca/etoms.c b/drivers/media/video/gspca/etoms.c
index c8c2f02fcf00..1dbe92d01e6a 100644
--- a/drivers/media/video/gspca/etoms.c
+++ b/drivers/media/video/gspca/etoms.c
@@ -233,7 +233,7 @@ static void reg_r(struct gspca_dev *gspca_dev,
233{ 233{
234 struct usb_device *dev = gspca_dev->dev; 234 struct usb_device *dev = gspca_dev->dev;
235 235
236#ifdef CONFIG_VIDEO_ADV_DEBUG 236#ifdef GSPCA_DEBUG
237 if (len > sizeof gspca_dev->usb_buf) { 237 if (len > sizeof gspca_dev->usb_buf) {
238 err("reg_r: buffer overflow"); 238 err("reg_r: buffer overflow");
239 return; 239 return;
@@ -271,7 +271,7 @@ static void reg_w(struct gspca_dev *gspca_dev,
271{ 271{
272 struct usb_device *dev = gspca_dev->dev; 272 struct usb_device *dev = gspca_dev->dev;
273 273
274#ifdef CONFIG_VIDEO_ADV_DEBUG 274#ifdef GSPCA_DEBUG
275 if (len > sizeof gspca_dev->usb_buf) { 275 if (len > sizeof gspca_dev->usb_buf) {
276 err("reg_w: buffer overflow"); 276 err("reg_w: buffer overflow");
277 return; 277 return;
@@ -461,6 +461,52 @@ static void Et_init2(struct gspca_dev *gspca_dev)
461 reg_w_val(gspca_dev, 0x80, 0x20); /* 0x20; */ 461 reg_w_val(gspca_dev, 0x80, 0x20); /* 0x20; */
462} 462}
463 463
464static void setbrightness(struct gspca_dev *gspca_dev)
465{
466 struct sd *sd = (struct sd *) gspca_dev;
467 int i;
468 __u8 brightness = sd->brightness;
469
470 for (i = 0; i < 4; i++)
471 reg_w_val(gspca_dev, ET_O_RED + i, brightness);
472}
473
474static void getbrightness(struct gspca_dev *gspca_dev)
475{
476 struct sd *sd = (struct sd *) gspca_dev;
477 int i;
478 int brightness = 0;
479
480 for (i = 0; i < 4; i++) {
481 reg_r(gspca_dev, ET_O_RED + i, 1);
482 brightness += gspca_dev->usb_buf[0];
483 }
484 sd->brightness = brightness >> 3;
485}
486
487static void setcontrast(struct gspca_dev *gspca_dev)
488{
489 struct sd *sd = (struct sd *) gspca_dev;
490 __u8 RGBG[] = { 0x80, 0x80, 0x80, 0x80, 0x00, 0x00 };
491 __u8 contrast = sd->contrast;
492
493 memset(RGBG, contrast, sizeof(RGBG) - 2);
494 reg_w(gspca_dev, ET_G_RED, RGBG, 6);
495}
496
497static void getcontrast(struct gspca_dev *gspca_dev)
498{
499 struct sd *sd = (struct sd *) gspca_dev;
500 int i;
501 int contrast = 0;
502
503 for (i = 0; i < 4; i++) {
504 reg_r(gspca_dev, ET_G_RED + i, 1);
505 contrast += gspca_dev->usb_buf[0];
506 }
507 sd->contrast = contrast >> 2;
508}
509
464static void setcolors(struct gspca_dev *gspca_dev) 510static void setcolors(struct gspca_dev *gspca_dev)
465{ 511{
466 struct sd *sd = (struct sd *) gspca_dev; 512 struct sd *sd = (struct sd *) gspca_dev;
@@ -492,6 +538,16 @@ static void getcolors(struct gspca_dev *gspca_dev)
492 } 538 }
493} 539}
494 540
541static void setautogain(struct gspca_dev *gspca_dev)
542{
543 struct sd *sd = (struct sd *) gspca_dev;
544
545 if (sd->autogain)
546 sd->ag_cnt = AG_CNT_START;
547 else
548 sd->ag_cnt = -1;
549}
550
495static void Et_init1(struct gspca_dev *gspca_dev) 551static void Et_init1(struct gspca_dev *gspca_dev)
496{ 552{
497 __u8 value; 553 __u8 value;
@@ -614,6 +670,7 @@ static int sd_config(struct gspca_dev *gspca_dev,
614 sd->contrast = CONTRAST_DEF; 670 sd->contrast = CONTRAST_DEF;
615 sd->colors = COLOR_DEF; 671 sd->colors = COLOR_DEF;
616 sd->autogain = AUTOGAIN_DEF; 672 sd->autogain = AUTOGAIN_DEF;
673 sd->ag_cnt = -1;
617 return 0; 674 return 0;
618} 675}
619 676
@@ -641,6 +698,8 @@ static void sd_start(struct gspca_dev *gspca_dev)
641 else 698 else
642 Et_init2(gspca_dev); 699 Et_init2(gspca_dev);
643 700
701 setautogain(gspca_dev);
702
644 reg_w_val(gspca_dev, ET_RESET_ALL, 0x08); 703 reg_w_val(gspca_dev, ET_RESET_ALL, 0x08);
645 et_video(gspca_dev, 1); /* video on */ 704 et_video(gspca_dev, 1); /* video on */
646} 705}
@@ -658,52 +717,6 @@ static void sd_close(struct gspca_dev *gspca_dev)
658{ 717{
659} 718}
660 719
661static void setbrightness(struct gspca_dev *gspca_dev)
662{
663 struct sd *sd = (struct sd *) gspca_dev;
664 int i;
665 __u8 brightness = sd->brightness;
666
667 for (i = 0; i < 4; i++)
668 reg_w_val(gspca_dev, ET_O_RED + i, brightness);
669}
670
671static void getbrightness(struct gspca_dev *gspca_dev)
672{
673 struct sd *sd = (struct sd *) gspca_dev;
674 int i;
675 int brightness = 0;
676
677 for (i = 0; i < 4; i++) {
678 reg_r(gspca_dev, ET_O_RED + i, 1);
679 brightness += gspca_dev->usb_buf[0];
680 }
681 sd->brightness = brightness >> 3;
682}
683
684static void setcontrast(struct gspca_dev *gspca_dev)
685{
686 struct sd *sd = (struct sd *) gspca_dev;
687 __u8 RGBG[] = { 0x80, 0x80, 0x80, 0x80, 0x00, 0x00 };
688 __u8 contrast = sd->contrast;
689
690 memset(RGBG, contrast, sizeof(RGBG) - 2);
691 reg_w(gspca_dev, ET_G_RED, RGBG, 6);
692}
693
694static void getcontrast(struct gspca_dev *gspca_dev)
695{
696 struct sd *sd = (struct sd *) gspca_dev;
697 int i;
698 int contrast = 0;
699
700 for (i = 0; i < 4; i++) {
701 reg_r(gspca_dev, ET_G_RED + i, 1);
702 contrast += gspca_dev->usb_buf[0];
703 }
704 sd->contrast = contrast >> 2;
705}
706
707static __u8 Et_getgainG(struct gspca_dev *gspca_dev) 720static __u8 Et_getgainG(struct gspca_dev *gspca_dev)
708{ 721{
709 struct sd *sd = (struct sd *) gspca_dev; 722 struct sd *sd = (struct sd *) gspca_dev;
@@ -733,15 +746,22 @@ static void Et_setgainG(struct gspca_dev *gspca_dev, __u8 gain)
733#define LIMIT(color) \ 746#define LIMIT(color) \
734 (unsigned char)((color > 0xff)?0xff:((color < 0)?0:color)) 747 (unsigned char)((color > 0xff)?0xff:((color < 0)?0:color))
735 748
736static void setautogain(struct gspca_dev *gspca_dev) 749static void do_autogain(struct gspca_dev *gspca_dev)
737{ 750{
738 __u8 luma = 0; 751 struct sd *sd = (struct sd *) gspca_dev;
752 __u8 luma;
739 __u8 luma_mean = 128; 753 __u8 luma_mean = 128;
740 __u8 luma_delta = 20; 754 __u8 luma_delta = 20;
741 __u8 spring = 4; 755 __u8 spring = 4;
742 int Gbright = 0; 756 int Gbright;
743 __u8 r, g, b; 757 __u8 r, g, b;
744 758
759 if (sd->ag_cnt < 0)
760 return;
761 if (--sd->ag_cnt >= 0)
762 return;
763 sd->ag_cnt = AG_CNT_START;
764
745 Gbright = Et_getgainG(gspca_dev); 765 Gbright = Et_getgainG(gspca_dev);
746 reg_r(gspca_dev, ET_LUMA_CENTER, 4); 766 reg_r(gspca_dev, ET_LUMA_CENTER, 4);
747 g = (gspca_dev->usb_buf[0] + gspca_dev->usb_buf[3]) >> 1; 767 g = (gspca_dev->usb_buf[0] + gspca_dev->usb_buf[3]) >> 1;
@@ -768,7 +788,6 @@ static void sd_pkt_scan(struct gspca_dev *gspca_dev,
768 __u8 *data, /* isoc packet */ 788 __u8 *data, /* isoc packet */
769 int len) /* iso packet length */ 789 int len) /* iso packet length */
770{ 790{
771 struct sd *sd;
772 int seqframe; 791 int seqframe;
773 792
774 seqframe = data[0] & 0x3f; 793 seqframe = data[0] & 0x3f;
@@ -783,13 +802,6 @@ static void sd_pkt_scan(struct gspca_dev *gspca_dev,
783 frame = gspca_frame_add(gspca_dev, LAST_PACKET, frame, 802 frame = gspca_frame_add(gspca_dev, LAST_PACKET, frame,
784 data, 0); 803 data, 0);
785 gspca_frame_add(gspca_dev, FIRST_PACKET, frame, data, len); 804 gspca_frame_add(gspca_dev, FIRST_PACKET, frame, data, len);
786 sd = (struct sd *) gspca_dev;
787 if (sd->ag_cnt >= 0) {
788 if (--sd->ag_cnt < 0) {
789 sd->ag_cnt = AG_CNT_START;
790 setautogain(gspca_dev);
791 }
792 }
793 return; 805 return;
794 } 806 }
795 if (len) { 807 if (len) {
@@ -862,10 +874,8 @@ static int sd_setautogain(struct gspca_dev *gspca_dev, __s32 val)
862 struct sd *sd = (struct sd *) gspca_dev; 874 struct sd *sd = (struct sd *) gspca_dev;
863 875
864 sd->autogain = val; 876 sd->autogain = val;
865 if (val) 877 if (gspca_dev->streaming)
866 sd->ag_cnt = AG_CNT_START; 878 setautogain(gspca_dev);
867 else
868 sd->ag_cnt = -1;
869 return 0; 879 return 0;
870} 880}
871 881
@@ -889,6 +899,7 @@ static struct sd_desc sd_desc = {
889 .stop0 = sd_stop0, 899 .stop0 = sd_stop0,
890 .close = sd_close, 900 .close = sd_close,
891 .pkt_scan = sd_pkt_scan, 901 .pkt_scan = sd_pkt_scan,
902 .dq_callback = do_autogain,
892}; 903};
893 904
894/* -- module initialisation -- */ 905/* -- module initialisation -- */
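
Aside: the etoms.c refactor above moves the per-frame autogain bookkeeping out of sd_pkt_scan() and into a do_autogain() dequeue callback gated by a frame countdown: a negative ag_cnt disables it, otherwise the real adjustment runs roughly once every AG_CNT_START frames. A standalone sketch of that gate, with the AG_CNT_START value assumed (illustration only):

#include <stdio.h>

#define AG_CNT_START 13			/* assumed value, sketch only */

static int ag_cnt = AG_CNT_START;

static void do_autogain_sketch(void)
{
	if (ag_cnt < 0)			/* autogain disabled */
		return;
	if (--ag_cnt >= 0)		/* not due yet */
		return;
	ag_cnt = AG_CNT_START;		/* rearm, then do the real adjustment */
	printf("adjust gain\n");
}

int main(void)
{
	int frame;

	for (frame = 0; frame < 30; frame++)
		do_autogain_sketch();
	return 0;
}
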
diff --git a/drivers/media/video/gspca/gspca.c b/drivers/media/video/gspca/gspca.c
index 3a051c925ff6..15d302b28b79 100644
--- a/drivers/media/video/gspca/gspca.c
+++ b/drivers/media/video/gspca/gspca.c
@@ -47,7 +47,7 @@ MODULE_LICENSE("GPL");
47 47
48static int video_nr = -1; 48static int video_nr = -1;
49 49
50#ifdef CONFIG_VIDEO_ADV_DEBUG 50#ifdef GSPCA_DEBUG
51int gspca_debug = D_ERR | D_PROBE; 51int gspca_debug = D_ERR | D_PROBE;
52EXPORT_SYMBOL(gspca_debug); 52EXPORT_SYMBOL(gspca_debug);
53 53
@@ -677,7 +677,7 @@ static int try_fmt_vid_cap(struct gspca_dev *gspca_dev,
677 w = fmt->fmt.pix.width; 677 w = fmt->fmt.pix.width;
678 h = fmt->fmt.pix.height; 678 h = fmt->fmt.pix.height;
679 679
680#ifdef CONFIG_VIDEO_ADV_DEBUG 680#ifdef GSPCA_DEBUG
681 if (gspca_debug & D_CONF) 681 if (gspca_debug & D_CONF)
682 PDEBUG_MODE("try fmt cap", fmt->fmt.pix.pixelformat, w, h); 682 PDEBUG_MODE("try fmt cap", fmt->fmt.pix.pixelformat, w, h);
683#endif 683#endif
@@ -785,7 +785,7 @@ static int dev_open(struct inode *inode, struct file *file)
785 } 785 }
786 gspca_dev->users++; 786 gspca_dev->users++;
787 file->private_data = gspca_dev; 787 file->private_data = gspca_dev;
788#ifdef CONFIG_VIDEO_ADV_DEBUG 788#ifdef GSPCA_DEBUG
789 /* activate the v4l2 debug */ 789 /* activate the v4l2 debug */
790 if (gspca_debug & D_V4L2) 790 if (gspca_debug & D_V4L2)
791 gspca_dev->vdev.debug |= 3; 791 gspca_dev->vdev.debug |= 3;
@@ -904,7 +904,7 @@ static int vidioc_s_ctrl(struct file *file, void *priv,
904 if (ctrl->id != ctrls->qctrl.id) 904 if (ctrl->id != ctrls->qctrl.id)
905 continue; 905 continue;
906 if (ctrl->value < ctrls->qctrl.minimum 906 if (ctrl->value < ctrls->qctrl.minimum
907 && ctrl->value > ctrls->qctrl.maximum) 907 || ctrl->value > ctrls->qctrl.maximum)
908 return -ERANGE; 908 return -ERANGE;
909 PDEBUG(D_CONF, "set ctrl [%08x] = %d", ctrl->id, ctrl->value); 909 PDEBUG(D_CONF, "set ctrl [%08x] = %d", ctrl->id, ctrl->value);
910 if (mutex_lock_interruptible(&gspca_dev->usb_lock)) 910 if (mutex_lock_interruptible(&gspca_dev->usb_lock))
@@ -1080,7 +1080,7 @@ static int vidioc_streamon(struct file *file, void *priv,
1080 if (ret < 0) 1080 if (ret < 0)
1081 goto out; 1081 goto out;
1082 } 1082 }
1083#ifdef CONFIG_VIDEO_ADV_DEBUG 1083#ifdef GSPCA_DEBUG
1084 if (gspca_debug & D_STREAM) { 1084 if (gspca_debug & D_STREAM) {
1085 PDEBUG_MODE("stream on OK", 1085 PDEBUG_MODE("stream on OK",
1086 gspca_dev->pixfmt, 1086 gspca_dev->pixfmt,
@@ -1913,7 +1913,7 @@ static void __exit gspca_exit(void)
1913module_init(gspca_init); 1913module_init(gspca_init);
1914module_exit(gspca_exit); 1914module_exit(gspca_exit);
1915 1915
1916#ifdef CONFIG_VIDEO_ADV_DEBUG 1916#ifdef GSPCA_DEBUG
1917module_param_named(debug, gspca_debug, int, 0644); 1917module_param_named(debug, gspca_debug, int, 0644);
1918MODULE_PARM_DESC(debug, 1918MODULE_PARM_DESC(debug,
1919 "Debug (bit) 0x01:error 0x02:probe 0x04:config" 1919 "Debug (bit) 0x01:error 0x02:probe 0x04:config"
diff --git a/drivers/media/video/gspca/gspca.h b/drivers/media/video/gspca/gspca.h
index 3fd2c4eee204..67e448940eaa 100644
--- a/drivers/media/video/gspca/gspca.h
+++ b/drivers/media/video/gspca/gspca.h
@@ -9,7 +9,10 @@
9#include <media/v4l2-common.h> 9#include <media/v4l2-common.h>
10#include <linux/mutex.h> 10#include <linux/mutex.h>
11 11
12#ifdef CONFIG_VIDEO_ADV_DEBUG 12/* compilation option */
13#define GSPCA_DEBUG 1
14
15#ifdef GSPCA_DEBUG
13/* GSPCA our debug messages */ 16/* GSPCA our debug messages */
14extern int gspca_debug; 17extern int gspca_debug;
15#define PDEBUG(level, fmt, args...) \ 18#define PDEBUG(level, fmt, args...) \
diff --git a/drivers/media/video/gspca/ov519.c b/drivers/media/video/gspca/ov519.c
index 83139efc4629..b4f00ec0885c 100644
--- a/drivers/media/video/gspca/ov519.c
+++ b/drivers/media/video/gspca/ov519.c
@@ -40,14 +40,15 @@ struct sd {
40 struct gspca_dev gspca_dev; /* !! must be the first item */ 40 struct gspca_dev gspca_dev; /* !! must be the first item */
41 41
42 /* Determined by sensor type */ 42 /* Determined by sensor type */
43 short maxwidth; 43 char sif;
44 short maxheight;
45 44
46 unsigned char primary_i2c_slave; /* I2C write id of sensor */ 45 unsigned char primary_i2c_slave; /* I2C write id of sensor */
47 46
48 unsigned char brightness; 47 unsigned char brightness;
49 unsigned char contrast; 48 unsigned char contrast;
50 unsigned char colors; 49 unsigned char colors;
50 __u8 hflip;
51 __u8 vflip;
51 52
52 char compress; /* Should the next frame be compressed? */ 53 char compress; /* Should the next frame be compressed? */
53 char compress_inited; /* Are compression params uploaded? */ 54 char compress_inited; /* Are compression params uploaded? */
@@ -77,9 +78,12 @@ static int sd_setcontrast(struct gspca_dev *gspca_dev, __s32 val);
77static int sd_getcontrast(struct gspca_dev *gspca_dev, __s32 *val); 78static int sd_getcontrast(struct gspca_dev *gspca_dev, __s32 *val);
78static int sd_setcolors(struct gspca_dev *gspca_dev, __s32 val); 79static int sd_setcolors(struct gspca_dev *gspca_dev, __s32 val);
79static int sd_getcolors(struct gspca_dev *gspca_dev, __s32 *val); 80static int sd_getcolors(struct gspca_dev *gspca_dev, __s32 *val);
81static int sd_sethflip(struct gspca_dev *gspca_dev, __s32 val);
82static int sd_gethflip(struct gspca_dev *gspca_dev, __s32 *val);
83static int sd_setvflip(struct gspca_dev *gspca_dev, __s32 val);
84static int sd_getvflip(struct gspca_dev *gspca_dev, __s32 *val);
80 85
81static struct ctrl sd_ctrls[] = { 86static struct ctrl sd_ctrls[] = {
82#define SD_BRIGHTNESS 0
83 { 87 {
84 { 88 {
85 .id = V4L2_CID_BRIGHTNESS, 89 .id = V4L2_CID_BRIGHTNESS,
@@ -88,12 +92,12 @@ static struct ctrl sd_ctrls[] = {
88 .minimum = 0, 92 .minimum = 0,
89 .maximum = 255, 93 .maximum = 255,
90 .step = 1, 94 .step = 1,
91 .default_value = 127, 95#define BRIGHTNESS_DEF 127
96 .default_value = BRIGHTNESS_DEF,
92 }, 97 },
93 .set = sd_setbrightness, 98 .set = sd_setbrightness,
94 .get = sd_getbrightness, 99 .get = sd_getbrightness,
95 }, 100 },
96#define SD_CONTRAST 1
97 { 101 {
98 { 102 {
99 .id = V4L2_CID_CONTRAST, 103 .id = V4L2_CID_CONTRAST,
@@ -102,31 +106,61 @@ static struct ctrl sd_ctrls[] = {
102 .minimum = 0, 106 .minimum = 0,
103 .maximum = 255, 107 .maximum = 255,
104 .step = 1, 108 .step = 1,
105 .default_value = 127, 109#define CONTRAST_DEF 127
110 .default_value = CONTRAST_DEF,
106 }, 111 },
107 .set = sd_setcontrast, 112 .set = sd_setcontrast,
108 .get = sd_getcontrast, 113 .get = sd_getcontrast,
109 }, 114 },
110#define SD_COLOR 2
111 { 115 {
112 { 116 {
113 .id = V4L2_CID_SATURATION, 117 .id = V4L2_CID_SATURATION,
114 .type = V4L2_CTRL_TYPE_INTEGER, 118 .type = V4L2_CTRL_TYPE_INTEGER,
115 .name = "Saturation", 119 .name = "Color",
116 .minimum = 0, 120 .minimum = 0,
117 .maximum = 255, 121 .maximum = 255,
118 .step = 1, 122 .step = 1,
119 .default_value = 127, 123#define COLOR_DEF 127
124 .default_value = COLOR_DEF,
120 }, 125 },
121 .set = sd_setcolors, 126 .set = sd_setcolors,
122 .get = sd_getcolors, 127 .get = sd_getcolors,
123 }, 128 },
129/* next controls work with ov7670 only */
130 {
131 {
132 .id = V4L2_CID_HFLIP,
133 .type = V4L2_CTRL_TYPE_BOOLEAN,
134 .name = "Mirror",
135 .minimum = 0,
136 .maximum = 1,
137 .step = 1,
138#define HFLIP_DEF 0
139 .default_value = HFLIP_DEF,
140 },
141 .set = sd_sethflip,
142 .get = sd_gethflip,
143 },
144 {
145 {
146 .id = V4L2_CID_VFLIP,
147 .type = V4L2_CTRL_TYPE_BOOLEAN,
148 .name = "Vflip",
149 .minimum = 0,
150 .maximum = 1,
151 .step = 1,
152#define VFLIP_DEF 0
153 .default_value = VFLIP_DEF,
154 },
155 .set = sd_setvflip,
156 .get = sd_getvflip,
157 },
124}; 158};
125 159
126static struct v4l2_pix_format vga_mode[] = { 160static struct v4l2_pix_format vga_mode[] = {
127 {320, 240, V4L2_PIX_FMT_JPEG, V4L2_FIELD_NONE, 161 {320, 240, V4L2_PIX_FMT_JPEG, V4L2_FIELD_NONE,
128 .bytesperline = 320, 162 .bytesperline = 320,
129 .sizeimage = 320 * 240 * 3 / 8 + 589, 163 .sizeimage = 320 * 240 * 3 / 8 + 590,
130 .colorspace = V4L2_COLORSPACE_JPEG, 164 .colorspace = V4L2_COLORSPACE_JPEG,
131 .priv = 1}, 165 .priv = 1},
132 {640, 480, V4L2_PIX_FMT_JPEG, V4L2_FIELD_NONE, 166 {640, 480, V4L2_PIX_FMT_JPEG, V4L2_FIELD_NONE,
@@ -138,12 +172,12 @@ static struct v4l2_pix_format vga_mode[] = {
138static struct v4l2_pix_format sif_mode[] = { 172static struct v4l2_pix_format sif_mode[] = {
139 {176, 144, V4L2_PIX_FMT_JPEG, V4L2_FIELD_NONE, 173 {176, 144, V4L2_PIX_FMT_JPEG, V4L2_FIELD_NONE,
140 .bytesperline = 176, 174 .bytesperline = 176,
141 .sizeimage = 176 * 144 * 3 / 8 + 589, 175 .sizeimage = 176 * 144 * 3 / 8 + 590,
142 .colorspace = V4L2_COLORSPACE_JPEG, 176 .colorspace = V4L2_COLORSPACE_JPEG,
143 .priv = 1}, 177 .priv = 1},
144 {352, 288, V4L2_PIX_FMT_JPEG, V4L2_FIELD_NONE, 178 {352, 288, V4L2_PIX_FMT_JPEG, V4L2_FIELD_NONE,
145 .bytesperline = 352, 179 .bytesperline = 352,
146 .sizeimage = 352 * 288 * 3 / 8 + 589, 180 .sizeimage = 352 * 288 * 3 / 8 + 590,
147 .colorspace = V4L2_COLORSPACE_JPEG, 181 .colorspace = V4L2_COLORSPACE_JPEG,
148 .priv = 0}, 182 .priv = 0},
149}; 183};
@@ -225,6 +259,7 @@ static struct v4l2_pix_format sif_mode[] = {
225#define OV7670_REG_VSTART 0x19 /* Vert start high bits */ 259#define OV7670_REG_VSTART 0x19 /* Vert start high bits */
226#define OV7670_REG_VSTOP 0x1a /* Vert stop high bits */ 260#define OV7670_REG_VSTOP 0x1a /* Vert stop high bits */
227#define OV7670_REG_MVFP 0x1e /* Mirror / vflip */ 261#define OV7670_REG_MVFP 0x1e /* Mirror / vflip */
262#define OV7670_MVFP_VFLIP 0x10 /* vertical flip */
228#define OV7670_MVFP_MIRROR 0x20 /* Mirror image */ 263#define OV7670_MVFP_MIRROR 0x20 /* Mirror image */
229#define OV7670_REG_AEW 0x24 /* AGC upper limit */ 264#define OV7670_REG_AEW 0x24 /* AGC upper limit */
230#define OV7670_REG_AEB 0x25 /* AGC lower limit */ 265#define OV7670_REG_AEB 0x25 /* AGC lower limit */
@@ -258,16 +293,6 @@ static struct v4l2_pix_format sif_mode[] = {
258#define OV7670_REG_HAECC7 0xaa /* Hist AEC/AGC control 7 */ 293#define OV7670_REG_HAECC7 0xaa /* Hist AEC/AGC control 7 */
259#define OV7670_REG_BD60MAX 0xab /* 60hz banding step limit */ 294#define OV7670_REG_BD60MAX 0xab /* 60hz banding step limit */
260 295
261struct ovsensor_window {
262 short x;
263 short y;
264 short width;
265 short height;
266/* int format; */
267 short quarter; /* Scale width and height down 2x */
268 short clockdiv; /* Clock divisor setting */
269};
270
271static unsigned char ov7670_abs_to_sm(unsigned char v) 296static unsigned char ov7670_abs_to_sm(unsigned char v)
272{ 297{
273 if (v > 127) 298 if (v > 127)
@@ -499,19 +524,6 @@ static int init_ov_sensor(struct sd *sd)
499 return 0; 524 return 0;
500} 525}
501 526
502/* Switch on standard JPEG compression. Returns 0 for success. */
503static int ov519_init_compression(struct sd *sd)
504{
505 if (!sd->compress_inited) {
506 if (reg_w_mask(sd, OV519_SYS_EN_CLK1, 1 << 2, 1 << 2) < 0) {
507 PDEBUG(D_ERR, "Error switching to compressed mode");
508 return -EIO;
509 }
510 sd->compress_inited = 1;
511 }
512 return 0;
513}
514
515/* Set the read and write slave IDs. The "slave" argument is the write slave, 527/* Set the read and write slave IDs. The "slave" argument is the write slave,
516 * and the read slave will be set to (slave + 1). 528 * and the read slave will be set to (slave + 1).
517 * This should not be called from outside the i2c I/O functions. 529 * This should not be called from outside the i2c I/O functions.
@@ -681,21 +693,17 @@ static int ov8xx0_configure(struct sd *sd)
681 return -1; 693 return -1;
682 } 694 }
683 if ((rc & 3) == 1) { 695 if ((rc & 3) == 1) {
684 PDEBUG(D_PROBE, "Sensor is an OV8610");
685 sd->sensor = SEN_OV8610; 696 sd->sensor = SEN_OV8610;
686 } else { 697 } else {
687 PDEBUG(D_ERR, "Unknown image sensor version: %d", rc & 3); 698 PDEBUG(D_ERR, "Unknown image sensor version: %d", rc & 3);
688 return -1; 699 return -1;
689 } 700 }
690 PDEBUG(D_PROBE, "Writing 8610 registers"); 701 PDEBUG(D_PROBE, "Writing 8610 registers");
691 if (write_i2c_regvals(sd, 702 if (write_i2c_regvals(sd, norm_8610, ARRAY_SIZE(norm_8610)))
692 norm_8610,
693 sizeof norm_8610 / sizeof norm_8610[0]))
694 return -1; 703 return -1;
695 704
696 /* Set sensor-specific vars */ 705 /* Set sensor-specific vars */
697 sd->maxwidth = 640; 706/* sd->sif = 0; already done */
698 sd->maxheight = 480;
699 return 0; 707 return 0;
700} 708}
701 709
@@ -825,7 +833,7 @@ static int ov7xx0_configure(struct sd *sd)
825 { OV7670_REG_COM7, OV7670_COM7_RESET }, 833 { OV7670_REG_COM7, OV7670_COM7_RESET },
826 { OV7670_REG_TSLB, 0x04 }, /* OV */ 834 { OV7670_REG_TSLB, 0x04 }, /* OV */
827 { OV7670_REG_COM7, OV7670_COM7_FMT_VGA }, /* VGA */ 835 { OV7670_REG_COM7, OV7670_COM7_FMT_VGA }, /* VGA */
828 { OV7670_REG_CLKRC, 0x1 }, 836 { OV7670_REG_CLKRC, 0x01 },
829 /* 837 /*
830 * Set the hardware window. These values from OV don't entirely 838 * Set the hardware window. These values from OV don't entirely
831 * make sense - hstop is less than hstart. But they work... 839 * make sense - hstop is less than hstart. But they work...
@@ -839,16 +847,12 @@ static int ov7xx0_configure(struct sd *sd)
839 { 0x70, 0x3a }, { 0x71, 0x35 }, 847 { 0x70, 0x3a }, { 0x71, 0x35 },
840 { 0x72, 0x11 }, { 0x73, 0xf0 }, 848 { 0x72, 0x11 }, { 0x73, 0xf0 },
841 { 0xa2, 0x02 }, 849 { 0xa2, 0x02 },
842/* jfm */ 850/* { OV7670_REG_COM10, 0x0 }, */
843/* { OV7670_REG_COM10, 0x0 }, */
844 851
845 /* Gamma curve values */ 852 /* Gamma curve values */
846 { 0x7a, 0x20 }, 853 { 0x7a, 0x20 },
847/* jfm:win 7b=1c */
848 { 0x7b, 0x10 }, 854 { 0x7b, 0x10 },
849/* jfm:win 7c=28 */
850 { 0x7c, 0x1e }, 855 { 0x7c, 0x1e },
851/* jfm:win 7d=3c */
852 { 0x7d, 0x35 }, 856 { 0x7d, 0x35 },
853 { 0x7e, 0x5a }, { 0x7f, 0x69 }, 857 { 0x7e, 0x5a }, { 0x7f, 0x69 },
854 { 0x80, 0x76 }, { 0x81, 0x80 }, 858 { 0x80, 0x76 }, { 0x81, 0x80 },
@@ -864,13 +868,11 @@ static int ov7xx0_configure(struct sd *sd)
864 | OV7670_COM8_BFILT }, 868 | OV7670_COM8_BFILT },
865 { OV7670_REG_GAIN, 0 }, { OV7670_REG_AECH, 0 }, 869 { OV7670_REG_GAIN, 0 }, { OV7670_REG_AECH, 0 },
866 { OV7670_REG_COM4, 0x40 }, /* magic reserved bit */ 870 { OV7670_REG_COM4, 0x40 }, /* magic reserved bit */
867/* jfm:win 14=38 */
868 { OV7670_REG_COM9, 0x18 }, /* 4x gain + magic rsvd bit */ 871 { OV7670_REG_COM9, 0x18 }, /* 4x gain + magic rsvd bit */
869 { OV7670_REG_BD50MAX, 0x05 }, { OV7670_REG_BD60MAX, 0x07 }, 872 { OV7670_REG_BD50MAX, 0x05 }, { OV7670_REG_BD60MAX, 0x07 },
870 { OV7670_REG_AEW, 0x95 }, { OV7670_REG_AEB, 0x33 }, 873 { OV7670_REG_AEW, 0x95 }, { OV7670_REG_AEB, 0x33 },
871 { OV7670_REG_VPT, 0xe3 }, { OV7670_REG_HAECC1, 0x78 }, 874 { OV7670_REG_VPT, 0xe3 }, { OV7670_REG_HAECC1, 0x78 },
872 { OV7670_REG_HAECC2, 0x68 }, 875 { OV7670_REG_HAECC2, 0x68 },
873/* jfm:win a1=0b */
874 { 0xa1, 0x03 }, /* magic */ 876 { 0xa1, 0x03 }, /* magic */
875 { OV7670_REG_HAECC3, 0xd8 }, { OV7670_REG_HAECC4, 0xd8 }, 877 { OV7670_REG_HAECC3, 0xd8 }, { OV7670_REG_HAECC4, 0xd8 },
876 { OV7670_REG_HAECC5, 0xf0 }, { OV7670_REG_HAECC6, 0x90 }, 878 { OV7670_REG_HAECC5, 0xf0 }, { OV7670_REG_HAECC6, 0x90 },
@@ -884,8 +886,6 @@ static int ov7xx0_configure(struct sd *sd)
884 /* Almost all of these are magic "reserved" values. */ 886 /* Almost all of these are magic "reserved" values. */
885 { OV7670_REG_COM5, 0x61 }, { OV7670_REG_COM6, 0x4b }, 887 { OV7670_REG_COM5, 0x61 }, { OV7670_REG_COM6, 0x4b },
886 { 0x16, 0x02 }, 888 { 0x16, 0x02 },
887/* jfm */
888/* { OV7670_REG_MVFP, 0x07|OV7670_MVFP_MIRROR }, */
889 { OV7670_REG_MVFP, 0x07 }, 889 { OV7670_REG_MVFP, 0x07 },
890 { 0x21, 0x02 }, { 0x22, 0x91 }, 890 { 0x21, 0x02 }, { 0x22, 0x91 },
891 { 0x29, 0x07 }, { 0x33, 0x0b }, 891 { 0x29, 0x07 }, { 0x33, 0x0b },
@@ -930,7 +930,10 @@ static int ov7xx0_configure(struct sd *sd)
930 { OV7670_REG_EDGE, 0 }, 930 { OV7670_REG_EDGE, 0 },
931 { 0x75, 0x05 }, { 0x76, 0xe1 }, 931 { 0x75, 0x05 }, { 0x76, 0xe1 },
932 { 0x4c, 0 }, { 0x77, 0x01 }, 932 { 0x4c, 0 }, { 0x77, 0x01 },
933 { OV7670_REG_COM13, 0xc3 }, { 0x4b, 0x09 }, 933 { OV7670_REG_COM13, OV7670_COM13_GAMMA
934 | OV7670_COM13_UVSAT
935 | 2}, /* was 3 */
936 { 0x4b, 0x09 },
934 { 0xc9, 0x60 }, { OV7670_REG_COM16, 0x38 }, 937 { 0xc9, 0x60 }, { OV7670_REG_COM16, 0x38 },
935 { 0x56, 0x40 }, 938 { 0x56, 0x40 },
936 939
@@ -956,30 +959,10 @@ static int ov7xx0_configure(struct sd *sd)
956 { 0x79, 0x03 }, { 0xc8, 0x40 }, 959 { 0x79, 0x03 }, { 0xc8, 0x40 },
957 { 0x79, 0x05 }, { 0xc8, 0x30 }, 960 { 0x79, 0x05 }, { 0xc8, 0x30 },
958 { 0x79, 0x26 }, 961 { 0x79, 0x26 },
959 962 };
960 /* Format YUV422 */
961 { OV7670_REG_COM7, OV7670_COM7_YUV }, /* Selects YUV mode */
962 { OV7670_REG_RGB444, 0 }, /* No RGB444 please */
963 { OV7670_REG_COM1, 0 },
964 { OV7670_REG_COM15, OV7670_COM15_R00FF },
965 { OV7670_REG_COM9, 0x18 },
966 /* 4x gain ceiling; 0x8 is reserved bit */
967 { 0x4f, 0x80 }, /* "matrix coefficient 1" */
968 { 0x50, 0x80 }, /* "matrix coefficient 2" */
969 { 0x52, 0x22 }, /* "matrix coefficient 4" */
970 { 0x53, 0x5e }, /* "matrix coefficient 5" */
971 { 0x54, 0x80 }, /* "matrix coefficient 6" */
972 { OV7670_REG_COM13, OV7670_COM13_GAMMA|OV7670_COM13_UVSAT },
973};
974 963
975 PDEBUG(D_PROBE, "starting OV7xx0 configuration"); 964 PDEBUG(D_PROBE, "starting OV7xx0 configuration");
976 965
977/* jfm:already done? */
978 if (init_ov_sensor(sd) < 0)
979 PDEBUG(D_ERR, "Failed to read sensor ID");
980 else
981 PDEBUG(D_PROBE, "OV7xx0 initialized");
982
983 /* Detect sensor (sub)type */ 966 /* Detect sensor (sub)type */
984 rc = i2c_r(sd, OV7610_REG_COM_I); 967 rc = i2c_r(sd, OV7610_REG_COM_I);
985 968
@@ -1025,20 +1008,25 @@ static int ov7xx0_configure(struct sd *sd)
1025 return low; 1008 return low;
1026 } 1009 }
1027 if (high == 0x76) { 1010 if (high == 0x76) {
1028 if (low == 0x30) { 1011 switch (low) {
1012 case 0x30:
1029 PDEBUG(D_PROBE, "Sensor is an OV7630/OV7635"); 1013 PDEBUG(D_PROBE, "Sensor is an OV7630/OV7635");
1030 sd->sensor = SEN_OV7630; 1014 sd->sensor = SEN_OV7630;
1031 } else if (low == 0x40) { 1015 break;
1016 case 0x40:
1032 PDEBUG(D_PROBE, "Sensor is an OV7645"); 1017 PDEBUG(D_PROBE, "Sensor is an OV7645");
1033 sd->sensor = SEN_OV7640; /* FIXME */ 1018 sd->sensor = SEN_OV7640; /* FIXME */
1034 } else if (low == 0x45) { 1019 break;
1020 case 0x45:
1035 PDEBUG(D_PROBE, "Sensor is an OV7645B"); 1021 PDEBUG(D_PROBE, "Sensor is an OV7645B");
1036 sd->sensor = SEN_OV7640; /* FIXME */ 1022 sd->sensor = SEN_OV7640; /* FIXME */
1037 } else if (low == 0x48) { 1023 break;
1024 case 0x48:
1038 PDEBUG(D_PROBE, "Sensor is an OV7648"); 1025 PDEBUG(D_PROBE, "Sensor is an OV7648");
1039 sd->sensor = SEN_OV7640; /* FIXME */ 1026 sd->sensor = SEN_OV7640; /* FIXME */
1040 } else { 1027 break;
1041 PDEBUG(D_PROBE, "Unknown sensor: 0x76%X", low); 1028 default:
1029 PDEBUG(D_PROBE, "Unknown sensor: 0x76%x", low);
1042 return -1; 1030 return -1;
1043 } 1031 }
1044 } else { 1032 } else {
@@ -1050,34 +1038,34 @@ static int ov7xx0_configure(struct sd *sd)
1050 return -1; 1038 return -1;
1051 } 1039 }
1052 1040
1053 if (sd->sensor == SEN_OV7620) { 1041 switch (sd->sensor) {
1042 case SEN_OV7620:
1054 PDEBUG(D_PROBE, "Writing 7620 registers"); 1043 PDEBUG(D_PROBE, "Writing 7620 registers");
1055 if (write_i2c_regvals(sd, norm_7620, 1044 if (write_i2c_regvals(sd, norm_7620, ARRAY_SIZE(norm_7620)))
1056 sizeof norm_7620 / sizeof norm_7620[0]))
1057 return -1; 1045 return -1;
1058 } else if (sd->sensor == SEN_OV7630) { 1046 break;
1047 case SEN_OV7630:
1059 PDEBUG(D_ERR, "7630 is not supported by this driver version"); 1048 PDEBUG(D_ERR, "7630 is not supported by this driver version");
1060 return -1; 1049 return -1;
1061 } else if (sd->sensor == SEN_OV7640) { 1050 case SEN_OV7640:
1062 PDEBUG(D_PROBE, "Writing 7640 registers"); 1051 PDEBUG(D_PROBE, "Writing 7640 registers");
1063 if (write_i2c_regvals(sd, norm_7640, 1052 if (write_i2c_regvals(sd, norm_7640, ARRAY_SIZE(norm_7640)))
1064 sizeof norm_7640 / sizeof norm_7640[0]))
1065 return -1; 1053 return -1;
1066 } else if (sd->sensor == SEN_OV7670) { 1054 break;
1055 case SEN_OV7670:
1067 PDEBUG(D_PROBE, "Writing 7670 registers"); 1056 PDEBUG(D_PROBE, "Writing 7670 registers");
1068 if (write_i2c_regvals(sd, norm_7670, 1057 if (write_i2c_regvals(sd, norm_7670, ARRAY_SIZE(norm_7670)))
1069 sizeof norm_7670 / sizeof norm_7670[0]))
1070 return -1; 1058 return -1;
1071 } else { 1059 break;
1060 default:
1072 PDEBUG(D_PROBE, "Writing 7610 registers"); 1061 PDEBUG(D_PROBE, "Writing 7610 registers");
1073 if (write_i2c_regvals(sd, norm_7610, 1062 if (write_i2c_regvals(sd, norm_7610, ARRAY_SIZE(norm_7610)))
1074 sizeof norm_7610 / sizeof norm_7610[0]))
1075 return -1; 1063 return -1;
1064 break;
1076 } 1065 }
1077 1066
1078 /* Set sensor-specific vars */ 1067 /* Set sensor-specific vars */
1079 sd->maxwidth = 640; 1068/* sd->sif = 0; already done */
1080 sd->maxheight = 480;
1081 return 0; 1069 return 0;
1082} 1070}
1083 1071
@@ -1231,43 +1219,45 @@ static int ov6xx0_configure(struct sd *sd)
1231 /* Ugh. The first two bits are the version bits, but 1219 /* Ugh. The first two bits are the version bits, but
1232 * the entire register value must be used. I guess OVT 1220 * the entire register value must be used. I guess OVT
1233 * underestimated how many variants they would make. */ 1221 * underestimated how many variants they would make. */
1234 if (rc == 0x00) { 1222 switch (rc) {
1223 case 0x00:
1235 sd->sensor = SEN_OV6630; 1224 sd->sensor = SEN_OV6630;
1236 PDEBUG(D_ERR, 1225 PDEBUG(D_ERR,
1237 "WARNING: Sensor is an OV66308. Your camera may have"); 1226 "WARNING: Sensor is an OV66308. Your camera may have");
1238 PDEBUG(D_ERR, "been misdetected in previous driver versions."); 1227 PDEBUG(D_ERR, "been misdetected in previous driver versions.");
1239 } else if (rc == 0x01) { 1228 break;
1229 case 0x01:
1240 sd->sensor = SEN_OV6620; 1230 sd->sensor = SEN_OV6620;
1241 PDEBUG(D_PROBE, "Sensor is an OV6620"); 1231 break;
1242 } else if (rc == 0x02) { 1232 case 0x02:
1243 sd->sensor = SEN_OV6630; 1233 sd->sensor = SEN_OV6630;
1244 PDEBUG(D_PROBE, "Sensor is an OV66308AE"); 1234 PDEBUG(D_PROBE, "Sensor is an OV66308AE");
1245 } else if (rc == 0x03) { 1235 break;
1236 case 0x03:
1246 sd->sensor = SEN_OV6630; 1237 sd->sensor = SEN_OV6630;
1247 PDEBUG(D_PROBE, "Sensor is an OV66308AF"); 1238 PDEBUG(D_PROBE, "Sensor is an OV66308AF");
1248 } else if (rc == 0x90) { 1239 break;
1240 case 0x90:
1249 sd->sensor = SEN_OV6630; 1241 sd->sensor = SEN_OV6630;
1250 PDEBUG(D_ERR, 1242 PDEBUG(D_ERR,
1251 "WARNING: Sensor is an OV66307. Your camera may have"); 1243 "WARNING: Sensor is an OV66307. Your camera may have");
1252 PDEBUG(D_ERR, "been misdetected in previous driver versions."); 1244 PDEBUG(D_ERR, "been misdetected in previous driver versions.");
1253 } else { 1245 break;
1246 default:
1254 PDEBUG(D_ERR, "FATAL: Unknown sensor version: 0x%02x", rc); 1247 PDEBUG(D_ERR, "FATAL: Unknown sensor version: 0x%02x", rc);
1255 return -1; 1248 return -1;
1256 } 1249 }
1257 1250
1258 /* Set sensor-specific vars */ 1251 /* Set sensor-specific vars */
1259 sd->maxwidth = 352; 1252 sd->sif = 1;
1260 sd->maxheight = 288;
1261 1253
1262 if (sd->sensor == SEN_OV6620) { 1254 if (sd->sensor == SEN_OV6620) {
1263 PDEBUG(D_PROBE, "Writing 6x20 registers"); 1255 PDEBUG(D_PROBE, "Writing 6x20 registers");
1264 if (write_i2c_regvals(sd, norm_6x20, 1256 if (write_i2c_regvals(sd, norm_6x20, ARRAY_SIZE(norm_6x20)))
1265 sizeof norm_6x20 / sizeof norm_6x20[0]))
1266 return -1; 1257 return -1;
1267 } else { 1258 } else {
1268 PDEBUG(D_PROBE, "Writing 6x30 registers"); 1259 PDEBUG(D_PROBE, "Writing 6x30 registers");
1269 if (write_i2c_regvals(sd, norm_6x30, 1260 if (write_i2c_regvals(sd, norm_6x30, ARRAY_SIZE(norm_6x30)))
1270 sizeof norm_6x30 / sizeof norm_6x30[0]))
1271 return -1; 1261 return -1;
1272 } 1262 }
1273 return 0; 1263 return 0;
@@ -1276,14 +1266,8 @@ static int ov6xx0_configure(struct sd *sd)
1276/* Turns on or off the LED. Only has an effect with OV511+/OV518(+)/OV519 */ 1266/* Turns on or off the LED. Only has an effect with OV511+/OV518(+)/OV519 */
1277static void ov51x_led_control(struct sd *sd, int on) 1267static void ov51x_led_control(struct sd *sd, int on)
1278{ 1268{
1279 PDEBUG(D_STREAM, "LED (%s)", on ? "on" : "off"); 1269/* PDEBUG(D_STREAM, "LED (%s)", on ? "on" : "off"); */
1280 1270 reg_w_mask(sd, OV519_GPIO_DATA_OUT0, !on, 1); /* 0 / 1 */
1281/* if (sd->bridge == BRG_OV511PLUS) */
1282/* reg_w(sd, R511_SYS_LED_CTL, on ? 1 : 0); */
1283/* else if (sd->bridge == BRG_OV519) */
1284 reg_w_mask(sd, OV519_GPIO_DATA_OUT0, !on, 1); /* 0 / 1 */
1285/* else if (sd->bclass == BCL_OV518) */
1286/* reg_w_mask(sd, R518_GPIO_OUT, on ? 0x02 : 0x00, 0x02); */
1287} 1271}
1288 1272
1289/* this function is called at probe time */ 1273/* this function is called at probe time */
@@ -1293,11 +1277,8 @@ static int sd_config(struct gspca_dev *gspca_dev,
1293 struct sd *sd = (struct sd *) gspca_dev; 1277 struct sd *sd = (struct sd *) gspca_dev;
1294 struct cam *cam; 1278 struct cam *cam;
1295 1279
1296/* (from ov519_configure) */
1297 static const struct ov_regvals init_519[] = { 1280 static const struct ov_regvals init_519[] = {
1298 { 0x5a, 0x6d }, /* EnableSystem */ 1281 { 0x5a, 0x6d }, /* EnableSystem */
1299/* jfm trace usbsnoop3-1.txt */
1300/* jfm 53 = fb */
1301 { 0x53, 0x9b }, 1282 { 0x53, 0x9b },
1302 { 0x54, 0xff }, /* set bit2 to enable jpeg */ 1283 { 0x54, 0xff }, /* set bit2 to enable jpeg */
1303 { 0x5d, 0x03 }, 1284 { 0x5d, 0x03 },
@@ -1314,9 +1295,6 @@ static int sd_config(struct gspca_dev *gspca_dev,
1314 1295
1315 if (write_regvals(sd, init_519, ARRAY_SIZE(init_519))) 1296 if (write_regvals(sd, init_519, ARRAY_SIZE(init_519)))
1316 goto error; 1297 goto error;
1317/* jfm: not seen in windows trace */
1318 if (ov519_init_compression(sd))
1319 goto error;
1320 ov51x_led_control(sd, 0); /* turn LED off */ 1298 ov51x_led_control(sd, 0); /* turn LED off */
1321 1299
1322 /* Test for 76xx */ 1300 /* Test for 76xx */
@@ -1365,16 +1343,18 @@ static int sd_config(struct gspca_dev *gspca_dev,
1365 1343
1366 cam = &gspca_dev->cam; 1344 cam = &gspca_dev->cam;
1367 cam->epaddr = OV511_ENDPOINT_ADDRESS; 1345 cam->epaddr = OV511_ENDPOINT_ADDRESS;
1368 if (sd->maxwidth == 640) { 1346 if (!sd->sif) {
1369 cam->cam_mode = vga_mode; 1347 cam->cam_mode = vga_mode;
1370 cam->nmodes = sizeof vga_mode / sizeof vga_mode[0]; 1348 cam->nmodes = ARRAY_SIZE(vga_mode);
1371 } else { 1349 } else {
1372 cam->cam_mode = sif_mode; 1350 cam->cam_mode = sif_mode;
1373 cam->nmodes = sizeof sif_mode / sizeof sif_mode[0]; 1351 cam->nmodes = ARRAY_SIZE(sif_mode);
1374 } 1352 }
1375 sd->brightness = sd_ctrls[SD_BRIGHTNESS].qctrl.default_value; 1353 sd->brightness = BRIGHTNESS_DEF;
1376 sd->contrast = sd_ctrls[SD_CONTRAST].qctrl.default_value; 1354 sd->contrast = CONTRAST_DEF;
1377 sd->colors = sd_ctrls[SD_COLOR].qctrl.default_value; 1355 sd->colors = COLOR_DEF;
1356 sd->hflip = HFLIP_DEF;
1357 sd->vflip = VFLIP_DEF;
1378 return 0; 1358 return 0;
1379error: 1359error:
1380 PDEBUG(D_ERR, "OV519 Config failed"); 1360 PDEBUG(D_ERR, "OV519 Config failed");
@@ -1394,8 +1374,7 @@ static int sd_open(struct gspca_dev *gspca_dev)
1394 * 1374 *
1395 * Do not put any sensor-specific code in here (including I2C I/O functions) 1375 * Do not put any sensor-specific code in here (including I2C I/O functions)
1396 */ 1376 */
1397static int ov519_mode_init_regs(struct sd *sd, 1377static int ov519_mode_init_regs(struct sd *sd)
1398 int width, int height)
1399{ 1378{
1400 static const struct ov_regvals mode_init_519_ov7670[] = { 1379 static const struct ov_regvals mode_init_519_ov7670[] = {
1401 { 0x5d, 0x03 }, /* Turn off suspend mode */ 1380 { 0x5d, 0x03 }, /* Turn off suspend mode */
@@ -1441,36 +1420,23 @@ static int ov519_mode_init_regs(struct sd *sd,
1441 /* windows reads 0x55 at this point, why? */ 1420 /* windows reads 0x55 at this point, why? */
1442 }; 1421 };
1443 1422
1444/* int hi_res; */
1445
1446 PDEBUG(D_CONF, "mode init %dx%d", width, height);
1447
1448/* if (width >= 800 && height >= 600)
1449 hi_res = 1;
1450 else
1451 hi_res = 0; */
1452
1453/* if (ov51x_stop(sd) < 0)
1454 return -EIO; */
1455
1456 /******** Set the mode ********/ 1423 /******** Set the mode ********/
1457 if (sd->sensor != SEN_OV7670) { 1424 if (sd->sensor != SEN_OV7670) {
1458 if (write_regvals(sd, mode_init_519, 1425 if (write_regvals(sd, mode_init_519,
1459 ARRAY_SIZE(mode_init_519))) 1426 ARRAY_SIZE(mode_init_519)))
1460 return -EIO; 1427 return -EIO;
1428 if (sd->sensor == SEN_OV7640) {
1429 /* Select 8-bit input mode */
1430 reg_w_mask(sd, OV519_CAM_DFR, 0x10, 0x10);
1431 }
1461 } else { 1432 } else {
1462 if (write_regvals(sd, mode_init_519_ov7670, 1433 if (write_regvals(sd, mode_init_519_ov7670,
1463 ARRAY_SIZE(mode_init_519_ov7670))) 1434 ARRAY_SIZE(mode_init_519_ov7670)))
1464 return -EIO; 1435 return -EIO;
1465 } 1436 }
1466 1437
1467 if (sd->sensor == SEN_OV7640) { 1438 reg_w(sd, OV519_CAM_H_SIZE, sd->gspca_dev.width >> 4);
1468 /* Select 8-bit input mode */ 1439 reg_w(sd, OV519_CAM_V_SIZE, sd->gspca_dev.height >> 3);
1469 reg_w_mask(sd, OV519_CAM_DFR, 0x10, 0x10);
1470 }
1471
1472 reg_w(sd, OV519_CAM_H_SIZE, width >> 4);
1473 reg_w(sd, OV519_CAM_V_SIZE, height >> 3);
1474 reg_w(sd, OV519_CAM_X_OFFSETL, 0x00); 1440 reg_w(sd, OV519_CAM_X_OFFSETL, 0x00);
1475 reg_w(sd, OV519_CAM_X_OFFSETH, 0x00); 1441 reg_w(sd, OV519_CAM_X_OFFSETH, 0x00);
1476 reg_w(sd, OV519_CAM_Y_OFFSETL, 0x00); 1442 reg_w(sd, OV519_CAM_Y_OFFSETL, 0x00);
@@ -1485,9 +1451,10 @@ static int ov519_mode_init_regs(struct sd *sd,
1485 1451
1486/* FIXME: These are only valid at the max resolution. */ 1452/* FIXME: These are only valid at the max resolution. */
1487 sd->clockdiv = 0; 1453 sd->clockdiv = 0;
1488 if (sd->sensor == SEN_OV7640) { 1454 switch (sd->sensor) {
1455 case SEN_OV7640:
1489 switch (sd->frame_rate) { 1456 switch (sd->frame_rate) {
1490/*jfm: default was 30 fps */ 1457/*fixme: default was 30 fps */
1491 case 30: 1458 case 30:
1492 reg_w(sd, 0xa4, 0x0c); 1459 reg_w(sd, 0xa4, 0x0c);
1493 reg_w(sd, 0x23, 0xff); 1460 reg_w(sd, 0x23, 0xff);
@@ -1517,7 +1484,8 @@ static int ov519_mode_init_regs(struct sd *sd,
1517 sd->clockdiv = 1; 1484 sd->clockdiv = 1;
1518 break; 1485 break;
1519 } 1486 }
1520 } else if (sd->sensor == SEN_OV8610) { 1487 break;
1488 case SEN_OV8610:
1521 switch (sd->frame_rate) { 1489 switch (sd->frame_rate) {
1522 default: /* 15 fps */ 1490 default: /* 15 fps */
1523/* case 15: */ 1491/* case 15: */
@@ -1533,41 +1501,37 @@ static int ov519_mode_init_regs(struct sd *sd,
1533 reg_w(sd, 0x23, 0x1b); 1501 reg_w(sd, 0x23, 0x1b);
1534 break; 1502 break;
1535 } 1503 }
1536 sd->clockdiv = 0; 1504 break;
1537 } else if (sd->sensor == SEN_OV7670) { /* guesses, based on 7640 */ 1505 case SEN_OV7670: /* guesses, based on 7640 */
1538 PDEBUG(D_STREAM, "Setting framerate to %d fps", 1506 PDEBUG(D_STREAM, "Setting framerate to %d fps",
1539 (sd->frame_rate == 0) ? 15 : sd->frame_rate); 1507 (sd->frame_rate == 0) ? 15 : sd->frame_rate);
1508 reg_w(sd, 0xa4, 0x10);
1540 switch (sd->frame_rate) { 1509 switch (sd->frame_rate) {
1541 case 30: 1510 case 30:
1542 reg_w(sd, 0xa4, 0x10);
1543 reg_w(sd, 0x23, 0xff); 1511 reg_w(sd, 0x23, 0xff);
1544 break; 1512 break;
1545 case 20: 1513 case 20:
1546 reg_w(sd, 0xa4, 0x10);
1547 reg_w(sd, 0x23, 0x1b); 1514 reg_w(sd, 0x23, 0x1b);
1548 break; 1515 break;
1549 default: /* 15 fps */ 1516 default:
1550/* case 15: */ 1517/* case 15: */
1551 reg_w(sd, 0xa4, 0x10);
1552 reg_w(sd, 0x23, 0xff); 1518 reg_w(sd, 0x23, 0xff);
1553 sd->clockdiv = 1; 1519 sd->clockdiv = 1;
1554 break; 1520 break;
1555 } 1521 }
1522 break;
1556 } 1523 }
1557 1524
1558/* if (ov51x_restart(sd) < 0)
1559 return -EIO; */
1560
1561 /* Reset it just for good measure */
1562/* if (ov51x_reset(sd, OV511_RESET_NOREGS) < 0)
1563 return -EIO; */
1564 return 0; 1525 return 0;
1565} 1526}
1566 1527
1567static int mode_init_ov_sensor_regs(struct sd *sd, 1528static int mode_init_ov_sensor_regs(struct sd *sd)
1568 struct ovsensor_window *win)
1569{ 1529{
1570 int qvga = win->quarter; 1530 struct gspca_dev *gspca_dev;
1531 int qvga;
1532
1533 gspca_dev = &sd->gspca_dev;
1534 qvga = gspca_dev->cam.cam_mode[(int) gspca_dev->curr_mode].priv;
1571 1535
1572 /******** Mode (VGA/QVGA) and sensor specific regs ********/ 1536 /******** Mode (VGA/QVGA) and sensor specific regs ********/
1573 switch (sd->sensor) { 1537 switch (sd->sensor) {
@@ -1611,8 +1575,6 @@ static int mode_init_ov_sensor_regs(struct sd *sd,
1611 OV7670_COM7_FMT_MASK); 1575 OV7670_COM7_FMT_MASK);
1612 break; 1576 break;
1613 case SEN_OV6620: 1577 case SEN_OV6620:
1614 i2c_w_mask(sd, 0x14, qvga ? 0x20 : 0x00, 0x20);
1615 break;
1616 case SEN_OV6630: 1578 case SEN_OV6630:
1617 i2c_w_mask(sd, 0x14, qvga ? 0x20 : 0x00, 0x20); 1579 i2c_w_mask(sd, 0x14, qvga ? 0x20 : 0x00, 0x20);
1618 break; 1580 break;
@@ -1621,24 +1583,21 @@ static int mode_init_ov_sensor_regs(struct sd *sd,
1621 } 1583 }
1622 1584
1623 /******** Palette-specific regs ********/ 1585 /******** Palette-specific regs ********/
1624/* Need to do work here for the OV7670 */ 1586 if (sd->sensor == SEN_OV7610 || sd->sensor == SEN_OV76BE) {
1625 1587 /* not valid on the OV6620/OV7620/6630? */
1626 if (sd->sensor == SEN_OV7610 || sd->sensor == SEN_OV76BE) { 1588 i2c_w_mask(sd, 0x0e, 0x00, 0x40);
1627 /* not valid on the OV6620/OV7620/6630? */ 1589 }
1628 i2c_w_mask(sd, 0x0e, 0x00, 0x40);
1629 }
1630 1590
1631 /* The OV518 needs special treatment. Although both the OV518 1591 /* The OV518 needs special treatment. Although both the OV518
1632 * and the OV6630 support a 16-bit video bus, only the 8 bit Y 1592 * and the OV6630 support a 16-bit video bus, only the 8 bit Y
1633 * bus is actually used. The UV bus is tied to ground. 1593 * bus is actually used. The UV bus is tied to ground.
1634 * Therefore, the OV6630 needs to be in 8-bit multiplexed 1594 * Therefore, the OV6630 needs to be in 8-bit multiplexed
1635 * output mode */ 1595 * output mode */
1636 1596
1637 /* OV7640 is 8-bit only */ 1597 /* OV7640 is 8-bit only */
1638 1598
1639 if (sd->sensor != SEN_OV6630 && sd->sensor != SEN_OV7640) 1599 if (sd->sensor != SEN_OV6630 && sd->sensor != SEN_OV7640)
1640 i2c_w_mask(sd, 0x13, 0x00, 0x20); 1600 i2c_w_mask(sd, 0x13, 0x00, 0x20);
1641/* } */
1642 1601
1643 /******** Clock programming ********/ 1602 /******** Clock programming ********/
1644 /* The OV6620 needs special handling. This prevents the 1603 /* The OV6620 needs special handling. This prevents the
@@ -1647,14 +1606,14 @@ static int mode_init_ov_sensor_regs(struct sd *sd,
1647 1606
1648 /* Clock down */ 1607 /* Clock down */
1649 i2c_w(sd, 0x2a, 0x04); 1608 i2c_w(sd, 0x2a, 0x04);
1650 i2c_w(sd, 0x11, win->clockdiv); 1609 i2c_w(sd, 0x11, sd->clockdiv);
1651 i2c_w(sd, 0x2a, 0x84); 1610 i2c_w(sd, 0x2a, 0x84);
1652 /* This next setting is critical. It seems to improve 1611 /* This next setting is critical. It seems to improve
1653 * the gain or the contrast. The "reserved" bits seem 1612 * the gain or the contrast. The "reserved" bits seem
1654 * to have some effect in this case. */ 1613 * to have some effect in this case. */
1655 i2c_w(sd, 0x2d, 0x85); 1614 i2c_w(sd, 0x2d, 0x85);
1656 } else if (win->clockdiv >= 0) { 1615 } else if (sd->clockdiv >= 0) {
1657 i2c_w(sd, 0x11, win->clockdiv); 1616 i2c_w(sd, 0x11, sd->clockdiv);
1658 } 1617 }
1659 1618
1660 /******** Special Features ********/ 1619 /******** Special Features ********/
@@ -1674,7 +1633,7 @@ static int mode_init_ov_sensor_regs(struct sd *sd,
1674 /* is fully tested. */ 1633 /* is fully tested. */
1675 /* 7620/6620/6630? don't have register 0x35, so play it safe */ 1634 /* 7620/6620/6630? don't have register 0x35, so play it safe */
1676 if (sd->sensor == SEN_OV7610 || sd->sensor == SEN_OV76BE) { 1635 if (sd->sensor == SEN_OV7610 || sd->sensor == SEN_OV76BE) {
1677 if (win->width == 640 /*&& win->height == 480*/) 1636 if (!qvga)
1678 i2c_w(sd, 0x35, 0x9e); 1637 i2c_w(sd, 0x35, 0x9e);
1679 else 1638 else
1680 i2c_w(sd, 0x35, 0x1e); 1639 i2c_w(sd, 0x35, 0x1e);
@@ -1682,13 +1641,31 @@ static int mode_init_ov_sensor_regs(struct sd *sd,
1682 return 0; 1641 return 0;
1683} 1642}
1684 1643
1685static int set_ov_sensor_window(struct sd *sd, 1644static void sethvflip(struct sd *sd)
1686 struct ovsensor_window *win)
1687{ 1645{
1646 if (sd->sensor != SEN_OV7670)
1647 return;
1648 if (sd->gspca_dev.streaming)
1649 ov51x_stop(sd);
1650 i2c_w_mask(sd, OV7670_REG_MVFP,
1651 OV7670_MVFP_MIRROR * sd->hflip
1652 | OV7670_MVFP_VFLIP * sd->vflip,
1653 OV7670_MVFP_MIRROR | OV7670_MVFP_VFLIP);
1654 if (sd->gspca_dev.streaming)
1655 ov51x_restart(sd);
1656}
1657
1658static int set_ov_sensor_window(struct sd *sd)
1659{
1660 struct gspca_dev *gspca_dev;
1661 int qvga;
1688 int hwsbase, hwebase, vwsbase, vwebase, hwscale, vwscale; 1662 int hwsbase, hwebase, vwsbase, vwebase, hwscale, vwscale;
1689 int ret, hstart, hstop, vstop, vstart; 1663 int ret, hstart, hstop, vstop, vstart;
1690 __u8 v; 1664 __u8 v;
1691 1665
1666 gspca_dev = &sd->gspca_dev;
1667 qvga = gspca_dev->cam.cam_mode[(int) gspca_dev->curr_mode].priv;
1668
1692 /* The different sensor ICs handle setting up of window differently. 1669 /* The different sensor ICs handle setting up of window differently.
1693 * IF YOU SET IT WRONG, YOU WILL GET ALL ZERO ISOC DATA FROM OV51x!! */ 1670 * IF YOU SET IT WRONG, YOU WILL GET ALL ZERO ISOC DATA FROM OV51x!! */
1694 switch (sd->sensor) { 1671 switch (sd->sensor) {
@@ -1733,7 +1710,7 @@ static int set_ov_sensor_window(struct sd *sd,
1733 switch (sd->sensor) { 1710 switch (sd->sensor) {
1734 case SEN_OV6620: 1711 case SEN_OV6620:
1735 case SEN_OV6630: 1712 case SEN_OV6630:
1736 if (win->quarter) { /* QCIF */ 1713 if (qvga) { /* QCIF */
1737 hwscale = 0; 1714 hwscale = 0;
1738 vwscale = 0; 1715 vwscale = 0;
1739 } else { /* CIF */ 1716 } else { /* CIF */
@@ -1743,7 +1720,7 @@ static int set_ov_sensor_window(struct sd *sd,
1743 } 1720 }
1744 break; 1721 break;
1745 case SEN_OV8610: 1722 case SEN_OV8610:
1746 if (win->quarter) { /* QSVGA */ 1723 if (qvga) { /* QSVGA */
1747 hwscale = 1; 1724 hwscale = 1;
1748 vwscale = 1; 1725 vwscale = 1;
1749 } else { /* SVGA */ 1726 } else { /* SVGA */
@@ -1752,7 +1729,7 @@ static int set_ov_sensor_window(struct sd *sd,
1752 } 1729 }
1753 break; 1730 break;
1754 default: /* SEN_OV7xx0 */ 1731 default: /* SEN_OV7xx0 */
1755 if (win->quarter) { /* QVGA */ 1732 if (qvga) { /* QVGA */
1756 hwscale = 1; 1733 hwscale = 1;
1757 vwscale = 0; 1734 vwscale = 0;
1758 } else { /* VGA */ 1735 } else { /* VGA */
@@ -1761,7 +1738,7 @@ static int set_ov_sensor_window(struct sd *sd,
1761 } 1738 }
1762 } 1739 }
1763 1740
1764 ret = mode_init_ov_sensor_regs(sd, win); 1741 ret = mode_init_ov_sensor_regs(sd);
1765 if (ret < 0) 1742 if (ret < 0)
1766 return ret; 1743 return ret;
1767 1744
@@ -1782,7 +1759,7 @@ static int set_ov_sensor_window(struct sd *sd,
1782 /* I can hard code this for OV7670s */ 1759 /* I can hard code this for OV7670s */
1783 /* Yes, these numbers do look odd, but they're tested and work! */ 1760 /* Yes, these numbers do look odd, but they're tested and work! */
1784 if (sd->sensor == SEN_OV7670) { 1761 if (sd->sensor == SEN_OV7670) {
1785 if (win->quarter) { /* QVGA from ov7670.c by 1762 if (qvga) { /* QVGA from ov7670.c by
1786 * Jonathan Corbet */ 1763 * Jonathan Corbet */
1787 hstart = 164; 1764 hstart = 164;
1788 hstop = 20; 1765 hstop = 20;
@@ -1796,75 +1773,45 @@ static int set_ov_sensor_window(struct sd *sd,
1796 } 1773 }
1797 /* OV7670 hardware window registers are split across 1774 /* OV7670 hardware window registers are split across
1798 * multiple locations */ 1775 * multiple locations */
1799 i2c_w(sd, OV7670_REG_HSTART, (hstart >> 3) & 0xff); 1776 i2c_w(sd, OV7670_REG_HSTART, hstart >> 3);
1800 i2c_w(sd, OV7670_REG_HSTOP, (hstop >> 3) & 0xff); 1777 i2c_w(sd, OV7670_REG_HSTOP, hstop >> 3);
1801 v = i2c_r(sd, OV7670_REG_HREF); 1778 v = i2c_r(sd, OV7670_REG_HREF);
1802 v = (v & 0xc0) | ((hstop & 0x7) << 3) | (hstart & 0x07); 1779 v = (v & 0xc0) | ((hstop & 0x7) << 3) | (hstart & 0x07);
1803 msleep(10); /* need to sleep between read and write to 1780 msleep(10); /* need to sleep between read and write to
1804 * same reg! */ 1781 * same reg! */
1805 i2c_w(sd, OV7670_REG_HREF, v); 1782 i2c_w(sd, OV7670_REG_HREF, v);
1806 1783
1807 i2c_w(sd, OV7670_REG_VSTART, (vstart >> 2) & 0xff); 1784 i2c_w(sd, OV7670_REG_VSTART, vstart >> 2);
1808 i2c_w(sd, OV7670_REG_VSTOP, (vstop >> 2) & 0xff); 1785 i2c_w(sd, OV7670_REG_VSTOP, vstop >> 2);
1809 v = i2c_r(sd, OV7670_REG_VREF); 1786 v = i2c_r(sd, OV7670_REG_VREF);
1810 v = (v & 0xc0) | ((vstop & 0x3) << 2) | (vstart & 0x03); 1787 v = (v & 0xc0) | ((vstop & 0x3) << 2) | (vstart & 0x03);
1811 msleep(10); /* need to sleep between read and write to 1788 msleep(10); /* need to sleep between read and write to
1812 * same reg! */ 1789 * same reg! */
1813 i2c_w(sd, OV7670_REG_VREF, v); 1790 i2c_w(sd, OV7670_REG_VREF, v);
1814 1791 sethvflip(sd);
1815 } else { 1792 } else {
1816 i2c_w(sd, 0x17, hwsbase + (win->x >> hwscale)); 1793 i2c_w(sd, 0x17, hwsbase);
1817 i2c_w(sd, 0x18, hwebase + ((win->x + win->width) >> hwscale)); 1794 i2c_w(sd, 0x18, hwebase + (sd->gspca_dev.width >> hwscale));
1818 i2c_w(sd, 0x19, vwsbase + (win->y >> vwscale)); 1795 i2c_w(sd, 0x19, vwsbase);
1819 i2c_w(sd, 0x1a, vwebase + ((win->y + win->height) >> vwscale)); 1796 i2c_w(sd, 0x1a, vwebase + (sd->gspca_dev.height >> vwscale));
1820 } 1797 }
1821 return 0; 1798 return 0;
1822} 1799}
1823 1800
1824static int ov_sensor_mode_setup(struct sd *sd,
1825 int width, int height)
1826{
1827 struct ovsensor_window win;
1828
1829/* win.format = mode; */
1830
1831 /* Unless subcapture is enabled,
1832 * center the image window and downsample
1833 * if possible to increase the field of view */
1834 /* NOTE: OV518(+) and OV519 does downsampling on its own */
1835 win.width = width;
1836 win.height = height;
1837 if (width == sd->maxwidth)
1838 win.quarter = 0;
1839 else
1840 win.quarter = 1;
1841
1842 /* Center it */
1843 win.x = (win.width - width) / 2;
1844 win.y = (win.height - height) / 2;
1845
1846 /* Clock is determined by OV519 frame rate code */
1847 win.clockdiv = sd->clockdiv;
1848
1849 PDEBUG(D_CONF, "Setting clock divider to %d", win.clockdiv);
1850 return set_ov_sensor_window(sd, &win);
1851}
1852
1853/* -- start the camera -- */ 1801/* -- start the camera -- */
1854static void sd_start(struct gspca_dev *gspca_dev) 1802static void sd_start(struct gspca_dev *gspca_dev)
1855{ 1803{
1856 struct sd *sd = (struct sd *) gspca_dev; 1804 struct sd *sd = (struct sd *) gspca_dev;
1857 int ret; 1805 int ret;
1858 1806
1859 1807 ret = ov519_mode_init_regs(sd);
1860 ret = ov519_mode_init_regs(sd, gspca_dev->width, gspca_dev->height);
1861 if (ret < 0) 1808 if (ret < 0)
1862 goto out; 1809 goto out;
1863 ret = ov_sensor_mode_setup(sd, gspca_dev->width, gspca_dev->height); 1810 ret = set_ov_sensor_window(sd);
1864 if (ret < 0) 1811 if (ret < 0)
1865 goto out; 1812 goto out;
1866 1813
1867 ret = ov51x_restart((struct sd *) gspca_dev); 1814 ret = ov51x_restart(sd);
1868 if (ret < 0) 1815 if (ret < 0)
1869 goto out; 1816 goto out;
1870 PDEBUG(D_STREAM, "camera started alt: 0x%02x", gspca_dev->alt); 1817 PDEBUG(D_STREAM, "camera started alt: 0x%02x", gspca_dev->alt);
@@ -1938,12 +1885,10 @@ static void setbrightness(struct gspca_dev *gspca_dev)
1938{ 1885{
1939 struct sd *sd = (struct sd *) gspca_dev; 1886 struct sd *sd = (struct sd *) gspca_dev;
1940 int val; 1887 int val;
1941/* int was_streaming; */
1942 1888
1943 val = sd->brightness; 1889 val = sd->brightness;
1944 PDEBUG(D_CONF, "brightness:%d", val); 1890 PDEBUG(D_CONF, "brightness:%d", val);
1945/* was_streaming = gspca_dev->streaming; 1891/* if (gspca_dev->streaming)
1946 * if (was_streaming)
1947 * ov51x_stop(sd); */ 1892 * ov51x_stop(sd); */
1948 switch (sd->sensor) { 1893 switch (sd->sensor) {
1949 case SEN_OV8610: 1894 case SEN_OV8610:
@@ -1961,12 +1906,12 @@ static void setbrightness(struct gspca_dev *gspca_dev)
1961 i2c_w(sd, OV7610_REG_BRT, val); 1906 i2c_w(sd, OV7610_REG_BRT, val);
1962 break; 1907 break;
1963 case SEN_OV7670: 1908 case SEN_OV7670:
1964/*jfm - from windblows 1909/*win trace
1965 * i2c_w_mask(sd, OV7670_REG_COM8, 0, OV7670_COM8_AEC); */ 1910 * i2c_w_mask(sd, OV7670_REG_COM8, 0, OV7670_COM8_AEC); */
1966 i2c_w(sd, OV7670_REG_BRIGHT, ov7670_abs_to_sm(val)); 1911 i2c_w(sd, OV7670_REG_BRIGHT, ov7670_abs_to_sm(val));
1967 break; 1912 break;
1968 } 1913 }
1969/* if (was_streaming) 1914/* if (gspca_dev->streaming)
1970 * ov51x_restart(sd); */ 1915 * ov51x_restart(sd); */
1971} 1916}
1972 1917
@@ -1974,12 +1919,10 @@ static void setcontrast(struct gspca_dev *gspca_dev)
1974{ 1919{
1975 struct sd *sd = (struct sd *) gspca_dev; 1920 struct sd *sd = (struct sd *) gspca_dev;
1976 int val; 1921 int val;
1977/* int was_streaming; */
1978 1922
1979 val = sd->contrast; 1923 val = sd->contrast;
1980 PDEBUG(D_CONF, "contrast:%d", val); 1924 PDEBUG(D_CONF, "contrast:%d", val);
1981/* was_streaming = gspca_dev->streaming; 1925/* if (gspca_dev->streaming)
1982 if (was_streaming)
1983 ov51x_stop(sd); */ 1926 ov51x_stop(sd); */
1984 switch (sd->sensor) { 1927 switch (sd->sensor) {
1985 case SEN_OV7610: 1928 case SEN_OV7610:
@@ -2016,7 +1959,7 @@ static void setcontrast(struct gspca_dev *gspca_dev)
2016 i2c_w(sd, OV7670_REG_CONTRAS, val >> 1); 1959 i2c_w(sd, OV7670_REG_CONTRAS, val >> 1);
2017 break; 1960 break;
2018 } 1961 }
2019/* if (was_streaming) 1962/* if (gspca_dev->streaming)
2020 ov51x_restart(sd); */ 1963 ov51x_restart(sd); */
2021} 1964}
2022 1965
@@ -2024,12 +1967,10 @@ static void setcolors(struct gspca_dev *gspca_dev)
2024{ 1967{
2025 struct sd *sd = (struct sd *) gspca_dev; 1968 struct sd *sd = (struct sd *) gspca_dev;
2026 int val; 1969 int val;
2027/* int was_streaming; */
2028 1970
2029 val = sd->colors; 1971 val = sd->colors;
2030 PDEBUG(D_CONF, "saturation:%d", val); 1972 PDEBUG(D_CONF, "saturation:%d", val);
2031/* was_streaming = gspca_dev->streaming; 1973/* if (gspca_dev->streaming)
2032 if (was_streaming)
2033 ov51x_stop(sd); */ 1974 ov51x_stop(sd); */
2034 switch (sd->sensor) { 1975 switch (sd->sensor) {
2035 case SEN_OV8610: 1976 case SEN_OV8610:
@@ -2055,7 +1996,7 @@ static void setcolors(struct gspca_dev *gspca_dev)
2055 /* set REG_COM13 values for UV sat auto mode */ 1996 /* set REG_COM13 values for UV sat auto mode */
2056 break; 1997 break;
2057 } 1998 }
2058/* if (was_streaming) 1999/* if (gspca_dev->streaming)
2059 ov51x_restart(sd); */ 2000 ov51x_restart(sd); */
2060} 2001}
2061 2002
@@ -2110,6 +2051,40 @@ static int sd_getcolors(struct gspca_dev *gspca_dev, __s32 *val)
2110 return 0; 2051 return 0;
2111} 2052}
2112 2053
2054static int sd_sethflip(struct gspca_dev *gspca_dev, __s32 val)
2055{
2056 struct sd *sd = (struct sd *) gspca_dev;
2057
2058 sd->hflip = val;
2059 sethvflip(sd);
2060 return 0;
2061}
2062
2063static int sd_gethflip(struct gspca_dev *gspca_dev, __s32 *val)
2064{
2065 struct sd *sd = (struct sd *) gspca_dev;
2066
2067 *val = sd->hflip;
2068 return 0;
2069}
2070
2071static int sd_setvflip(struct gspca_dev *gspca_dev, __s32 val)
2072{
2073 struct sd *sd = (struct sd *) gspca_dev;
2074
2075 sd->vflip = val;
2076 sethvflip(sd);
2077 return 0;
2078}
2079
2080static int sd_getvflip(struct gspca_dev *gspca_dev, __s32 *val)
2081{
2082 struct sd *sd = (struct sd *) gspca_dev;
2083
2084 *val = sd->vflip;
2085 return 0;
2086}
2087
2113/* sub-driver description */ 2088/* sub-driver description */
2114static const struct sd_desc sd_desc = { 2089static const struct sd_desc sd_desc = {
2115 .name = MODULE_NAME, 2090 .name = MODULE_NAME,
@@ -2178,4 +2153,3 @@ module_exit(sd_mod_exit);
2178 2153
2179module_param(frame_rate, int, 0644); 2154module_param(frame_rate, int, 0644);
2180MODULE_PARM_DESC(frame_rate, "Frame rate (5, 10, 15, 20 or 30 fps)"); 2155MODULE_PARM_DESC(frame_rate, "Frame rate (5, 10, 15, 20 or 30 fps)");
2181
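One pattern worth calling out from the ov519.c changes above: the new sethvflip() folds both flip controls into a single masked write to OV7670_REG_MVFP. Multiplying each flag constant by a 0/1 control value builds the register value, while the mask always covers both bits, so a control that is switched off is actively written back to 0 rather than left at its previous state. A standalone illustration of that arithmetic, with the register I/O left out:

#include <stdio.h>

#define OV7670_MVFP_VFLIP  0x10	/* vertical flip */
#define OV7670_MVFP_MIRROR 0x20	/* mirror image */

int main(void)
{
	int hflip, vflip;
	unsigned char mask = OV7670_MVFP_MIRROR | OV7670_MVFP_VFLIP;

	for (hflip = 0; hflip <= 1; hflip++)
		for (vflip = 0; vflip <= 1; vflip++) {
			unsigned char val = OV7670_MVFP_MIRROR * hflip
					  | OV7670_MVFP_VFLIP * vflip;
			/* the masked write sets exactly these two bits to val */
			printf("hflip=%d vflip=%d -> value=0x%02x mask=0x%02x\n",
			       hflip, vflip, val, mask);
		}
	return 0;
}

Stopping and restarting the stream around the i2c write, as sethvflip() does, is a driver concern and is not modelled here.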
diff --git a/drivers/media/video/gspca/pac7311.c b/drivers/media/video/gspca/pac7311.c
index ea3d7021f401..815bea6edc44 100644
--- a/drivers/media/video/gspca/pac7311.c
+++ b/drivers/media/video/gspca/pac7311.c
@@ -31,7 +31,9 @@ MODULE_LICENSE("GPL");
31struct sd { 31struct sd {
32 struct gspca_dev gspca_dev; /* !! must be the first item */ 32 struct gspca_dev gspca_dev; /* !! must be the first item */
33 33
34 int avg_lum; 34 int lum_sum;
35 atomic_t avg_lum;
36 atomic_t do_gain;
35 37
36 unsigned char brightness; 38 unsigned char brightness;
37 unsigned char contrast; 39 unsigned char contrast;
@@ -271,6 +273,7 @@ static int sd_config(struct gspca_dev *gspca_dev,
271 sd->contrast = CONTRAST_DEF; 273 sd->contrast = CONTRAST_DEF;
272 sd->colors = COLOR_DEF; 274 sd->colors = COLOR_DEF;
273 sd->autogain = AUTOGAIN_DEF; 275 sd->autogain = AUTOGAIN_DEF;
276 sd->ag_cnt = -1;
274 return 0; 277 return 0;
275} 278}
276 279
@@ -311,6 +314,18 @@ static void setcolors(struct gspca_dev *gspca_dev)
311 PDEBUG(D_CONF|D_STREAM, "color: %i", sd->colors); 314 PDEBUG(D_CONF|D_STREAM, "color: %i", sd->colors);
312} 315}
313 316
317static void setautogain(struct gspca_dev *gspca_dev)
318{
319 struct sd *sd = (struct sd *) gspca_dev;
320
321 if (sd->autogain) {
322 sd->lum_sum = 0;
323 sd->ag_cnt = AG_CNT_START;
324 } else {
325 sd->ag_cnt = -1;
326 }
327}
328
314/* this function is called at open time */ 329/* this function is called at open time */
315static int sd_open(struct gspca_dev *gspca_dev) 330static int sd_open(struct gspca_dev *gspca_dev)
316{ 331{
@@ -320,8 +335,6 @@ static int sd_open(struct gspca_dev *gspca_dev)
320 335
321static void sd_start(struct gspca_dev *gspca_dev) 336static void sd_start(struct gspca_dev *gspca_dev)
322{ 337{
323 struct sd *sd = (struct sd *) gspca_dev;
324
325 reg_w(gspca_dev, 0xff, 0x01); 338 reg_w(gspca_dev, 0xff, 0x01);
326 reg_w_buf(gspca_dev, 0x0002, "\x48\x0a\x40\x08\x00\x00\x08\x00", 8); 339 reg_w_buf(gspca_dev, 0x0002, "\x48\x0a\x40\x08\x00\x00\x08\x00", 8);
327 reg_w_buf(gspca_dev, 0x000a, "\x06\xff\x11\xff\x5a\x30\x90\x4c", 8); 340 reg_w_buf(gspca_dev, 0x000a, "\x06\xff\x11\xff\x5a\x30\x90\x4c", 8);
@@ -394,6 +407,7 @@ static void sd_start(struct gspca_dev *gspca_dev)
394 setcontrast(gspca_dev); 407 setcontrast(gspca_dev);
395 setbrightness(gspca_dev); 408 setbrightness(gspca_dev);
396 setcolors(gspca_dev); 409 setcolors(gspca_dev);
410 setautogain(gspca_dev);
397 411
398 /* set correct resolution */ 412 /* set correct resolution */
399 switch (gspca_dev->cam.cam_mode[(int) gspca_dev->curr_mode].priv) { 413 switch (gspca_dev->cam.cam_mode[(int) gspca_dev->curr_mode].priv) {
@@ -431,13 +445,6 @@ static void sd_start(struct gspca_dev *gspca_dev)
431 reg_w(gspca_dev, 0xff, 0x01); 445 reg_w(gspca_dev, 0xff, 0x01);
432 reg_w(gspca_dev, 0x78, 0x04); 446 reg_w(gspca_dev, 0x78, 0x04);
433 reg_w(gspca_dev, 0x78, 0x05); 447 reg_w(gspca_dev, 0x78, 0x05);
434
435 if (sd->autogain) {
436 sd->ag_cnt = AG_CNT_START;
437 sd->avg_lum = 0;
438 } else {
439 sd->ag_cnt = -1;
440 }
441} 448}
442 449
443static void sd_stopN(struct gspca_dev *gspca_dev) 450static void sd_stopN(struct gspca_dev *gspca_dev)
@@ -473,13 +480,20 @@ static void sd_close(struct gspca_dev *gspca_dev)
473 reg_w(gspca_dev, 0x78, 0x44); /* Bit_0=start stream, Bit_7=LED */ 480 reg_w(gspca_dev, 0x78, 0x44); /* Bit_0=start stream, Bit_7=LED */
474} 481}
475 482
476static void setautogain(struct gspca_dev *gspca_dev, int luma) 483static void do_autogain(struct gspca_dev *gspca_dev)
477{ 484{
485 struct sd *sd = (struct sd *) gspca_dev;
486 int luma;
478 int luma_mean = 128; 487 int luma_mean = 128;
479 int luma_delta = 20; 488 int luma_delta = 20;
480 __u8 spring = 5; 489 __u8 spring = 5;
481 int Gbright; 490 int Gbright;
482 491
492 if (!atomic_read(&sd->do_gain))
493 return;
494 atomic_set(&sd->do_gain, 0);
495
496 luma = atomic_read(&sd->avg_lum);
483 Gbright = reg_r(gspca_dev, 0x02); 497 Gbright = reg_r(gspca_dev, 0x02);
484 PDEBUG(D_FRAM, "luma mean %d", luma); 498 PDEBUG(D_FRAM, "luma mean %d", luma);
485 if (luma < luma_mean - luma_delta || 499 if (luma < luma_mean - luma_delta ||
@@ -523,12 +537,13 @@ static void sd_pkt_scan(struct gspca_dev *gspca_dev,
523 537
524 /* start of frame */ 538 /* start of frame */
525 if (sd->ag_cnt >= 0 && p > 28) { 539 if (sd->ag_cnt >= 0 && p > 28) {
526 sd->avg_lum += data[p - 23]; 540 sd->lum_sum += data[p - 23];
527 if (--sd->ag_cnt < 0) { 541 if (--sd->ag_cnt < 0) {
528 sd->ag_cnt = AG_CNT_START; 542 sd->ag_cnt = AG_CNT_START;
529 setautogain(gspca_dev, 543 atomic_set(&sd->avg_lum,
530 sd->avg_lum / AG_CNT_START); 544 sd->lum_sum / AG_CNT_START);
531 sd->avg_lum = 0; 545 sd->lum_sum = 0;
546 atomic_set(&sd->do_gain, 1);
532 } 547 }
533 } 548 }
534 549
@@ -677,12 +692,8 @@ static int sd_setautogain(struct gspca_dev *gspca_dev, __s32 val)
677 struct sd *sd = (struct sd *) gspca_dev; 692 struct sd *sd = (struct sd *) gspca_dev;
678 693
679 sd->autogain = val; 694 sd->autogain = val;
680 if (val) { 695 if (gspca_dev->streaming)
681 sd->ag_cnt = AG_CNT_START; 696 setautogain(gspca_dev);
682 sd->avg_lum = 0;
683 } else {
684 sd->ag_cnt = -1;
685 }
686 return 0; 697 return 0;
687} 698}
688 699
@@ -706,6 +717,7 @@ static struct sd_desc sd_desc = {
706 .stop0 = sd_stop0, 717 .stop0 = sd_stop0,
707 .close = sd_close, 718 .close = sd_close,
708 .pkt_scan = sd_pkt_scan, 719 .pkt_scan = sd_pkt_scan,
720 .dq_callback = do_autogain,
709}; 721};
710 722
711/* -- module initialisation -- */ 723/* -- module initialisation -- */
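The pac7311.c changes above split auto-gain handling in two: sd_pkt_scan() only accumulates luminance samples and, after AG_CNT_START packets, publishes the average and raises a do_gain flag through atomic_t variables; the register writes are deferred to do_autogain(), which is hooked up as the dq_callback. A condensed, self-contained restatement of that flow using C11 atomics in place of the kernel's atomic_t -- the AG_CNT_START value and the helper names here are placeholders, and the USB I/O is replaced by a printf:

#include <stdatomic.h>
#include <stdio.h>

#define AG_CNT_START 13		/* placeholder value for illustration */

static int lum_sum;
static int ag_cnt = AG_CNT_START;
static atomic_int avg_lum;
static atomic_int do_gain;

/* stand-in for the frame-start handling in sd_pkt_scan() */
static void scan_packet(unsigned char luma_sample)
{
	lum_sum += luma_sample;
	if (--ag_cnt < 0) {
		ag_cnt = AG_CNT_START;
		atomic_store(&avg_lum, lum_sum / AG_CNT_START);
		lum_sum = 0;
		atomic_store(&do_gain, 1);
	}
}

/* stand-in for do_autogain(), run later from the dequeue path */
static void deferred_autogain(void)
{
	if (!atomic_load(&do_gain))
		return;
	atomic_store(&do_gain, 0);
	printf("adjust gain towards mean luma %d\n", atomic_load(&avg_lum));
}

int main(void)
{
	int i;

	for (i = 0; i < 3 * (AG_CNT_START + 1); i++) {
		scan_packet(120 + (i & 7));
		deferred_autogain();
	}
	return 0;
}

Keeping the heavy work out of the packet scanner mirrors the driver's intent: the scan path runs once per USB packet, while the gain correction only has to happen once per averaging window.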
diff --git a/drivers/media/video/gspca/sonixb.c b/drivers/media/video/gspca/sonixb.c
index e18748c5a14d..11210c71f66c 100644
--- a/drivers/media/video/gspca/sonixb.c
+++ b/drivers/media/video/gspca/sonixb.c
@@ -408,7 +408,7 @@ static void reg_w(struct gspca_dev *gspca_dev,
408 const __u8 *buffer, 408 const __u8 *buffer,
409 int len) 409 int len)
410{ 410{
411#ifdef CONFIG_VIDEO_ADV_DEBUG 411#ifdef GSPCA_DEBUG
412 if (len > sizeof gspca_dev->usb_buf) { 412 if (len > sizeof gspca_dev->usb_buf) {
413 PDEBUG(D_ERR|D_PACK, "reg_w: buffer overflow"); 413 PDEBUG(D_ERR|D_PACK, "reg_w: buffer overflow");
414 return; 414 return;
diff --git a/drivers/media/video/gspca/sonixj.c b/drivers/media/video/gspca/sonixj.c
index 33a3df1f6915..245a30ec5fb1 100644
--- a/drivers/media/video/gspca/sonixj.c
+++ b/drivers/media/video/gspca/sonixj.c
@@ -32,7 +32,7 @@ MODULE_LICENSE("GPL");
32struct sd { 32struct sd {
33 struct gspca_dev gspca_dev; /* !! must be the first item */ 33 struct gspca_dev gspca_dev; /* !! must be the first item */
34 34
35 int avg_lum; 35 atomic_t avg_lum;
36 unsigned int exposure; 36 unsigned int exposure;
37 37
38 unsigned short brightness; 38 unsigned short brightness;
@@ -148,55 +148,58 @@ static struct v4l2_pix_format vga_mode[] = {
148 148
149/*Data from sn9c102p+hv71331r */ 149/*Data from sn9c102p+hv71331r */
150static const __u8 sn_hv7131[] = { 150static const __u8 sn_hv7131[] = {
151/* reg0 reg1 reg2 reg3 reg4 reg5 reg6 reg7 reg8 reg9 */ 151/* reg0 reg1 reg2 reg3 reg4 reg5 reg6 reg7 */
152 0x00, 0x03, 0x64, 0x00, 0x1A, 0x20, 0x20, 0x20, 0xA1, 0x11, 152 0x00, 0x03, 0x64, 0x00, 0x1a, 0x20, 0x20, 0x20,
153/* rega regb regc regd rege regf reg10 reg11 */ 153/* reg8 reg9 rega regb regc regd rege regf */
154 0x02, 0x09, 0x00, 0x00, 0x00, 0x10, 0x03, 0x00, /* 00 */ 154 0xa1, 0x11, 0x02, 0x09, 0x00, 0x00, 0x00, 0x10,
155/* reg12 reg13 reg14 reg15 reg16 reg17 reg18 reg19 reg1a reg1b */ 155/* reg10 reg11 reg12 reg13 reg14 reg15 reg16 reg17 */
156 0x00, 0x01, 0x03, 0x28, 0x1e, 0x41, 0x0a, 0x00, 0x00, 0x00, 156 0x03, 0x00, 0x00, 0x01, 0x03, 0x28, 0x1e, 0x41,
157/* reg1c reg1d reg1e reg1f reg20 reg21 reg22 reg23 */ 157/* reg18 reg19 reg1a reg1b reg1c reg1d reg1e reg1f */
158 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 158 0x0a, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00
159}; 159};
160 160
161static const __u8 sn_mi0360[] = { 161static const __u8 sn_mi0360[] = {
162/* reg0 reg1 reg2 reg3 reg4 reg5 reg6 reg7 reg8 reg9 */ 162/* reg0 reg1 reg2 reg3 reg4 reg5 reg6 reg7 */
163 0x00, 0x61, 0x44, 0x00, 0x1a, 0x20, 0x20, 0x20, 0xb1, 0x5d, 163 0x00, 0x61, 0x44, 0x00, 0x1a, 0x20, 0x20, 0x20,
164/* rega regb regc regd rege regf reg10 reg11 */ 164/* reg8 reg9 rega regb regc regd rege regf */
165 0x07, 0x00, 0x00, 0x00, 0x00, 0x10, 0x03, 0x00, 165 0xb1, 0x5d, 0x07, 0x00, 0x00, 0x00, 0x00, 0x10,
166/* reg12 reg13 reg14 reg15 reg16 reg17 reg18 reg19 reg1a reg1b */ 166/* reg10 reg11 reg12 reg13 reg14 reg15 reg16 reg17 */
167 0x00, 0x02, 0x0a, 0x28, 0x1e, 0x61, 0x06, 0x00, 0x00, 0x00, 167 0x03, 0x00, 0x00, 0x02, 0x0a, 0x28, 0x1e, 0x61,
168/* reg1c reg1d reg1e reg1f reg20 reg21 reg22 reg23 */ 168/* reg18 reg19 reg1a reg1b reg1c reg1d reg1e reg1f */
169 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 169 0x06, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00
170}; 170};
171 171
172static const __u8 sn_mo4000[] = { 172static const __u8 sn_mo4000[] = {
173/* reg0 reg1 reg2 reg3 reg4 reg5 reg6 reg7 reg8 */ 173/* reg0 reg1 reg2 reg3 reg4 reg5 reg6 reg7 */
174 0x12, 0x23, 0x60, 0x00, 0x1A, 0x00, 0x20, 0x18, 0x81, 174 0x12, 0x23, 0x60, 0x00, 0x1a, 0x00, 0x20, 0x18,
175/* reg9 rega regb regc regd rege regf reg10 reg11*/ 175/* reg8 reg9 rega regb regc regd rege regf */
176 0x21, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03, 0x00, 176 0x81, 0x21, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
177/* reg12 reg13 reg14 reg15 reg16 reg17 reg18 reg19 reg1a*/ 177/* reg10 reg11 reg12 reg13 reg14 reg15 reg16 reg17 */
178 0x0b, 0x0f, 0x14, 0x28, 0x1e, 0x40, 0x08, 0x00, 0x00, 178 0x03, 0x00, 0x0b, 0x0f, 0x14, 0x28, 0x1e, 0x40,
179/* reg1b reg1c reg1d reg1e reg1f reg20 reg21 reg22 reg23*/ 179/* reg18 reg19 reg1a reg1b reg1c reg1d reg1e reg1f */
180 0x00, 0x00, 0x00, 0x00, 0x00, 0x08, 0x25, 0x39, 0x4b, 180 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00
181 0x5c, 0x6b, 0x79, 0x87, 0x95, 0xa2, 0xaf, 0xbb, 0xc7,
182 0xd3, 0xdf, 0xea, 0xf5
183}; 181};
184 182
185static const __u8 sn_ov7648[] = { 183static const __u8 sn_ov7648[] = {
186 0x00, 0x21, 0x62, 0x00, 0x1a, 0x20, 0x20, 0x20, 0xA1, 0x6E, 0x18, 0x65, 184/* reg0 reg1 reg2 reg3 reg4 reg5 reg6 reg7 */
187 0x00, 0x00, 0x00, 0x10, 0x03, 0x00, 0x00, 0x06, 0x06, 0x28, 0x1E, 0x82, 185 0x00, 0x21, 0x62, 0x00, 0x1a, 0x20, 0x20, 0x20,
188 0x07, 0x00, 0x00, 0x00, 0x00, 0x00 186/* reg8 reg9 rega regb regc regd rege regf */
187 0xa1, 0x6e, 0x18, 0x65, 0x00, 0x00, 0x00, 0x10,
188/* reg10 reg11 reg12 reg13 reg14 reg15 reg16 reg17 */
189 0x03, 0x00, 0x00, 0x06, 0x06, 0x28, 0x1e, 0x82,
190/* reg18 reg19 reg1a reg1b reg1c reg1d reg1e reg1f */
191 0x07, 0x00, 0x00, 0x00, 0x00, 0x00
189}; 192};
190 193
191static const __u8 sn_ov7660[] = { 194static const __u8 sn_ov7660[] = {
192/* reg0 reg1 reg2 reg3 reg4 reg5 reg6 reg7 reg8 */ 195/* reg0 reg1 reg2 reg3 reg4 reg5 reg6 reg7 */
193 0x00, 0x61, 0x40, 0x00, 0x1a, 0x00, 0x00, 0x00, 0x81, 196 0x00, 0x61, 0x40, 0x00, 0x1a, 0x20, 0x20, 0x20,
194/* reg9 rega regb regc regd rege regf reg10 reg11*/ 197/* reg8 reg9 rega regb regc regd rege regf */
195 0x21, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03, 0x00, 198 0x81, 0x21, 0x07, 0x00, 0x00, 0x00, 0x00, 0x10,
196/* reg12 reg13 reg14 reg15 reg16 reg17 reg18 reg19 reg1a*/ 199/* reg10 reg11 reg12 reg13 reg14 reg15 reg16 reg17 */
197 0x01, 0x01, 0x14, 0x28, 0x1e, 0x00, 0x07, 0x00, 0x00, 200 0x03, 0x00, 0x01, 0x01, 0x08, 0x28, 0x1e, 0x20,
198/* reg1b reg1c reg1d reg1e reg1f reg20 reg21 reg22 reg23*/ 201/* reg18 reg19 reg1a reg1b reg1c reg1d reg1e reg1f */
199 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 202 0x07, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
200}; 203};
201 204
202/* sequence specific to the sensors - !! index = SENSOR_xxx */ 205/* sequence specific to the sensors - !! index = SENSOR_xxx */
@@ -212,10 +215,6 @@ static const __u8 regsn20[] = {
212 0x00, 0x2d, 0x46, 0x5a, 0x6c, 0x7c, 0x8b, 0x99, 215 0x00, 0x2d, 0x46, 0x5a, 0x6c, 0x7c, 0x8b, 0x99,
213 0xa6, 0xb2, 0xbf, 0xca, 0xd5, 0xe0, 0xeb, 0xf5, 0xff 216 0xa6, 0xb2, 0xbf, 0xca, 0xd5, 0xe0, 0xeb, 0xf5, 0xff
214}; 217};
215static const __u8 regsn20_sn9c120[] = {
216 0x00, 0x25, 0x3c, 0x50, 0x62, 0x72, 0x81, 0x90,
217 0x9e, 0xab, 0xb8, 0xc5, 0xd1, 0xdd, 0xe9, 0xf4, 0xff
218};
219static const __u8 regsn20_sn9c325[] = { 218static const __u8 regsn20_sn9c325[] = {
220 0x0a, 0x3a, 0x56, 0x6c, 0x7e, 0x8d, 0x9a, 0xa4, 219 0x0a, 0x3a, 0x56, 0x6c, 0x7e, 0x8d, 0x9a, 0xa4,
221 0xaf, 0xbb, 0xc5, 0xcd, 0xd5, 0xde, 0xe8, 0xed, 0xf5 220 0xaf, 0xbb, 0xc5, 0xcd, 0xd5, 0xde, 0xe8, 0xed, 0xf5
@@ -227,21 +226,6 @@ static const __u8 reg84[] = {
227/* 0x00, 0x00, 0x00, 0x00, 0x00 */ 226/* 0x00, 0x00, 0x00, 0x00, 0x00 */
228 0xf7, 0x0f, 0x0a, 0x00, 0x00 227 0xf7, 0x0f, 0x0a, 0x00, 0x00
229}; 228};
230static const __u8 reg84_sn9c120_1[] = {
231 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
232 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
233 0x00, 0x00, 0x0c, 0x00, 0x00
234};
235static const __u8 reg84_sn9c120_2[] = {
236 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
237 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
238 0x00, 0x00, 0x0c, 0x02, 0x3b
239};
240static const __u8 reg84_sn9c120_3[] = {
241 0x14, 0x00, 0x27, 0x00, 0x08, 0x00, 0xeb, 0x0f,
242 0xd5, 0x0f, 0x42, 0x00, 0x41, 0x00, 0xca, 0x0f,
243 0xf5, 0x0f, 0x0c, 0x02, 0x3b
244};
245static const __u8 reg84_sn9c325[] = { 229static const __u8 reg84_sn9c325[] = {
246 0x14, 0x00, 0x27, 0x00, 0x07, 0x00, 0xe4, 0x0f, 230 0x14, 0x00, 0x27, 0x00, 0x07, 0x00, 0xe4, 0x0f,
247 0xd3, 0x0f, 0x4b, 0x00, 0x48, 0x00, 0xc0, 0x0f, 231 0xd3, 0x0f, 0x4b, 0x00, 0x48, 0x00, 0xc0, 0x0f,
@@ -360,17 +344,15 @@ static const __u8 ov7660_sensor_init[][8] = {
360 {0xa1, 0x21, 0x12, 0x80, 0x00, 0x00, 0x00, 0x10}, /* reset SCCB */ 344 {0xa1, 0x21, 0x12, 0x80, 0x00, 0x00, 0x00, 0x10}, /* reset SCCB */
361/* (delay 20ms) */ 345/* (delay 20ms) */
362 {0xa1, 0x21, 0x12, 0x05, 0x00, 0x00, 0x00, 0x10}, 346 {0xa1, 0x21, 0x12, 0x05, 0x00, 0x00, 0x00, 0x10},
363 /* Outformat ?? rawRGB */ 347 /* Outformat = rawRGB */
364 {0xa1, 0x21, 0x13, 0xb8, 0x00, 0x00, 0x00, 0x10}, /* init COM8 */ 348 {0xa1, 0x21, 0x13, 0xb8, 0x00, 0x00, 0x00, 0x10}, /* init COM8 */
365 {0xd1, 0x21, 0x00, 0x01, 0x74, 0x92, 0x00, 0x10}, 349 {0xd1, 0x21, 0x00, 0x01, 0x74, 0x74, 0x00, 0x10},
366/* {0xd1, 0x21, 0x00, 0x01, 0x74, 0x74, 0x00, 0x10}, */
367 /* GAIN BLUE RED VREF */ 350 /* GAIN BLUE RED VREF */
368 {0xd1, 0x21, 0x04, 0x00, 0x7d, 0x62, 0x00, 0x10}, 351 {0xd1, 0x21, 0x04, 0x00, 0x7d, 0x62, 0x00, 0x10},
369 /* COM 1 BAVE GEAVE AECHH */ 352 /* COM 1 BAVE GEAVE AECHH */
370 {0xb1, 0x21, 0x08, 0x83, 0x01, 0x00, 0x00, 0x10}, /* RAVE COM2 */ 353 {0xb1, 0x21, 0x08, 0x83, 0x01, 0x00, 0x00, 0x10}, /* RAVE COM2 */
371 {0xd1, 0x21, 0x0c, 0x00, 0x08, 0x04, 0x4f, 0x10}, /* COM 3 4 5 6 */ 354 {0xd1, 0x21, 0x0c, 0x00, 0x08, 0x04, 0x4f, 0x10}, /* COM 3 4 5 6 */
372 {0xd1, 0x21, 0x10, 0x7f, 0x40, 0x05, 0xf8, 0x10}, 355 {0xd1, 0x21, 0x10, 0x7f, 0x40, 0x05, 0xff, 0x10},
373/* {0xd1, 0x21, 0x10, 0x7f, 0x40, 0x05, 0xff, 0x10}, */
374 /* AECH CLKRC COM7 COM8 */ 356 /* AECH CLKRC COM7 COM8 */
375 {0xc1, 0x21, 0x14, 0x2c, 0x00, 0x02, 0x00, 0x10}, /* COM9 COM10 */ 357 {0xc1, 0x21, 0x14, 0x2c, 0x00, 0x02, 0x00, 0x10}, /* COM9 COM10 */
376 {0xd1, 0x21, 0x17, 0x10, 0x60, 0x02, 0x7b, 0x10}, 358 {0xd1, 0x21, 0x17, 0x10, 0x60, 0x02, 0x7b, 0x10},
@@ -379,8 +361,8 @@ static const __u8 ov7660_sensor_init[][8] = {
379 {0xb1, 0x21, 0x1e, 0x01, 0x0e, 0x00, 0x00, 0x10}, /* MVFP LAEC */ 361 {0xb1, 0x21, 0x1e, 0x01, 0x0e, 0x00, 0x00, 0x10}, /* MVFP LAEC */
380 {0xd1, 0x21, 0x20, 0x07, 0x07, 0x07, 0x07, 0x10}, 362 {0xd1, 0x21, 0x20, 0x07, 0x07, 0x07, 0x07, 0x10},
381 /* BOS GBOS GROS ROS (BGGR offset) */ 363 /* BOS GBOS GROS ROS (BGGR offset) */
382 {0xd1, 0x21, 0x24, 0x68, 0x58, 0xd4, 0x80, 0x10}, 364/* {0xd1, 0x21, 0x24, 0x68, 0x58, 0xd4, 0x80, 0x10}, */
383/* {0xd1, 0x21, 0x24, 0x78, 0x68, 0xd4, 0x80, 0x10}, */ 365 {0xd1, 0x21, 0x24, 0x78, 0x68, 0xd4, 0x80, 0x10},
384 /* AEW AEB VPT BBIAS */ 366 /* AEW AEB VPT BBIAS */
385 {0xd1, 0x21, 0x28, 0x80, 0x30, 0x00, 0x00, 0x10}, 367 {0xd1, 0x21, 0x28, 0x80, 0x30, 0x00, 0x00, 0x10},
386 /* GbBIAS RSVD EXHCH EXHCL */ 368 /* GbBIAS RSVD EXHCH EXHCL */
@@ -407,9 +389,9 @@ static const __u8 ov7660_sensor_init[][8] = {
407 {0xd1, 0x21, 0x62, 0x00, 0x00, 0x50, 0x30, 0x10}, 389 {0xd1, 0x21, 0x62, 0x00, 0x00, 0x50, 0x30, 0x10},
408 /* LCC1 LCC2 LCC3 LCC4 */ 390 /* LCC1 LCC2 LCC3 LCC4 */
409 {0xa1, 0x21, 0x66, 0x00, 0x00, 0x00, 0x00, 0x10}, /* LCC5 */ 391 {0xa1, 0x21, 0x66, 0x00, 0x00, 0x00, 0x00, 0x10}, /* LCC5 */
410 {0xd1, 0x21, 0x67, 0x80, 0x7a, 0x90, 0x80, 0x10}, 392 {0xd1, 0x21, 0x67, 0x80, 0x7a, 0x90, 0x80, 0x10}, /* MANU */
411 {0xa1, 0x21, 0x6b, 0x0a, 0x00, 0x00, 0x00, 0x10}, 393 {0xa1, 0x21, 0x6b, 0x0a, 0x00, 0x00, 0x00, 0x10},
412 /* band gap reference [0..3] DBLV */ 394 /* band gap reference [0:3] DBLV */
413 {0xd1, 0x21, 0x6c, 0x30, 0x48, 0x80, 0x74, 0x10}, /* gamma curve */ 395 {0xd1, 0x21, 0x6c, 0x30, 0x48, 0x80, 0x74, 0x10}, /* gamma curve */
414 {0xd1, 0x21, 0x70, 0x64, 0x60, 0x5c, 0x58, 0x10}, /* gamma curve */ 396 {0xd1, 0x21, 0x70, 0x64, 0x60, 0x5c, 0x58, 0x10}, /* gamma curve */
415 {0xd1, 0x21, 0x74, 0x54, 0x4c, 0x40, 0x38, 0x10}, /* gamma curve */ 397 {0xd1, 0x21, 0x74, 0x54, 0x4c, 0x40, 0x38, 0x10}, /* gamma curve */
@@ -419,37 +401,35 @@ static const __u8 ov7660_sensor_init[][8] = {
419 {0xd1, 0x21, 0x84, 0x6e, 0x77, 0x87, 0x95, 0x10}, /* gamma curve */ 401 {0xd1, 0x21, 0x84, 0x6e, 0x77, 0x87, 0x95, 0x10}, /* gamma curve */
420 {0xc1, 0x21, 0x88, 0xaf, 0xc7, 0xdf, 0x00, 0x10}, /* gamma curve */ 402 {0xc1, 0x21, 0x88, 0xaf, 0xc7, 0xdf, 0x00, 0x10}, /* gamma curve */
421 {0xc1, 0x21, 0x8b, 0x99, 0x99, 0xcf, 0x00, 0x10}, /* reserved */ 403 {0xc1, 0x21, 0x8b, 0x99, 0x99, 0xcf, 0x00, 0x10}, /* reserved */
422 {0xb1, 0x21, 0x92, 0x00, 0x00, 0x00, 0x00, 0x10}, 404 {0xb1, 0x21, 0x92, 0x00, 0x00, 0x00, 0x00, 0x10}, /* DM_LNL/H */
423/****** (some exchanges in the win trace) ******/ 405/****** (some exchanges in the win trace) ******/
424 {0xa1, 0x21, 0x1e, 0x01, 0x00, 0x00, 0x00, 0x10}, 406 {0xa1, 0x21, 0x1e, 0x01, 0x00, 0x00, 0x00, 0x10}, /* MVFP */
425 /* bits[3..0]reserved */ 407 /* bits[3..0]reserved */
426 {0xa1, 0x21, 0x1e, 0x01, 0x00, 0x00, 0x00, 0x10}, 408 {0xa1, 0x21, 0x1e, 0x01, 0x00, 0x00, 0x00, 0x10},
427 {0xa1, 0x21, 0x03, 0x00, 0x00, 0x00, 0x00, 0x10}, 409 {0xa1, 0x21, 0x03, 0x00, 0x00, 0x00, 0x00, 0x10},
428 /* VREF vertical frame ctrl */ 410 /* VREF vertical frame ctrl */
429 {0xa1, 0x21, 0x03, 0x00, 0x00, 0x00, 0x00, 0x10}, 411 {0xa1, 0x21, 0x03, 0x00, 0x00, 0x00, 0x00, 0x10},
430 {0xa1, 0x21, 0x10, 0x20, 0x00, 0x00, 0x00, 0x10}, /* 0x20 */ 412 {0xa1, 0x21, 0x10, 0x20, 0x00, 0x00, 0x00, 0x10}, /* AECH 0x20 */
431 {0xa1, 0x21, 0x2d, 0x00, 0x00, 0x00, 0x00, 0x10}, 413 {0xa1, 0x21, 0x2d, 0x00, 0x00, 0x00, 0x00, 0x10}, /* ADVFL */
432 {0xa1, 0x21, 0x2e, 0x00, 0x00, 0x00, 0x00, 0x10}, 414 {0xa1, 0x21, 0x2e, 0x00, 0x00, 0x00, 0x00, 0x10}, /* ADVFH */
433/* {0xa1, 0x21, 0x00, 0x1f, 0x00, 0x00, 0x00, 0x10}, */ 415 {0xa1, 0x21, 0x00, 0x1f, 0x00, 0x00, 0x00, 0x10}, /* GAIN */
434 {0xa1, 0x21, 0x00, 0x0a, 0x00, 0x00, 0x00, 0x10}, 416/* {0xb1, 0x21, 0x01, 0x78, 0x78, 0x00, 0x00, 0x10}, * BLUE */
435 {0xb1, 0x21, 0x01, 0x78, 0x78, 0x00, 0x00, 0x10},
436/****** (some exchanges in the win trace) ******/ 417/****** (some exchanges in the win trace) ******/
437 {0xa1, 0x21, 0x93, 0x00, 0x00, 0x00, 0x00, 0x10},/* dummy line height */ 418 {0xa1, 0x21, 0x93, 0x00, 0x00, 0x00, 0x00, 0x10},/* dummy line height */
438 {0xa1, 0x21, 0x92, 0x25, 0x00, 0x00, 0x00, 0x10},/* dummy line low */ 419 {0xa1, 0x21, 0x92, 0x25, 0x00, 0x00, 0x00, 0x10}, /* dummy line low */
439 {0xa1, 0x21, 0x2a, 0x00, 0x00, 0x00, 0x00, 0x10}, 420 {0xa1, 0x21, 0x2a, 0x00, 0x00, 0x00, 0x00, 0x10}, /* EXHCH */
440 {0xa1, 0x21, 0x2b, 0x00, 0x00, 0x00, 0x00, 0x10}, 421 {0xa1, 0x21, 0x2b, 0x00, 0x00, 0x00, 0x00, 0x10}, /* EXHCL */
441 {0xa1, 0x21, 0x02, 0x90, 0x00, 0x00, 0x00, 0x10}, 422/* {0xa1, 0x21, 0x02, 0x90, 0x00, 0x00, 0x00, 0x10}, * RED */
442/****** (some exchanges in the win trace) ******/ 423/****** (some exchanges in the win trace) ******/
443/**********startsensor KO if changed !!****/ 424/******!! startsensor KO if changed !!****/
444 {0xa1, 0x21, 0x93, 0x01, 0x00, 0x00, 0x00, 0x10}, 425 {0xa1, 0x21, 0x93, 0x01, 0x00, 0x00, 0x00, 0x10},
445 {0xa1, 0x21, 0x92, 0xff, 0x00, 0x00, 0x00, 0x10}, 426 {0xa1, 0x21, 0x92, 0xff, 0x00, 0x00, 0x00, 0x10},
446 {0xa1, 0x21, 0x2a, 0x00, 0x00, 0x00, 0x00, 0x10}, 427 {0xa1, 0x21, 0x2a, 0x00, 0x00, 0x00, 0x00, 0x10},
447 {0xa1, 0x21, 0x2b, 0xc3, 0x00, 0x00, 0x00, 0x10}, 428 {0xa1, 0x21, 0x2b, 0xc3, 0x00, 0x00, 0x00, 0x10},
448/* here may start the isoc exchanges */
449 {} 429 {}
450}; 430};
451/* reg0x04 reg0x07 reg 0x10 */ 431/* reg 0x04 reg 0x07 reg 0x10 */
452/* expo = (COM1 & 0x02) | (AECHH & 0x2f <<10) [ (AECh << 2) */ 432/* expo = (COM1 & 0x02) | ((AECHH & 0x2f) << 10) | (AECh << 2) */
453 433
454static const __u8 ov7648_sensor_init[][8] = { 434static const __u8 ov7648_sensor_init[][8] = {
455 {0xC1, 0x00, 0x01, 0x00, 0x00, 0x00, 0x01, 0x00}, 435 {0xC1, 0x00, 0x01, 0x00, 0x00, 0x00, 0x01, 0x00},
@@ -680,13 +660,12 @@ static int configure_gpio(struct gspca_dev *gspca_dev,
680 const __u8 *reg9a; 660 const __u8 *reg9a;
681 static const __u8 reg9a_def[] = 661 static const __u8 reg9a_def[] =
682 {0x08, 0x40, 0x20, 0x10, 0x00, 0x04}; 662 {0x08, 0x40, 0x20, 0x10, 0x00, 0x04};
683 static const __u8 reg9a_sn9c120[] = /* from win trace */
684 {0x00, 0x40, 0x38, 0x30, 0x00, 0x20};
685 static const __u8 reg9a_sn9c325[] = 663 static const __u8 reg9a_sn9c325[] =
686 {0x0a, 0x40, 0x38, 0x30, 0x00, 0x20}; 664 {0x0a, 0x40, 0x38, 0x30, 0x00, 0x20};
665 static const __u8 regd4[] = {0x60, 0x00, 0x00};
687 666
688 reg_w1(gspca_dev, 0xf1, 0x00); 667 reg_w1(gspca_dev, 0xf1, 0x00);
689 reg_w1(gspca_dev, 0x01, sn9c1xx[0]); /*fixme:jfm was [1] en v1*/ 668 reg_w1(gspca_dev, 0x01, 0x00); /*jfm was sn9c1xx[1] in v1*/
690 669
691 /* configure gpio */ 670 /* configure gpio */
692 reg_w(gspca_dev, 0x01, &sn9c1xx[1], 2); 671 reg_w(gspca_dev, 0x01, &sn9c1xx[1], 2);
@@ -696,25 +675,17 @@ static int configure_gpio(struct gspca_dev *gspca_dev,
696 case BRIDGE_SN9C325: 675 case BRIDGE_SN9C325:
697 reg9a = reg9a_sn9c325; 676 reg9a = reg9a_sn9c325;
698 break; 677 break;
699 case BRIDGE_SN9C120:
700 reg9a = reg9a_sn9c120;
701 break;
702 default: 678 default:
703 reg9a = reg9a_def; 679 reg9a = reg9a_def;
704 break; 680 break;
705 } 681 }
706 reg_w(gspca_dev, 0x9a, reg9a, 6); 682 reg_w(gspca_dev, 0x9a, reg9a, 6);
707 683
708 reg_w1(gspca_dev, 0xd4, 0x60); /*fixme:jfm 60 00 00 (3) ? */ 684 reg_w(gspca_dev, 0xd4, regd4, sizeof regd4); /*fixme:jfm was 60 only*/
709 685
710 reg_w(gspca_dev, 0x03, &sn9c1xx[3], 0x0f); 686 reg_w(gspca_dev, 0x03, &sn9c1xx[3], 0x0f);
711 687
712 switch (sd->bridge) { 688 switch (sd->bridge) {
713 case BRIDGE_SN9C120: /* from win trace */
714 reg_w1(gspca_dev, 0x01, 0x61);
715 reg_w1(gspca_dev, 0x17, 0x20);
716 reg_w1(gspca_dev, 0x01, 0x60);
717 break;
718 case BRIDGE_SN9C325: 689 case BRIDGE_SN9C325:
719 reg_w1(gspca_dev, 0x01, 0x43); 690 reg_w1(gspca_dev, 0x01, 0x43);
720 reg_w1(gspca_dev, 0x17, 0xae); 691 reg_w1(gspca_dev, 0x17, 0xae);
@@ -810,6 +781,8 @@ static int sd_config(struct gspca_dev *gspca_dev,
810 sd->contrast = CONTRAST_DEF; 781 sd->contrast = CONTRAST_DEF;
811 sd->colors = COLOR_DEF; 782 sd->colors = COLOR_DEF;
812 sd->autogain = AUTOGAIN_DEF; 783 sd->autogain = AUTOGAIN_DEF;
784 sd->ag_cnt = -1;
785
813 return 0; 786 return 0;
814} 787}
815 788
@@ -823,10 +796,11 @@ static int sd_open(struct gspca_dev *gspca_dev)
823 796
824 /* setup a selector by bridge */ 797 /* setup a selector by bridge */
825 reg_w1(gspca_dev, 0xf1, 0x01); 798 reg_w1(gspca_dev, 0xf1, 0x01);
826 reg_r(gspca_dev, 0x00, 1); /* -> regF1 = 0x00 */
827 reg_w1(gspca_dev, 0xf1, gspca_dev->usb_buf[0]);
828 reg_r(gspca_dev, 0x00, 1); 799 reg_r(gspca_dev, 0x00, 1);
800 reg_w1(gspca_dev, 0xf1, gspca_dev->usb_buf[0]);
801 reg_r(gspca_dev, 0x00, 1); /* get sonix chip id */
829 regF1 = gspca_dev->usb_buf[0]; 802 regF1 = gspca_dev->usb_buf[0];
803 PDEBUG(D_PROBE, "Sonix chip id: %02x", regF1);
830 switch (sd->bridge) { 804 switch (sd->bridge) {
831 case BRIDGE_SN9C102P: 805 case BRIDGE_SN9C102P:
832 if (regF1 != 0x11) 806 if (regF1 != 0x11)
@@ -937,15 +911,10 @@ static void setbrightness(struct gspca_dev *gspca_dev)
937 sd->exposure = setexposure(gspca_dev, expo); 911 sd->exposure = setexposure(gspca_dev, expo);
938 break; 912 break;
939 case SENSOR_MI0360: 913 case SENSOR_MI0360:
940 expo = sd->brightness >> 4;
941 sd->exposure = setexposure(gspca_dev, expo);
942 break;
943 case SENSOR_MO4000: 914 case SENSOR_MO4000:
944 expo = sd->brightness >> 4; 915 expo = sd->brightness >> 4;
945 sd->exposure = setexposure(gspca_dev, expo); 916 sd->exposure = setexposure(gspca_dev, expo);
946 break; 917 break;
947 case SENSOR_OV7660:
948 return; /*jfm??*/
949 } 918 }
950 919
951 k2 = sd->brightness >> 10; 920 k2 = sd->brightness >> 10;
@@ -958,8 +927,6 @@ static void setcontrast(struct gspca_dev *gspca_dev)
958 __u8 k2; 927 __u8 k2;
959 __u8 contrast[] = { 0x00, 0x00, 0x28, 0x00, 0x07, 0x00 }; 928 __u8 contrast[] = { 0x00, 0x00, 0x28, 0x00, 0x07, 0x00 };
960 929
961 if (sd->sensor == SENSOR_OV7660)
962 return; /*jfm??*/
963 k2 = sd->contrast; 930 k2 = sd->contrast;
964 contrast[2] = k2; 931 contrast[2] = k2;
965 contrast[0] = (k2 + 1) >> 1; 932 contrast[0] = (k2 + 1) >> 1;
@@ -981,20 +948,32 @@ static void setcolors(struct gspca_dev *gspca_dev)
981 reg_w1(gspca_dev, 0x05, data); 948 reg_w1(gspca_dev, 0x05, data);
982} 949}
983 950
951static void setautogain(struct gspca_dev *gspca_dev)
952{
953 struct sd *sd = (struct sd *) gspca_dev;
954
955 switch (sd->sensor) {
956 case SENSOR_HV7131R:
957 case SENSOR_MO4000:
958 case SENSOR_MI0360:
959 if (sd->autogain)
960 sd->ag_cnt = AG_CNT_START;
961 else
962 sd->ag_cnt = -1;
963 break;
964 }
965}
966
984/* -- start the camera -- */ 967/* -- start the camera -- */
985static void sd_start(struct gspca_dev *gspca_dev) 968static void sd_start(struct gspca_dev *gspca_dev)
986{ 969{
987 struct sd *sd = (struct sd *) gspca_dev; 970 struct sd *sd = (struct sd *) gspca_dev;
988 int i; 971 int i;
989 __u8 data; 972 __u8 reg1, reg17, reg18;
990 __u8 reg1;
991 __u8 reg17;
992 const __u8 *sn9c1xx; 973 const __u8 *sn9c1xx;
993 int mode; 974 int mode;
994 static const __u8 C0[] = { 0x2d, 0x2d, 0x3a, 0x05, 0x04, 0x3f }; 975 static const __u8 C0[] = { 0x2d, 0x2d, 0x3a, 0x05, 0x04, 0x3f };
995 static const __u8 CA[] = { 0x28, 0xd8, 0x14, 0xec }; 976 static const __u8 CA[] = { 0x28, 0xd8, 0x14, 0xec };
996 static const __u8 CA_sn9c120[] =
997 { 0x14, 0xec, 0x0a, 0xf6 }; /* SN9C120 */
998 static const __u8 CE[] = { 0x32, 0xdd, 0x2d, 0xdd }; /* MI0360 */ 977 static const __u8 CE[] = { 0x32, 0xdd, 0x2d, 0xdd }; /* MI0360 */
999 static const __u8 CE_sn9c325[] = 978 static const __u8 CE_sn9c325[] =
1000 { 0x32, 0xdd, 0x32, 0xdd }; /* OV7648 - SN9C325 */ 979 { 0x32, 0xdd, 0x32, 0xdd }; /* OV7648 - SN9C325 */
@@ -1002,9 +981,7 @@ static void sd_start(struct gspca_dev *gspca_dev)
1002 sn9c1xx = sn_tb[(int) sd->sensor]; 981 sn9c1xx = sn_tb[(int) sd->sensor];
1003 configure_gpio(gspca_dev, sn9c1xx); 982 configure_gpio(gspca_dev, sn9c1xx);
1004 983
1005/*fixme:jfm this sequence should appear at end of sd_start */ 984/* reg_w1(gspca_dev, 0x01, 0x44); jfm from win trace*/
1006/* with
1007 reg_w1(gspca_dev, 0x01, 0x44); */
1008 reg_w1(gspca_dev, 0x15, sn9c1xx[0x15]); 985 reg_w1(gspca_dev, 0x15, sn9c1xx[0x15]);
1009 reg_w1(gspca_dev, 0x16, sn9c1xx[0x16]); 986 reg_w1(gspca_dev, 0x16, sn9c1xx[0x16]);
1010 reg_w1(gspca_dev, 0x12, sn9c1xx[0x12]); 987 reg_w1(gspca_dev, 0x12, sn9c1xx[0x12]);
@@ -1016,20 +993,16 @@ static void sd_start(struct gspca_dev *gspca_dev)
1016 reg_w1(gspca_dev, 0xc7, 0x00); 993 reg_w1(gspca_dev, 0xc7, 0x00);
1017 reg_w1(gspca_dev, 0xc8, 0x50); 994 reg_w1(gspca_dev, 0xc8, 0x50);
1018 reg_w1(gspca_dev, 0xc9, 0x3c); 995 reg_w1(gspca_dev, 0xc9, 0x3c);
1019/*fixme:jfm end of ending sequence */
1020 reg_w1(gspca_dev, 0x18, sn9c1xx[0x18]); 996 reg_w1(gspca_dev, 0x18, sn9c1xx[0x18]);
1021 switch (sd->bridge) { 997 switch (sd->bridge) {
1022 case BRIDGE_SN9C325: 998 case BRIDGE_SN9C325:
1023 data = 0xae; 999 reg17 = 0xae;
1024 break;
1025 case BRIDGE_SN9C120:
1026 data = 0xa0;
1027 break; 1000 break;
1028 default: 1001 default:
1029 data = 0x60; 1002 reg17 = 0x60;
1030 break; 1003 break;
1031 } 1004 }
1032 reg_w1(gspca_dev, 0x17, data); 1005 reg_w1(gspca_dev, 0x17, reg17);
1033 reg_w1(gspca_dev, 0x05, sn9c1xx[5]); 1006 reg_w1(gspca_dev, 0x05, sn9c1xx[5]);
1034 reg_w1(gspca_dev, 0x07, sn9c1xx[7]); 1007 reg_w1(gspca_dev, 0x07, sn9c1xx[7]);
1035 reg_w1(gspca_dev, 0x06, sn9c1xx[6]); 1008 reg_w1(gspca_dev, 0x06, sn9c1xx[6]);
@@ -1044,20 +1017,6 @@ static void sd_start(struct gspca_dev *gspca_dev)
1044 reg_w1(gspca_dev, 0x9a, 0x0a); 1017 reg_w1(gspca_dev, 0x9a, 0x0a);
1045 reg_w1(gspca_dev, 0x99, 0x60); 1018 reg_w1(gspca_dev, 0x99, 0x60);
1046 break; 1019 break;
1047 case BRIDGE_SN9C120:
1048 reg_w(gspca_dev, 0x20, regsn20_sn9c120,
1049 sizeof regsn20_sn9c120);
1050 for (i = 0; i < 2; i++)
1051 reg_w(gspca_dev, 0x84, reg84_sn9c120_1,
1052 sizeof reg84_sn9c120_1);
1053 for (i = 0; i < 6; i++)
1054 reg_w(gspca_dev, 0x84, reg84_sn9c120_2,
1055 sizeof reg84_sn9c120_2);
1056 reg_w(gspca_dev, 0x84, reg84_sn9c120_3,
1057 sizeof reg84_sn9c120_3);
1058 reg_w1(gspca_dev, 0x9a, 0x05);
1059 reg_w1(gspca_dev, 0x99, 0x5b);
1060 break;
1061 default: 1020 default:
1062 reg_w(gspca_dev, 0x20, regsn20, sizeof regsn20); 1021 reg_w(gspca_dev, 0x20, regsn20, sizeof regsn20);
1063 for (i = 0; i < 8; i++) 1022 for (i = 0; i < 8; i++)
@@ -1107,22 +1066,14 @@ static void sd_start(struct gspca_dev *gspca_dev)
1107/* reg1 = 0x44; */ 1066/* reg1 = 0x44; */
1108/* reg1 = 0x46; (done) */ 1067/* reg1 = 0x46; (done) */
1109 } else { 1068 } else {
1110 reg17 = 0xa2; /* 640 */ 1069 reg17 = 0x22; /* 640 MCKSIZE */
1111 reg1 = 0x40; 1070 reg1 = 0x06;
1112 } 1071 }
1113 break; 1072 break;
1114 } 1073 }
1115 reg_w(gspca_dev, 0xc0, C0, 6); 1074 reg_w(gspca_dev, 0xc0, C0, 6);
1075 reg_w(gspca_dev, 0xca, CA, 4);
1116 switch (sd->bridge) { 1076 switch (sd->bridge) {
1117 case BRIDGE_SN9C120: /*jfm ?? */
1118 reg_w(gspca_dev, 0xca, CA_sn9c120, 4);
1119 break;
1120 default:
1121 reg_w(gspca_dev, 0xca, CA, 4);
1122 break;
1123 }
1124 switch (sd->bridge) {
1125 case BRIDGE_SN9C120: /*jfm ?? */
1126 case BRIDGE_SN9C325: 1077 case BRIDGE_SN9C325:
1127 reg_w(gspca_dev, 0xce, CE_sn9c325, 4); 1078 reg_w(gspca_dev, 0xce, CE_sn9c325, 4);
1128 break; 1079 break;
@@ -1133,19 +1084,19 @@ static void sd_start(struct gspca_dev *gspca_dev)
1133 } 1084 }
1134 1085
1135 /* here change size mode 0 -> VGA; 1 -> CIF */ 1086 /* here change size mode 0 -> VGA; 1 -> CIF */
1136 data = 0x40 | sn9c1xx[0x18] | (mode << 4); 1087 reg18 = sn9c1xx[0x18] | (mode << 4);
1137 reg_w1(gspca_dev, 0x18, data); 1088 reg_w1(gspca_dev, 0x18, reg18 | 0x40);
1138 1089
1139 reg_w(gspca_dev, 0x100, qtable4, 0x40); 1090 reg_w(gspca_dev, 0x100, qtable4, 0x40);
1140 reg_w(gspca_dev, 0x140, qtable4 + 0x40, 0x40); 1091 reg_w(gspca_dev, 0x140, qtable4 + 0x40, 0x40);
1141 1092
1142 data = sn9c1xx[0x18] | (mode << 4); 1093 reg_w1(gspca_dev, 0x18, reg18);
1143 reg_w1(gspca_dev, 0x18, data);
1144 1094
1145 reg_w1(gspca_dev, 0x17, reg17); 1095 reg_w1(gspca_dev, 0x17, reg17);
1146 reg_w1(gspca_dev, 0x01, reg1); 1096 reg_w1(gspca_dev, 0x01, reg1);
1147 setbrightness(gspca_dev); 1097 setbrightness(gspca_dev);
1148 setcontrast(gspca_dev); 1098 setcontrast(gspca_dev);
1099 setautogain(gspca_dev);
1149} 1100}
1150 1101
1151static void sd_stopN(struct gspca_dev *gspca_dev) 1102static void sd_stopN(struct gspca_dev *gspca_dev)
@@ -1168,12 +1119,11 @@ static void sd_stopN(struct gspca_dev *gspca_dev)
1168 i2c_w8(gspca_dev, stopmi0360); 1119 i2c_w8(gspca_dev, stopmi0360);
1169 data = 0x29; 1120 data = 0x29;
1170 break; 1121 break;
1171 case SENSOR_MO4000:
1172 break;
1173 case SENSOR_OV7648: 1122 case SENSOR_OV7648:
1174 data = 0x29; 1123 data = 0x29;
1175 break; 1124 break;
1176 default: 1125 default:
1126/* case SENSOR_MO4000: */
1177/* case SENSOR_OV7660: */ 1127/* case SENSOR_OV7660: */
1178 break; 1128 break;
1179 } 1129 }
@@ -1193,16 +1143,23 @@ static void sd_close(struct gspca_dev *gspca_dev)
1193{ 1143{
1194} 1144}
1195 1145
1196static void setautogain(struct gspca_dev *gspca_dev) 1146static void do_autogain(struct gspca_dev *gspca_dev)
1197{ 1147{
1198 struct sd *sd = (struct sd *) gspca_dev; 1148 struct sd *sd = (struct sd *) gspca_dev;
1199 /* Thanks S., without your advice, autobright should not work :) */
1200 int delta; 1149 int delta;
1201 int expotimes = 0; 1150 int expotimes;
1202 __u8 luma_mean = 130; 1151 __u8 luma_mean = 130;
1203 __u8 luma_delta = 20; 1152 __u8 luma_delta = 20;
1204 1153
1205 delta = sd->avg_lum; 1154 /* Thanks S., without your advice, autobright should not work :) */
1155 if (sd->ag_cnt < 0)
1156 return;
1157 if (--sd->ag_cnt >= 0)
1158 return;
1159 sd->ag_cnt = AG_CNT_START;
1160
1161 delta = atomic_read(&sd->avg_lum);
1162 PDEBUG(D_FRAM, "mean lum %d", delta);
1206 if (delta < luma_mean - luma_delta || 1163 if (delta < luma_mean - luma_delta ||
1207 delta > luma_mean + luma_delta) { 1164 delta > luma_mean + luma_delta) {
1208 switch (sd->sensor) { 1165 switch (sd->sensor) {
@@ -1214,8 +1171,9 @@ static void setautogain(struct gspca_dev *gspca_dev)
1214 sd->exposure = setexposure(gspca_dev, 1171 sd->exposure = setexposure(gspca_dev,
1215 (unsigned int) (expotimes << 8)); 1172 (unsigned int) (expotimes << 8));
1216 break; 1173 break;
1217 case SENSOR_MO4000: 1174 default:
1218 case SENSOR_MI0360: 1175/* case SENSOR_MO4000: */
1176/* case SENSOR_MI0360: */
1219 expotimes = sd->exposure; 1177 expotimes = sd->exposure;
1220 expotimes += (luma_mean - delta) >> 6; 1178 expotimes += (luma_mean - delta) >> 6;
1221 if (expotimes < 0) 1179 if (expotimes < 0)
@@ -1228,6 +1186,8 @@ static void setautogain(struct gspca_dev *gspca_dev)
1228 } 1186 }
1229} 1187}
1230 1188
1189/* scan the URB packets */
1190/* This function is run at interrupt level. */
1231static void sd_pkt_scan(struct gspca_dev *gspca_dev, 1191static void sd_pkt_scan(struct gspca_dev *gspca_dev,
1232 struct gspca_frame *frame, /* target */ 1192 struct gspca_frame *frame, /* target */
1233 __u8 *data, /* isoc packet */ 1193 __u8 *data, /* isoc packet */
@@ -1244,9 +1204,6 @@ static void sd_pkt_scan(struct gspca_dev *gspca_dev,
1244 frame, data, sof + 2); 1204 frame, data, sof + 2);
1245 if (sd->ag_cnt < 0) 1205 if (sd->ag_cnt < 0)
1246 return; 1206 return;
1247 if (--sd->ag_cnt >= 0)
1248 return;
1249 sd->ag_cnt = AG_CNT_START;
1250/* w1 w2 w3 */ 1207/* w1 w2 w3 */
1251/* w4 w5 w6 */ 1208/* w4 w5 w6 */
1252/* w7 w8 */ 1209/* w7 w8 */
@@ -1261,9 +1218,7 @@ static void sd_pkt_scan(struct gspca_dev *gspca_dev,
1261/* w5 */ 1218/* w5 */
1262 avg_lum += ((data[sof + 31] << 8) | data[sof + 32]) >> 4; 1219 avg_lum += ((data[sof + 31] << 8) | data[sof + 32]) >> 4;
1263 avg_lum >>= 4; 1220 avg_lum >>= 4;
1264 sd->avg_lum = avg_lum; 1221 atomic_set(&sd->avg_lum, avg_lum);
1265 PDEBUG(D_PACK, "mean lum %d", avg_lum);
1266 setautogain(gspca_dev);
1267 return; 1222 return;
1268 } 1223 }
1269 if (gspca_dev->last_packet_type == LAST_PACKET) { 1224 if (gspca_dev->last_packet_type == LAST_PACKET) {
@@ -1300,6 +1255,7 @@ static unsigned int getexposure(struct gspca_dev *gspca_dev)
1300 (hexpo << 10) | (mexpo << 2) | lexpo); 1255 (hexpo << 10) | (mexpo << 2) | lexpo);
1301 return (hexpo << 10) | (mexpo << 2) | lexpo; 1256 return (hexpo << 10) | (mexpo << 2) | lexpo;
1302 default: 1257 default:
1258/* case SENSOR_OV7648: * jfm: is it ok for 7648? */
1303/* case SENSOR_OV7660: */ 1259/* case SENSOR_OV7660: */
1304 /* read sensor exposure */ 1260 /* read sensor exposure */
1305 i2c_r5(gspca_dev, 0x04); 1261 i2c_r5(gspca_dev, 0x04);
@@ -1318,14 +1274,12 @@ static void getbrightness(struct gspca_dev *gspca_dev)
1318 /* hardcoded registers seem not readable */ 1274 /* hardcoded registers seem not readable */
1319 switch (sd->sensor) { 1275 switch (sd->sensor) {
1320 case SENSOR_HV7131R: 1276 case SENSOR_HV7131R:
1321/* sd->brightness = 0x7fff; */
1322 sd->brightness = getexposure(gspca_dev) >> 4; 1277 sd->brightness = getexposure(gspca_dev) >> 4;
1323 break; 1278 break;
1324 case SENSOR_MI0360: 1279 case SENSOR_MI0360:
1325 sd->brightness = getexposure(gspca_dev) << 4; 1280 sd->brightness = getexposure(gspca_dev) << 4;
1326 break; 1281 break;
1327 case SENSOR_MO4000: 1282 case SENSOR_MO4000:
1328/* sd->brightness = 0x1fff; */
1329 sd->brightness = getexposure(gspca_dev) << 4; 1283 sd->brightness = getexposure(gspca_dev) << 4;
1330 break; 1284 break;
1331 } 1285 }
@@ -1391,10 +1345,8 @@ static int sd_setautogain(struct gspca_dev *gspca_dev, __s32 val)
1391 struct sd *sd = (struct sd *) gspca_dev; 1345 struct sd *sd = (struct sd *) gspca_dev;
1392 1346
1393 sd->autogain = val; 1347 sd->autogain = val;
1394 if (val) 1348 if (gspca_dev->streaming)
1395 sd->ag_cnt = AG_CNT_START; 1349 setautogain(gspca_dev);
1396 else
1397 sd->ag_cnt = -1;
1398 return 0; 1350 return 0;
1399} 1351}
1400 1352
@@ -1418,6 +1370,7 @@ static const struct sd_desc sd_desc = {
1418 .stop0 = sd_stop0, 1370 .stop0 = sd_stop0,
1419 .close = sd_close, 1371 .close = sd_close,
1420 .pkt_scan = sd_pkt_scan, 1372 .pkt_scan = sd_pkt_scan,
1373 .dq_callback = do_autogain,
1421}; 1374};
1422 1375
1423/* -- module initialisation -- */ 1376/* -- module initialisation -- */
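The sonixj hunks above share one pattern with the spca561 changes further down: sd_pkt_scan() runs at interrupt level, so the exposure correction is moved out of it into a dq_callback (do_autogain) that the gspca core invokes from process context when a frame is dequeued. The interrupt path in sonixj now only publishes the mean luminance through an atomic, setautogain() merely arms or disarms the ag_cnt frame counter, and do_autogain() throttles how often the exposure is actually touched. A minimal stand-alone sketch of that throttled adjustment loop follows; it mirrors the MO4000/MI0360 branch of do_autogain() from the diff, but AG_CNT_START's real value is not shown here, so the value below and the per-frame luminance samples are assumptions for illustration only.

/* Sketch only: throttled autogain adjustment as in do_autogain() above,
 * rewritten as a stand-alone program.  AG_CNT_START and the sample
 * luminance values are invented for the example. */
#include <stdio.h>

#define AG_CNT_START 13		/* assumed: frames skipped between adjustments */

static int ag_cnt = -1;		/* -1 means autogain disabled */
static int exposure = 0x200;	/* current sensor exposure (arbitrary start) */

static void do_autogain(int avg_lum)
{
	const int luma_mean = 130, luma_delta = 20;

	if (ag_cnt < 0)
		return;			/* autogain off */
	if (--ag_cnt >= 0)
		return;			/* not yet time to adjust */
	ag_cnt = AG_CNT_START;

	if (avg_lum < luma_mean - luma_delta ||
	    avg_lum > luma_mean + luma_delta) {
		/* same correction step as the driver */
		exposure += (luma_mean - avg_lum) >> 6;
		if (exposure < 0)
			exposure = 0;
	}
}

int main(void)
{
	int frame;

	ag_cnt = AG_CNT_START;		/* what setautogain() does when enabled */
	for (frame = 0; frame < 100; frame++)
		do_autogain(90 + frame % 80);	/* fake per-frame mean luminance */
	printf("exposure after 100 frames: %#x\n", exposure);
	return 0;
}
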
diff --git a/drivers/media/video/gspca/spca505.c b/drivers/media/video/gspca/spca505.c
index 3c2be80cbd65..eda29d609359 100644
--- a/drivers/media/video/gspca/spca505.c
+++ b/drivers/media/video/gspca/spca505.c
@@ -61,27 +61,27 @@ static struct ctrl sd_ctrls[] = {
61 61
62static struct v4l2_pix_format vga_mode[] = { 62static struct v4l2_pix_format vga_mode[] = {
63 {160, 120, V4L2_PIX_FMT_SPCA505, V4L2_FIELD_NONE, 63 {160, 120, V4L2_PIX_FMT_SPCA505, V4L2_FIELD_NONE,
64 .bytesperline = 160 * 3, 64 .bytesperline = 160,
65 .sizeimage = 160 * 120 * 3 / 2, 65 .sizeimage = 160 * 120 * 3 / 2,
66 .colorspace = V4L2_COLORSPACE_SRGB, 66 .colorspace = V4L2_COLORSPACE_SRGB,
67 .priv = 5}, 67 .priv = 5},
68 {176, 144, V4L2_PIX_FMT_SPCA505, V4L2_FIELD_NONE, 68 {176, 144, V4L2_PIX_FMT_SPCA505, V4L2_FIELD_NONE,
69 .bytesperline = 176 * 3, 69 .bytesperline = 176,
70 .sizeimage = 176 * 144 * 3 / 2, 70 .sizeimage = 176 * 144 * 3 / 2,
71 .colorspace = V4L2_COLORSPACE_SRGB, 71 .colorspace = V4L2_COLORSPACE_SRGB,
72 .priv = 4}, 72 .priv = 4},
73 {320, 240, V4L2_PIX_FMT_SPCA505, V4L2_FIELD_NONE, 73 {320, 240, V4L2_PIX_FMT_SPCA505, V4L2_FIELD_NONE,
74 .bytesperline = 320 * 3, 74 .bytesperline = 320,
75 .sizeimage = 320 * 240 * 3 / 2, 75 .sizeimage = 320 * 240 * 3 / 2,
76 .colorspace = V4L2_COLORSPACE_SRGB, 76 .colorspace = V4L2_COLORSPACE_SRGB,
77 .priv = 2}, 77 .priv = 2},
78 {352, 288, V4L2_PIX_FMT_SPCA505, V4L2_FIELD_NONE, 78 {352, 288, V4L2_PIX_FMT_SPCA505, V4L2_FIELD_NONE,
79 .bytesperline = 352 * 3, 79 .bytesperline = 352,
80 .sizeimage = 352 * 288 * 3 / 2, 80 .sizeimage = 352 * 288 * 3 / 2,
81 .colorspace = V4L2_COLORSPACE_SRGB, 81 .colorspace = V4L2_COLORSPACE_SRGB,
82 .priv = 1}, 82 .priv = 1},
83 {640, 480, V4L2_PIX_FMT_SPCA505, V4L2_FIELD_NONE, 83 {640, 480, V4L2_PIX_FMT_SPCA505, V4L2_FIELD_NONE,
84 .bytesperline = 640 * 3, 84 .bytesperline = 640,
85 .sizeimage = 640 * 480 * 3 / 2, 85 .sizeimage = 640 * 480 * 3 / 2,
86 .colorspace = V4L2_COLORSPACE_SRGB, 86 .colorspace = V4L2_COLORSPACE_SRGB,
87 .priv = 0}, 87 .priv = 0},
@@ -776,7 +776,7 @@ static void sd_pkt_scan(struct gspca_dev *gspca_dev,
776 default: 776 default:
777 data += 1; 777 data += 1;
778 len -= 1; 778 len -= 1;
779 gspca_frame_add(gspca_dev, FIRST_PACKET, frame, 779 gspca_frame_add(gspca_dev, INTER_PACKET, frame,
780 data, len); 780 data, len);
781 break; 781 break;
782 } 782 }
diff --git a/drivers/media/video/gspca/spca506.c b/drivers/media/video/gspca/spca506.c
index 6fe715c80ad2..f622fa75766d 100644
--- a/drivers/media/video/gspca/spca506.c
+++ b/drivers/media/video/gspca/spca506.c
@@ -112,27 +112,27 @@ static struct ctrl sd_ctrls[] = {
112 112
113static struct v4l2_pix_format vga_mode[] = { 113static struct v4l2_pix_format vga_mode[] = {
114 {160, 120, V4L2_PIX_FMT_SPCA505, V4L2_FIELD_NONE, 114 {160, 120, V4L2_PIX_FMT_SPCA505, V4L2_FIELD_NONE,
115 .bytesperline = 160 * 3, 115 .bytesperline = 160,
116 .sizeimage = 160 * 120 * 3 / 2, 116 .sizeimage = 160 * 120 * 3 / 2,
117 .colorspace = V4L2_COLORSPACE_SRGB, 117 .colorspace = V4L2_COLORSPACE_SRGB,
118 .priv = 5}, 118 .priv = 5},
119 {176, 144, V4L2_PIX_FMT_SPCA505, V4L2_FIELD_NONE, 119 {176, 144, V4L2_PIX_FMT_SPCA505, V4L2_FIELD_NONE,
120 .bytesperline = 176 * 3, 120 .bytesperline = 176,
121 .sizeimage = 176 * 144 * 3 / 2, 121 .sizeimage = 176 * 144 * 3 / 2,
122 .colorspace = V4L2_COLORSPACE_SRGB, 122 .colorspace = V4L2_COLORSPACE_SRGB,
123 .priv = 4}, 123 .priv = 4},
124 {320, 240, V4L2_PIX_FMT_SPCA505, V4L2_FIELD_NONE, 124 {320, 240, V4L2_PIX_FMT_SPCA505, V4L2_FIELD_NONE,
125 .bytesperline = 320 * 3, 125 .bytesperline = 320,
126 .sizeimage = 320 * 240 * 3 / 2, 126 .sizeimage = 320 * 240 * 3 / 2,
127 .colorspace = V4L2_COLORSPACE_SRGB, 127 .colorspace = V4L2_COLORSPACE_SRGB,
128 .priv = 2}, 128 .priv = 2},
129 {352, 288, V4L2_PIX_FMT_SPCA505, V4L2_FIELD_NONE, 129 {352, 288, V4L2_PIX_FMT_SPCA505, V4L2_FIELD_NONE,
130 .bytesperline = 352 * 3, 130 .bytesperline = 352,
131 .sizeimage = 352 * 288 * 3 / 2, 131 .sizeimage = 352 * 288 * 3 / 2,
132 .colorspace = V4L2_COLORSPACE_SRGB, 132 .colorspace = V4L2_COLORSPACE_SRGB,
133 .priv = 1}, 133 .priv = 1},
134 {640, 480, V4L2_PIX_FMT_SPCA505, V4L2_FIELD_NONE, 134 {640, 480, V4L2_PIX_FMT_SPCA505, V4L2_FIELD_NONE,
135 .bytesperline = 640 * 3, 135 .bytesperline = 640,
136 .sizeimage = 640 * 480 * 3 / 2, 136 .sizeimage = 640 * 480 * 3 / 2,
137 .colorspace = V4L2_COLORSPACE_SRGB, 137 .colorspace = V4L2_COLORSPACE_SRGB,
138 .priv = 0}, 138 .priv = 0},
@@ -588,7 +588,7 @@ static void sd_pkt_scan(struct gspca_dev *gspca_dev,
588 default: 588 default:
589 data += 1; 589 data += 1;
590 len -= 1; 590 len -= 1;
591 gspca_frame_add(gspca_dev, FIRST_PACKET, frame, 591 gspca_frame_add(gspca_dev, INTER_PACKET, frame,
592 data, len); 592 data, len);
593 break; 593 break;
594 } 594 }
diff --git a/drivers/media/video/gspca/spca508.c b/drivers/media/video/gspca/spca508.c
index b608a27ad115..699340c17dea 100644
--- a/drivers/media/video/gspca/spca508.c
+++ b/drivers/media/video/gspca/spca508.c
@@ -63,23 +63,23 @@ static struct ctrl sd_ctrls[] = {
63}; 63};
64 64
65static struct v4l2_pix_format sif_mode[] = { 65static struct v4l2_pix_format sif_mode[] = {
66 {160, 120, V4L2_PIX_FMT_YUYV, V4L2_FIELD_NONE, 66 {160, 120, V4L2_PIX_FMT_SPCA508, V4L2_FIELD_NONE,
67 .bytesperline = 160 * 3, 67 .bytesperline = 160,
68 .sizeimage = 160 * 120 * 3 / 2, 68 .sizeimage = 160 * 120 * 3 / 2,
69 .colorspace = V4L2_COLORSPACE_SRGB, 69 .colorspace = V4L2_COLORSPACE_SRGB,
70 .priv = 3}, 70 .priv = 3},
71 {176, 144, V4L2_PIX_FMT_YUYV, V4L2_FIELD_NONE, 71 {176, 144, V4L2_PIX_FMT_SPCA508, V4L2_FIELD_NONE,
72 .bytesperline = 176 * 3, 72 .bytesperline = 176,
73 .sizeimage = 176 * 144 * 3 / 2, 73 .sizeimage = 176 * 144 * 3 / 2,
74 .colorspace = V4L2_COLORSPACE_SRGB, 74 .colorspace = V4L2_COLORSPACE_SRGB,
75 .priv = 2}, 75 .priv = 2},
76 {320, 240, V4L2_PIX_FMT_YUYV, V4L2_FIELD_NONE, 76 {320, 240, V4L2_PIX_FMT_SPCA508, V4L2_FIELD_NONE,
77 .bytesperline = 320 * 3, 77 .bytesperline = 320,
78 .sizeimage = 320 * 240 * 3 / 2, 78 .sizeimage = 320 * 240 * 3 / 2,
79 .colorspace = V4L2_COLORSPACE_SRGB, 79 .colorspace = V4L2_COLORSPACE_SRGB,
80 .priv = 1}, 80 .priv = 1},
81 {352, 288, V4L2_PIX_FMT_YUYV, V4L2_FIELD_NONE, 81 {352, 288, V4L2_PIX_FMT_SPCA508, V4L2_FIELD_NONE,
82 .bytesperline = 352 * 3, 82 .bytesperline = 352,
83 .sizeimage = 352 * 288 * 3 / 2, 83 .sizeimage = 352 * 288 * 3 / 2,
84 .colorspace = V4L2_COLORSPACE_SRGB, 84 .colorspace = V4L2_COLORSPACE_SRGB,
85 .priv = 0}, 85 .priv = 0},
@@ -1583,7 +1583,7 @@ static void sd_pkt_scan(struct gspca_dev *gspca_dev,
1583 default: 1583 default:
1584 data += 1; 1584 data += 1;
1585 len -= 1; 1585 len -= 1;
1586 gspca_frame_add(gspca_dev, FIRST_PACKET, frame, 1586 gspca_frame_add(gspca_dev, INTER_PACKET, frame,
1587 data, len); 1587 data, len);
1588 break; 1588 break;
1589 } 1589 }
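The spca505, spca506 and spca508 hunks above all make the same one-line fix: only the packet that starts a frame may be added with FIRST_PACKET; continuation packets have to be added with INTER_PACKET, otherwise every USB packet restarts the frame and a complete image is never assembled. A hedged, stand-alone sketch of that reassembly idea follows; the packet layout, buffer size and helper names are invented for the example and are not the gspca core API.

/* Sketch only: why continuation packets must not be flagged as frame
 * starts.  Everything here is illustrative, not kernel code. */
#include <stdio.h>
#include <string.h>

enum pkt_type { FIRST_PACKET, INTER_PACKET, LAST_PACKET };

static unsigned char frame[4096];
static size_t frame_len;

static void frame_add(enum pkt_type type, const unsigned char *data, size_t len)
{
	if (type == FIRST_PACKET)
		frame_len = 0;		/* a new frame starts: discard old data */
	if (frame_len + len <= sizeof(frame)) {
		memcpy(frame + frame_len, data, len);
		frame_len += len;
	}
	if (type == LAST_PACKET)
		printf("frame complete: %zu bytes\n", frame_len);
}

int main(void)
{
	unsigned char chunk[64] = { 0 };

	frame_add(FIRST_PACKET, chunk, sizeof(chunk));
	/* continuation data: INTER_PACKET keeps accumulating; using
	 * FIRST_PACKET here would wrongly reset frame_len to 0 each time */
	frame_add(INTER_PACKET, chunk, sizeof(chunk));
	frame_add(INTER_PACKET, chunk, sizeof(chunk));
	frame_add(LAST_PACKET, chunk, 0);
	return 0;
}
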
diff --git a/drivers/media/video/gspca/spca561.c b/drivers/media/video/gspca/spca561.c
index a26174508cb9..1073ac3d2ec6 100644
--- a/drivers/media/video/gspca/spca561.c
+++ b/drivers/media/video/gspca/spca561.c
@@ -644,6 +644,18 @@ static void setcontrast(struct gspca_dev *gspca_dev)
644 } 644 }
645} 645}
646 646
647static void setautogain(struct gspca_dev *gspca_dev)
648{
649 struct sd *sd = (struct sd *) gspca_dev;
650
651 if (sd->chip_revision == Rev072A) {
652 if (sd->autogain)
653 sd->ag_cnt = AG_CNT_START;
654 else
655 sd->ag_cnt = -1;
656 }
657}
658
647static void sd_start(struct gspca_dev *gspca_dev) 659static void sd_start(struct gspca_dev *gspca_dev)
648{ 660{
649 struct sd *sd = (struct sd *) gspca_dev; 661 struct sd *sd = (struct sd *) gspca_dev;
@@ -671,6 +683,7 @@ static void sd_start(struct gspca_dev *gspca_dev)
671 reg_w_val(dev, 0x8500, mode); /* mode */ 683 reg_w_val(dev, 0x8500, mode); /* mode */
672 reg_w_val(dev, 0x8700, Clck); /* 0x27 clock */ 684 reg_w_val(dev, 0x8700, Clck); /* 0x27 clock */
673 reg_w_val(dev, 0x8112, 0x10 | 0x20); 685 reg_w_val(dev, 0x8112, 0x10 | 0x20);
686 setautogain(gspca_dev);
674 break; 687 break;
675 default: 688 default:
676/* case Rev012A: */ 689/* case Rev012A: */
@@ -720,18 +733,24 @@ static void sd_close(struct gspca_dev *gspca_dev)
720 reg_w_val(gspca_dev->dev, 0x8114, 0); 733 reg_w_val(gspca_dev->dev, 0x8114, 0);
721} 734}
722 735
723static void setautogain(struct gspca_dev *gspca_dev) 736static void do_autogain(struct gspca_dev *gspca_dev)
724{ 737{
725 struct sd *sd = (struct sd *) gspca_dev; 738 struct sd *sd = (struct sd *) gspca_dev;
726 int expotimes = 0; 739 int expotimes;
727 int pixelclk = 0; 740 int pixelclk;
728 int gainG = 0; 741 int gainG;
729 __u8 R, Gr, Gb, B; 742 __u8 R, Gr, Gb, B;
730 int y; 743 int y;
731 __u8 luma_mean = 110; 744 __u8 luma_mean = 110;
732 __u8 luma_delta = 20; 745 __u8 luma_delta = 20;
733 __u8 spring = 4; 746 __u8 spring = 4;
734 747
748 if (sd->ag_cnt < 0)
749 return;
750 if (--sd->ag_cnt >= 0)
751 return;
752 sd->ag_cnt = AG_CNT_START;
753
735 switch (sd->chip_revision) { 754 switch (sd->chip_revision) {
736 case Rev072A: 755 case Rev072A:
737 reg_r(gspca_dev, 0x8621, 1); 756 reg_r(gspca_dev, 0x8621, 1);
@@ -795,18 +814,10 @@ static void sd_pkt_scan(struct gspca_dev *gspca_dev,
795 __u8 *data, /* isoc packet */ 814 __u8 *data, /* isoc packet */
796 int len) /* iso packet length */ 815 int len) /* iso packet length */
797{ 816{
798 struct sd *sd = (struct sd *) gspca_dev;
799
800 switch (data[0]) { 817 switch (data[0]) {
801 case 0: /* start of frame */ 818 case 0: /* start of frame */
802 frame = gspca_frame_add(gspca_dev, LAST_PACKET, frame, 819 frame = gspca_frame_add(gspca_dev, LAST_PACKET, frame,
803 data, 0); 820 data, 0);
804 if (sd->ag_cnt >= 0) {
805 if (--sd->ag_cnt < 0) {
806 sd->ag_cnt = AG_CNT_START;
807 setautogain(gspca_dev);
808 }
809 }
810 data += SPCA561_OFFSET_DATA; 821 data += SPCA561_OFFSET_DATA;
811 len -= SPCA561_OFFSET_DATA; 822 len -= SPCA561_OFFSET_DATA;
812 if (data[1] & 0x10) { 823 if (data[1] & 0x10) {
@@ -944,10 +955,8 @@ static int sd_setautogain(struct gspca_dev *gspca_dev, __s32 val)
944 struct sd *sd = (struct sd *) gspca_dev; 955 struct sd *sd = (struct sd *) gspca_dev;
945 956
946 sd->autogain = val; 957 sd->autogain = val;
947 if (val) 958 if (gspca_dev->streaming)
948 sd->ag_cnt = AG_CNT_START; 959 setautogain(gspca_dev);
949 else
950 sd->ag_cnt = -1;
951 return 0; 960 return 0;
952} 961}
953 962
@@ -971,6 +980,7 @@ static const struct sd_desc sd_desc = {
971 .stop0 = sd_stop0, 980 .stop0 = sd_stop0,
972 .close = sd_close, 981 .close = sd_close,
973 .pkt_scan = sd_pkt_scan, 982 .pkt_scan = sd_pkt_scan,
983 .dq_callback = do_autogain,
974}; 984};
975 985
976/* -- module initialisation -- */ 986/* -- module initialisation -- */
diff --git a/drivers/media/video/gspca/vc032x.c b/drivers/media/video/gspca/vc032x.c
index a4221753e1bf..f4a52956e0d9 100644
--- a/drivers/media/video/gspca/vc032x.c
+++ b/drivers/media/video/gspca/vc032x.c
@@ -88,12 +88,12 @@ static struct ctrl sd_ctrls[] = {
88 88
89static struct v4l2_pix_format vc0321_mode[] = { 89static struct v4l2_pix_format vc0321_mode[] = {
90 {320, 240, V4L2_PIX_FMT_YUV420, V4L2_FIELD_NONE, 90 {320, 240, V4L2_PIX_FMT_YUV420, V4L2_FIELD_NONE,
91 .bytesperline = 320 * 2, 91 .bytesperline = 320,
92 .sizeimage = 320 * 240 * 2, 92 .sizeimage = 320 * 240 * 2,
93 .colorspace = V4L2_COLORSPACE_SRGB, 93 .colorspace = V4L2_COLORSPACE_SRGB,
94 .priv = 1}, 94 .priv = 1},
95 {640, 480, V4L2_PIX_FMT_YUV420, V4L2_FIELD_NONE, 95 {640, 480, V4L2_PIX_FMT_YUV420, V4L2_FIELD_NONE,
96 .bytesperline = 640 * 2, 96 .bytesperline = 640,
97 .sizeimage = 640 * 480 * 2, 97 .sizeimage = 640 * 480 * 2,
98 .colorspace = V4L2_COLORSPACE_SRGB, 98 .colorspace = V4L2_COLORSPACE_SRGB,
99 .priv = 0}, 99 .priv = 0},
diff --git a/drivers/media/video/gspca/zc3xx.c b/drivers/media/video/gspca/zc3xx.c
index 22a994ccb1d5..bc7d0eedcd81 100644
--- a/drivers/media/video/gspca/zc3xx.c
+++ b/drivers/media/video/gspca/zc3xx.c
@@ -6469,7 +6469,7 @@ static void setcontrast(struct gspca_dev *gspca_dev)
6469 NULL, Tgradient_1, Tgradient_2, 6469 NULL, Tgradient_1, Tgradient_2,
6470 Tgradient_3, Tgradient_4, Tgradient_5, Tgradient_6 6470 Tgradient_3, Tgradient_4, Tgradient_5, Tgradient_6
6471 }; 6471 };
6472#ifdef CONFIG_VIDEO_ADV_DEBUG 6472#ifdef GSPCA_DEBUG
6473 __u8 v[16]; 6473 __u8 v[16];
6474#endif 6474#endif
6475 6475
@@ -6487,7 +6487,7 @@ static void setcontrast(struct gspca_dev *gspca_dev)
6487 else if (g <= 0) 6487 else if (g <= 0)
6488 g = 1; 6488 g = 1;
6489 reg_w(dev, g, 0x0120 + i); /* gamma */ 6489 reg_w(dev, g, 0x0120 + i); /* gamma */
6490#ifdef CONFIG_VIDEO_ADV_DEBUG 6490#ifdef GSPCA_DEBUG
6491 if (gspca_debug & D_CONF) 6491 if (gspca_debug & D_CONF)
6492 v[i] = g; 6492 v[i] = g;
6493#endif 6493#endif
@@ -6507,7 +6507,7 @@ static void setcontrast(struct gspca_dev *gspca_dev)
6507 g = 1; 6507 g = 1;
6508 } 6508 }
6509 reg_w(dev, g, 0x0130 + i); /* gradient */ 6509 reg_w(dev, g, 0x0130 + i); /* gradient */
6510#ifdef CONFIG_VIDEO_ADV_DEBUG 6510#ifdef GSPCA_DEBUG
6511 if (gspca_debug & D_CONF) 6511 if (gspca_debug & D_CONF)
6512 v[i] = g; 6512 v[i] = g;
6513#endif 6513#endif
diff --git a/drivers/media/video/mt9v022.c b/drivers/media/video/mt9v022.c
index b31ba4e09327..56808cd2f8a9 100644
--- a/drivers/media/video/mt9v022.c
+++ b/drivers/media/video/mt9v022.c
@@ -25,7 +25,7 @@
25 25
26static char *sensor_type; 26static char *sensor_type;
27module_param(sensor_type, charp, S_IRUGO); 27module_param(sensor_type, charp, S_IRUGO);
28MODULE_PARM_DESC(sensor_type, "Sensor type: \"colour\" or \"monochrome\"\n"); 28MODULE_PARM_DESC(sensor_type, "Sensor type: \"colour\" or \"monochrome\"");
29 29
30/* mt9v022 selected register addresses */ 30/* mt9v022 selected register addresses */
31#define MT9V022_CHIP_VERSION 0x00 31#define MT9V022_CHIP_VERSION 0x00
diff --git a/drivers/media/video/planb.c b/drivers/media/video/planb.c
deleted file mode 100644
index e69de29bb2d1..000000000000
--- a/drivers/media/video/planb.c
+++ /dev/null
diff --git a/drivers/media/video/planb.h b/drivers/media/video/planb.h
deleted file mode 100644
index e69de29bb2d1..000000000000
--- a/drivers/media/video/planb.h
+++ /dev/null
diff --git a/drivers/media/video/pxa_camera.c b/drivers/media/video/pxa_camera.c
index b15f82c49766..388cf94055d3 100644
--- a/drivers/media/video/pxa_camera.c
+++ b/drivers/media/video/pxa_camera.c
@@ -36,8 +36,8 @@
36#include <linux/videodev2.h> 36#include <linux/videodev2.h>
37 37
38#include <asm/dma.h> 38#include <asm/dma.h>
39#include <asm/arch/pxa-regs.h> 39#include <mach/pxa-regs.h>
40#include <asm/arch/camera.h> 40#include <mach/camera.h>
41 41
42#define PXA_CAM_VERSION_CODE KERNEL_VERSION(0, 0, 5) 42#define PXA_CAM_VERSION_CODE KERNEL_VERSION(0, 0, 5)
43#define PXA_CAM_DRV_NAME "pxa27x-camera" 43#define PXA_CAM_DRV_NAME "pxa27x-camera"
@@ -128,6 +128,8 @@ struct pxa_camera_dev {
128 128
129 struct pxa_buffer *active; 129 struct pxa_buffer *active;
130 struct pxa_dma_desc *sg_tail[3]; 130 struct pxa_dma_desc *sg_tail[3];
131
132 u32 save_cicr[5];
131}; 133};
132 134
133static const char *pxa_cam_driver_description = "PXA_Camera"; 135static const char *pxa_cam_driver_description = "PXA_Camera";
@@ -997,10 +999,64 @@ static int pxa_camera_querycap(struct soc_camera_host *ici,
997 return 0; 999 return 0;
998} 1000}
999 1001
1002static int pxa_camera_suspend(struct soc_camera_device *icd, pm_message_t state)
1003{
1004 struct soc_camera_host *ici =
1005 to_soc_camera_host(icd->dev.parent);
1006 struct pxa_camera_dev *pcdev = ici->priv;
1007 int i = 0, ret = 0;
1008
1009 pcdev->save_cicr[i++] = CICR0;
1010 pcdev->save_cicr[i++] = CICR1;
1011 pcdev->save_cicr[i++] = CICR2;
1012 pcdev->save_cicr[i++] = CICR3;
1013 pcdev->save_cicr[i++] = CICR4;
1014
1015 if ((pcdev->icd) && (pcdev->icd->ops->suspend))
1016 ret = pcdev->icd->ops->suspend(pcdev->icd, state);
1017
1018 return ret;
1019}
1020
1021static int pxa_camera_resume(struct soc_camera_device *icd)
1022{
1023 struct soc_camera_host *ici =
1024 to_soc_camera_host(icd->dev.parent);
1025 struct pxa_camera_dev *pcdev = ici->priv;
1026 int i = 0, ret = 0;
1027
1028 DRCMR68 = pcdev->dma_chans[0] | DRCMR_MAPVLD;
1029 DRCMR69 = pcdev->dma_chans[1] | DRCMR_MAPVLD;
1030 DRCMR70 = pcdev->dma_chans[2] | DRCMR_MAPVLD;
1031
1032 CICR0 = pcdev->save_cicr[i++] & ~CICR0_ENB;
1033 CICR1 = pcdev->save_cicr[i++];
1034 CICR2 = pcdev->save_cicr[i++];
1035 CICR3 = pcdev->save_cicr[i++];
1036 CICR4 = pcdev->save_cicr[i++];
1037
1038 if ((pcdev->icd) && (pcdev->icd->ops->resume))
1039 ret = pcdev->icd->ops->resume(pcdev->icd);
1040
1041 /* Restart frame capture if active buffer exists */
1042 if (!ret && pcdev->active) {
1043 /* Reset the FIFOs */
1044 CIFR |= CIFR_RESET_F;
1045 /* Enable End-Of-Frame Interrupt */
1046 CICR0 &= ~CICR0_EOFM;
1047 /* Restart the Capture Interface */
1048 CICR0 |= CICR0_ENB;
1049 }
1050
1051 return ret;
1052}
1053
1000static struct soc_camera_host_ops pxa_soc_camera_host_ops = { 1054static struct soc_camera_host_ops pxa_soc_camera_host_ops = {
1001 .owner = THIS_MODULE, 1055 .owner = THIS_MODULE,
1002 .add = pxa_camera_add_device, 1056 .add = pxa_camera_add_device,
1003 .remove = pxa_camera_remove_device, 1057 .remove = pxa_camera_remove_device,
1058 .suspend = pxa_camera_suspend,
1059 .resume = pxa_camera_resume,
1004 .set_fmt_cap = pxa_camera_set_fmt_cap, 1060 .set_fmt_cap = pxa_camera_set_fmt_cap,
1005 .try_fmt_cap = pxa_camera_try_fmt_cap, 1061 .try_fmt_cap = pxa_camera_try_fmt_cap,
1006 .init_videobuf = pxa_camera_init_videobuf, 1062 .init_videobuf = pxa_camera_init_videobuf,
@@ -1198,7 +1254,7 @@ static int __devinit pxa_camera_init(void)
1198 1254
1199static void __exit pxa_camera_exit(void) 1255static void __exit pxa_camera_exit(void)
1200{ 1256{
1201 return platform_driver_unregister(&pxa_camera_driver); 1257 platform_driver_unregister(&pxa_camera_driver);
1202} 1258}
1203 1259
1204module_init(pxa_camera_init); 1260module_init(pxa_camera_init);
diff --git a/drivers/media/video/saa7196.h b/drivers/media/video/saa7196.h
deleted file mode 100644
index e69de29bb2d1..000000000000
--- a/drivers/media/video/saa7196.h
+++ /dev/null
diff --git a/drivers/media/video/sh_mobile_ceu_camera.c b/drivers/media/video/sh_mobile_ceu_camera.c
index f7ca3cb9340a..318754e73132 100644
--- a/drivers/media/video/sh_mobile_ceu_camera.c
+++ b/drivers/media/video/sh_mobile_ceu_camera.c
@@ -647,7 +647,7 @@ static int __init sh_mobile_ceu_init(void)
647 647
648static void __exit sh_mobile_ceu_exit(void) 648static void __exit sh_mobile_ceu_exit(void)
649{ 649{
650 return platform_driver_unregister(&sh_mobile_ceu_driver); 650 platform_driver_unregister(&sh_mobile_ceu_driver);
651} 651}
652 652
653module_init(sh_mobile_ceu_init); 653module_init(sh_mobile_ceu_init);
diff --git a/drivers/media/video/soc_camera.c b/drivers/media/video/soc_camera.c
index b6be5ee678b6..66ebe5956a87 100644
--- a/drivers/media/video/soc_camera.c
+++ b/drivers/media/video/soc_camera.c
@@ -732,10 +732,36 @@ static int soc_camera_remove(struct device *dev)
732 return 0; 732 return 0;
733} 733}
734 734
735static int soc_camera_suspend(struct device *dev, pm_message_t state)
736{
737 struct soc_camera_device *icd = to_soc_camera_dev(dev);
738 struct soc_camera_host *ici = to_soc_camera_host(icd->dev.parent);
739 int ret = 0;
740
741 if (ici->ops->suspend)
742 ret = ici->ops->suspend(icd, state);
743
744 return ret;
745}
746
747static int soc_camera_resume(struct device *dev)
748{
749 struct soc_camera_device *icd = to_soc_camera_dev(dev);
750 struct soc_camera_host *ici = to_soc_camera_host(icd->dev.parent);
751 int ret = 0;
752
753 if (ici->ops->resume)
754 ret = ici->ops->resume(icd);
755
756 return ret;
757}
758
735static struct bus_type soc_camera_bus_type = { 759static struct bus_type soc_camera_bus_type = {
736 .name = "soc-camera", 760 .name = "soc-camera",
737 .probe = soc_camera_probe, 761 .probe = soc_camera_probe,
738 .remove = soc_camera_remove, 762 .remove = soc_camera_remove,
763 .suspend = soc_camera_suspend,
764 .resume = soc_camera_resume,
739}; 765};
740 766
741static struct device_driver ic_drv = { 767static struct device_driver ic_drv = {
diff --git a/drivers/media/video/soc_camera_platform.c b/drivers/media/video/soc_camera_platform.c
index eefb0327ebb6..1adc257ebdb9 100644
--- a/drivers/media/video/soc_camera_platform.c
+++ b/drivers/media/video/soc_camera_platform.c
@@ -187,7 +187,7 @@ static int __init soc_camera_platform_module_init(void)
187 187
188static void __exit soc_camera_platform_module_exit(void) 188static void __exit soc_camera_platform_module_exit(void)
189{ 189{
190 return platform_driver_unregister(&soc_camera_platform_driver); 190 platform_driver_unregister(&soc_camera_platform_driver);
191} 191}
192 192
193module_init(soc_camera_platform_module_init); 193module_init(soc_camera_platform_module_init);
diff --git a/drivers/media/video/uvc/uvc_ctrl.c b/drivers/media/video/uvc/uvc_ctrl.c
index 626f4ad7e876..6ef3e5297de8 100644
--- a/drivers/media/video/uvc/uvc_ctrl.c
+++ b/drivers/media/video/uvc/uvc_ctrl.c
@@ -585,13 +585,17 @@ int uvc_query_v4l2_ctrl(struct uvc_video_device *video,
585 struct uvc_control_mapping *mapping; 585 struct uvc_control_mapping *mapping;
586 struct uvc_menu_info *menu; 586 struct uvc_menu_info *menu;
587 unsigned int i; 587 unsigned int i;
588 __u8 data[8]; 588 __u8 *data;
589 int ret; 589 int ret;
590 590
591 ctrl = uvc_find_control(video, v4l2_ctrl->id, &mapping); 591 ctrl = uvc_find_control(video, v4l2_ctrl->id, &mapping);
592 if (ctrl == NULL) 592 if (ctrl == NULL)
593 return -EINVAL; 593 return -EINVAL;
594 594
595 data = kmalloc(8, GFP_KERNEL);
596 if (data == NULL)
597 return -ENOMEM;
598
595 memset(v4l2_ctrl, 0, sizeof *v4l2_ctrl); 599 memset(v4l2_ctrl, 0, sizeof *v4l2_ctrl);
596 v4l2_ctrl->id = mapping->id; 600 v4l2_ctrl->id = mapping->id;
597 v4l2_ctrl->type = mapping->v4l2_type; 601 v4l2_ctrl->type = mapping->v4l2_type;
@@ -604,8 +608,8 @@ int uvc_query_v4l2_ctrl(struct uvc_video_device *video,
604 if (ctrl->info->flags & UVC_CONTROL_GET_DEF) { 608 if (ctrl->info->flags & UVC_CONTROL_GET_DEF) {
605 if ((ret = uvc_query_ctrl(video->dev, GET_DEF, ctrl->entity->id, 609 if ((ret = uvc_query_ctrl(video->dev, GET_DEF, ctrl->entity->id,
606 video->dev->intfnum, ctrl->info->selector, 610 video->dev->intfnum, ctrl->info->selector,
607 &data, ctrl->info->size)) < 0) 611 data, ctrl->info->size)) < 0)
608 return ret; 612 goto out;
609 v4l2_ctrl->default_value = uvc_get_le_value(data, mapping); 613 v4l2_ctrl->default_value = uvc_get_le_value(data, mapping);
610 } 614 }
611 615
@@ -623,13 +627,15 @@ int uvc_query_v4l2_ctrl(struct uvc_video_device *video,
623 } 627 }
624 } 628 }
625 629
626 return 0; 630 ret = 0;
631 goto out;
627 632
628 case V4L2_CTRL_TYPE_BOOLEAN: 633 case V4L2_CTRL_TYPE_BOOLEAN:
629 v4l2_ctrl->minimum = 0; 634 v4l2_ctrl->minimum = 0;
630 v4l2_ctrl->maximum = 1; 635 v4l2_ctrl->maximum = 1;
631 v4l2_ctrl->step = 1; 636 v4l2_ctrl->step = 1;
632 return 0; 637 ret = 0;
638 goto out;
633 639
634 default: 640 default:
635 break; 641 break;
@@ -638,26 +644,29 @@ int uvc_query_v4l2_ctrl(struct uvc_video_device *video,
638 if (ctrl->info->flags & UVC_CONTROL_GET_MIN) { 644 if (ctrl->info->flags & UVC_CONTROL_GET_MIN) {
639 if ((ret = uvc_query_ctrl(video->dev, GET_MIN, ctrl->entity->id, 645 if ((ret = uvc_query_ctrl(video->dev, GET_MIN, ctrl->entity->id,
640 video->dev->intfnum, ctrl->info->selector, 646 video->dev->intfnum, ctrl->info->selector,
641 &data, ctrl->info->size)) < 0) 647 data, ctrl->info->size)) < 0)
642 return ret; 648 goto out;
643 v4l2_ctrl->minimum = uvc_get_le_value(data, mapping); 649 v4l2_ctrl->minimum = uvc_get_le_value(data, mapping);
644 } 650 }
645 if (ctrl->info->flags & UVC_CONTROL_GET_MAX) { 651 if (ctrl->info->flags & UVC_CONTROL_GET_MAX) {
646 if ((ret = uvc_query_ctrl(video->dev, GET_MAX, ctrl->entity->id, 652 if ((ret = uvc_query_ctrl(video->dev, GET_MAX, ctrl->entity->id,
647 video->dev->intfnum, ctrl->info->selector, 653 video->dev->intfnum, ctrl->info->selector,
648 &data, ctrl->info->size)) < 0) 654 data, ctrl->info->size)) < 0)
649 return ret; 655 goto out;
650 v4l2_ctrl->maximum = uvc_get_le_value(data, mapping); 656 v4l2_ctrl->maximum = uvc_get_le_value(data, mapping);
651 } 657 }
652 if (ctrl->info->flags & UVC_CONTROL_GET_RES) { 658 if (ctrl->info->flags & UVC_CONTROL_GET_RES) {
653 if ((ret = uvc_query_ctrl(video->dev, GET_RES, ctrl->entity->id, 659 if ((ret = uvc_query_ctrl(video->dev, GET_RES, ctrl->entity->id,
654 video->dev->intfnum, ctrl->info->selector, 660 video->dev->intfnum, ctrl->info->selector,
655 &data, ctrl->info->size)) < 0) 661 data, ctrl->info->size)) < 0)
656 return ret; 662 goto out;
657 v4l2_ctrl->step = uvc_get_le_value(data, mapping); 663 v4l2_ctrl->step = uvc_get_le_value(data, mapping);
658 } 664 }
659 665
660 return 0; 666 ret = 0;
667out:
668 kfree(data);
669 return ret;
661} 670}
662 671
663 672
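The uvc_ctrl.c rework above replaces the on-stack __u8 data[8] buffer with a kmalloc()ed one and funnels every exit through a single out: label that frees it. USB transfer buffers must be DMA-capable memory, which on-stack storage is not guaranteed to be, and the single-exit style keeps the new allocation from leaking on any of the GET_DEF/GET_MIN/GET_MAX/GET_RES error paths. Below is a hedged, stand-alone sketch of the same allocate / query / goto-out pattern in plain C; query_hw() is a made-up stand-in for uvc_query_ctrl(), not a real API.

/* Sketch only: the heap-buffer + single-exit cleanup pattern used in the
 * uvc_ctrl.c change, shown with standard C instead of kernel APIs. */
#include <stdlib.h>
#include <string.h>

/* stand-in for uvc_query_ctrl(); always "succeeds" here */
static int query_hw(unsigned char *buf, size_t len)
{
	memset(buf, 0xab, len);
	return 0;
}

static int query_ctrl(long *minimum, long *maximum)
{
	unsigned char *data;
	int ret;

	data = malloc(8);		/* was: __u8 data[8] on the stack */
	if (data == NULL)
		return -1;

	ret = query_hw(data, 8);	/* think GET_MIN */
	if (ret < 0)
		goto out;		/* error: the buffer is still freed */
	*minimum = data[0];

	ret = query_hw(data, 8);	/* think GET_MAX */
	if (ret < 0)
		goto out;
	*maximum = data[0];

	ret = 0;
out:
	free(data);			/* single exit point owns the cleanup */
	return ret;
}

int main(void)
{
	long min, max;
	return query_ctrl(&min, &max);
}
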
diff --git a/drivers/media/video/uvc/uvc_driver.c b/drivers/media/video/uvc/uvc_driver.c
index b3c4d75e8490..7e102034d38d 100644
--- a/drivers/media/video/uvc/uvc_driver.c
+++ b/drivers/media/video/uvc/uvc_driver.c
@@ -1884,7 +1884,7 @@ static struct usb_device_id uvc_ids[] = {
1884 .bInterfaceSubClass = 1, 1884 .bInterfaceSubClass = 1,
1885 .bInterfaceProtocol = 0, 1885 .bInterfaceProtocol = 0,
1886 .driver_info = UVC_QUIRK_PROBE_MINMAX }, 1886 .driver_info = UVC_QUIRK_PROBE_MINMAX },
1887 /* Packard Bell OEM Webcam */ 1887 /* Packard Bell OEM Webcam - Bison Electronics */
1888 { .match_flags = USB_DEVICE_ID_MATCH_DEVICE 1888 { .match_flags = USB_DEVICE_ID_MATCH_DEVICE
1889 | USB_DEVICE_ID_MATCH_INT_INFO, 1889 | USB_DEVICE_ID_MATCH_INT_INFO,
1890 .idVendor = 0x5986, 1890 .idVendor = 0x5986,
@@ -1893,7 +1893,7 @@ static struct usb_device_id uvc_ids[] = {
1893 .bInterfaceSubClass = 1, 1893 .bInterfaceSubClass = 1,
1894 .bInterfaceProtocol = 0, 1894 .bInterfaceProtocol = 0,
1895 .driver_info = UVC_QUIRK_PROBE_MINMAX }, 1895 .driver_info = UVC_QUIRK_PROBE_MINMAX },
1896 /* Acer Crystal Eye webcam */ 1896 /* Acer Crystal Eye webcam - Bison Electronics */
1897 { .match_flags = USB_DEVICE_ID_MATCH_DEVICE 1897 { .match_flags = USB_DEVICE_ID_MATCH_DEVICE
1898 | USB_DEVICE_ID_MATCH_INT_INFO, 1898 | USB_DEVICE_ID_MATCH_INT_INFO,
1899 .idVendor = 0x5986, 1899 .idVendor = 0x5986,
@@ -1902,7 +1902,7 @@ static struct usb_device_id uvc_ids[] = {
1902 .bInterfaceSubClass = 1, 1902 .bInterfaceSubClass = 1,
1903 .bInterfaceProtocol = 0, 1903 .bInterfaceProtocol = 0,
1904 .driver_info = UVC_QUIRK_PROBE_MINMAX }, 1904 .driver_info = UVC_QUIRK_PROBE_MINMAX },
1905 /* Medion Akoya Mini E1210 */ 1905 /* Medion Akoya Mini E1210 - Bison Electronics */
1906 { .match_flags = USB_DEVICE_ID_MATCH_DEVICE 1906 { .match_flags = USB_DEVICE_ID_MATCH_DEVICE
1907 | USB_DEVICE_ID_MATCH_INT_INFO, 1907 | USB_DEVICE_ID_MATCH_INT_INFO,
1908 .idVendor = 0x5986, 1908 .idVendor = 0x5986,
@@ -1911,7 +1911,7 @@ static struct usb_device_id uvc_ids[] = {
1911 .bInterfaceSubClass = 1, 1911 .bInterfaceSubClass = 1,
1912 .bInterfaceProtocol = 0, 1912 .bInterfaceProtocol = 0,
1913 .driver_info = UVC_QUIRK_PROBE_MINMAX }, 1913 .driver_info = UVC_QUIRK_PROBE_MINMAX },
1914 /* Acer OrbiCam - Unknown vendor */ 1914 /* Acer OrbiCam - Bison Electronics */
1915 { .match_flags = USB_DEVICE_ID_MATCH_DEVICE 1915 { .match_flags = USB_DEVICE_ID_MATCH_DEVICE
1916 | USB_DEVICE_ID_MATCH_INT_INFO, 1916 | USB_DEVICE_ID_MATCH_INT_INFO,
1917 .idVendor = 0x5986, 1917 .idVendor = 0x5986,
@@ -1920,6 +1920,24 @@ static struct usb_device_id uvc_ids[] = {
1920 .bInterfaceSubClass = 1, 1920 .bInterfaceSubClass = 1,
1921 .bInterfaceProtocol = 0, 1921 .bInterfaceProtocol = 0,
1922 .driver_info = UVC_QUIRK_PROBE_MINMAX }, 1922 .driver_info = UVC_QUIRK_PROBE_MINMAX },
1923 /* Bison Electronics */
1924 { .match_flags = USB_DEVICE_ID_MATCH_DEVICE
1925 | USB_DEVICE_ID_MATCH_INT_INFO,
1926 .idVendor = 0x5986,
1927 .idProduct = 0x0300,
1928 .bInterfaceClass = USB_CLASS_VIDEO,
1929 .bInterfaceSubClass = 1,
1930 .bInterfaceProtocol = 0,
1931 .driver_info = UVC_QUIRK_PROBE_MINMAX },
1932 /* Clevo M570TU - Bison Electronics */
1933 { .match_flags = USB_DEVICE_ID_MATCH_DEVICE
1934 | USB_DEVICE_ID_MATCH_INT_INFO,
1935 .idVendor = 0x5986,
1936 .idProduct = 0x0303,
1937 .bInterfaceClass = USB_CLASS_VIDEO,
1938 .bInterfaceSubClass = 1,
1939 .bInterfaceProtocol = 0,
1940 .driver_info = UVC_QUIRK_PROBE_MINMAX },
1923 /* Generic USB Video Class */ 1941 /* Generic USB Video Class */
1924 { USB_INTERFACE_INFO(USB_CLASS_VIDEO, 1, 0) }, 1942 { USB_INTERFACE_INFO(USB_CLASS_VIDEO, 1, 0) },
1925 {} 1943 {}
diff --git a/drivers/media/video/uvc/uvc_video.c b/drivers/media/video/uvc/uvc_video.c
index ad63794fda77..6854ac78a161 100644
--- a/drivers/media/video/uvc/uvc_video.c
+++ b/drivers/media/video/uvc/uvc_video.c
@@ -90,17 +90,20 @@ static void uvc_fixup_buffer_size(struct uvc_video_device *video,
90static int uvc_get_video_ctrl(struct uvc_video_device *video, 90static int uvc_get_video_ctrl(struct uvc_video_device *video,
91 struct uvc_streaming_control *ctrl, int probe, __u8 query) 91 struct uvc_streaming_control *ctrl, int probe, __u8 query)
92{ 92{
93 __u8 data[34]; 93 __u8 *data;
94 __u8 size; 94 __u16 size;
95 int ret; 95 int ret;
96 96
97 size = video->dev->uvc_version >= 0x0110 ? 34 : 26; 97 size = video->dev->uvc_version >= 0x0110 ? 34 : 26;
98 data = kmalloc(size, GFP_KERNEL);
99 if (data == NULL)
100 return -ENOMEM;
101
98 ret = __uvc_query_ctrl(video->dev, query, 0, video->streaming->intfnum, 102 ret = __uvc_query_ctrl(video->dev, query, 0, video->streaming->intfnum,
99 probe ? VS_PROBE_CONTROL : VS_COMMIT_CONTROL, &data, size, 103 probe ? VS_PROBE_CONTROL : VS_COMMIT_CONTROL, data, size,
100 UVC_CTRL_STREAMING_TIMEOUT); 104 UVC_CTRL_STREAMING_TIMEOUT);
101
102 if (ret < 0) 105 if (ret < 0)
103 return ret; 106 goto out;
104 107
105 ctrl->bmHint = le16_to_cpup((__le16 *)&data[0]); 108 ctrl->bmHint = le16_to_cpup((__le16 *)&data[0]);
106 ctrl->bFormatIndex = data[2]; 109 ctrl->bFormatIndex = data[2];
@@ -136,17 +139,22 @@ static int uvc_get_video_ctrl(struct uvc_video_device *video,
136 */ 139 */
137 uvc_fixup_buffer_size(video, ctrl); 140 uvc_fixup_buffer_size(video, ctrl);
138 141
139 return 0; 142out:
143 kfree(data);
144 return ret;
140} 145}
141 146
142int uvc_set_video_ctrl(struct uvc_video_device *video, 147int uvc_set_video_ctrl(struct uvc_video_device *video,
143 struct uvc_streaming_control *ctrl, int probe) 148 struct uvc_streaming_control *ctrl, int probe)
144{ 149{
145 __u8 data[34]; 150 __u8 *data;
146 __u8 size; 151 __u16 size;
152 int ret;
147 153
148 size = video->dev->uvc_version >= 0x0110 ? 34 : 26; 154 size = video->dev->uvc_version >= 0x0110 ? 34 : 26;
149 memset(data, 0, sizeof data); 155 data = kzalloc(size, GFP_KERNEL);
156 if (data == NULL)
157 return -ENOMEM;
150 158
151 *(__le16 *)&data[0] = cpu_to_le16(ctrl->bmHint); 159 *(__le16 *)&data[0] = cpu_to_le16(ctrl->bmHint);
152 data[2] = ctrl->bFormatIndex; 160 data[2] = ctrl->bFormatIndex;
@@ -174,10 +182,13 @@ int uvc_set_video_ctrl(struct uvc_video_device *video,
174 data[33] = ctrl->bMaxVersion; 182 data[33] = ctrl->bMaxVersion;
175 } 183 }
176 184
177 return __uvc_query_ctrl(video->dev, SET_CUR, 0, 185 ret = __uvc_query_ctrl(video->dev, SET_CUR, 0,
178 video->streaming->intfnum, 186 video->streaming->intfnum,
179 probe ? VS_PROBE_CONTROL : VS_COMMIT_CONTROL, &data, size, 187 probe ? VS_PROBE_CONTROL : VS_COMMIT_CONTROL, data, size,
180 UVC_CTRL_STREAMING_TIMEOUT); 188 UVC_CTRL_STREAMING_TIMEOUT);
189
190 kfree(data);
191 return ret;
181} 192}
182 193
183int uvc_probe_video(struct uvc_video_device *video, 194int uvc_probe_video(struct uvc_video_device *video,
diff --git a/drivers/media/video/v4l2-dev.c b/drivers/media/video/v4l2-dev.c
index 556615fe93de..6f36006aecda 100644
--- a/drivers/media/video/v4l2-dev.c
+++ b/drivers/media/video/v4l2-dev.c
@@ -222,11 +222,13 @@ int video_register_device(struct video_device *vfd, int type, int nr)
222EXPORT_SYMBOL(video_register_device); 222EXPORT_SYMBOL(video_register_device);
223 223
224/** 224/**
225 * video_register_device - register video4linux devices 225 * video_register_device_index - register video4linux devices
226 * @vfd: video device structure we want to register 226 * @vfd: video device structure we want to register
227 * @type: type of device to register 227 * @type: type of device to register
228 * @nr: which device number (0 == /dev/video0, 1 == /dev/video1, ... 228 * @nr: which device number (0 == /dev/video0, 1 == /dev/video1, ...
229 * -1 == first free) 229 * -1 == first free)
230 * @index: stream number based on parent device;
231 * -1 if auto assign, requested number otherwise
230 * 232 *
231 * The registration code assigns minor numbers based on the type 233 * The registration code assigns minor numbers based on the type
232 * requested. -ENFILE is returned in all the device slots for this 234 * requested. -ENFILE is returned in all the device slots for this
diff --git a/drivers/media/video/videodev.c b/drivers/media/video/videodev.c
deleted file mode 100644
index e69de29bb2d1..000000000000
--- a/drivers/media/video/videodev.c
+++ /dev/null
diff --git a/drivers/media/video/vino.c b/drivers/media/video/vino.c
index 3989b0eded28..1edda456fc64 100644
--- a/drivers/media/video/vino.c
+++ b/drivers/media/video/vino.c
@@ -39,7 +39,9 @@
39#include <linux/i2c-algo-sgi.h> 39#include <linux/i2c-algo-sgi.h>
40 40
41#include <linux/videodev2.h> 41#include <linux/videodev2.h>
42#include <media/v4l2-ioctl.h>
42#include <media/v4l2-common.h> 43#include <media/v4l2-common.h>
44#include <media/v4l2-ioctl.h>
43#include <linux/video_decoder.h> 45#include <linux/video_decoder.h>
44#include <linux/mutex.h> 46#include <linux/mutex.h>
45 47
diff --git a/drivers/mfd/mcp-sa11x0.c b/drivers/mfd/mcp-sa11x0.c
index b5272b5ce3fa..28380b20bc70 100644
--- a/drivers/mfd/mcp-sa11x0.c
+++ b/drivers/mfd/mcp-sa11x0.c
@@ -21,12 +21,12 @@
21#include <linux/platform_device.h> 21#include <linux/platform_device.h>
22 22
23#include <asm/dma.h> 23#include <asm/dma.h>
24#include <asm/hardware.h> 24#include <mach/hardware.h>
25#include <asm/mach-types.h> 25#include <asm/mach-types.h>
26#include <asm/system.h> 26#include <asm/system.h>
27#include <asm/arch/mcp.h> 27#include <mach/mcp.h>
28 28
29#include <asm/arch/assabet.h> 29#include <mach/assabet.h>
30 30
31#include "mcp.h" 31#include "mcp.h"
32 32
diff --git a/drivers/mfd/ucb1x00-core.c b/drivers/mfd/ucb1x00-core.c
index f6b10dda31fd..a316f1b75933 100644
--- a/drivers/mfd/ucb1x00-core.c
+++ b/drivers/mfd/ucb1x00-core.c
@@ -26,7 +26,7 @@
26#include <linux/mutex.h> 26#include <linux/mutex.h>
27 27
28#include <asm/dma.h> 28#include <asm/dma.h>
29#include <asm/hardware.h> 29#include <mach/hardware.h>
30 30
31#include "ucb1x00.h" 31#include "ucb1x00.h"
32 32
diff --git a/drivers/mfd/ucb1x00-ts.c b/drivers/mfd/ucb1x00-ts.c
index ad34e2d22524..44762ca86a8d 100644
--- a/drivers/mfd/ucb1x00-ts.c
+++ b/drivers/mfd/ucb1x00-ts.c
@@ -32,7 +32,7 @@
32#include <linux/kthread.h> 32#include <linux/kthread.h>
33 33
34#include <asm/dma.h> 34#include <asm/dma.h>
35#include <asm/arch/collie.h> 35#include <mach/collie.h>
36#include <asm/mach-types.h> 36#include <asm/mach-types.h>
37 37
38#include "ucb1x00.h" 38#include "ucb1x00.h"
diff --git a/drivers/misc/Kconfig b/drivers/misc/Kconfig
index f5ade1904aad..a726f3b01a6b 100644
--- a/drivers/misc/Kconfig
+++ b/drivers/misc/Kconfig
@@ -426,9 +426,11 @@ config ENCLOSURE_SERVICES
426 426
427config SGI_XP 427config SGI_XP
428 tristate "Support communication between SGI SSIs" 428 tristate "Support communication between SGI SSIs"
429 depends on IA64_GENERIC || IA64_SGI_SN2 429 depends on NET
430 depends on (IA64_GENERIC || IA64_SGI_SN2 || IA64_SGI_UV || X86_64) && SMP
430 select IA64_UNCACHED_ALLOCATOR if IA64_GENERIC || IA64_SGI_SN2 431 select IA64_UNCACHED_ALLOCATOR if IA64_GENERIC || IA64_SGI_SN2
431 select GENERIC_ALLOCATOR if IA64_GENERIC || IA64_SGI_SN2 432 select GENERIC_ALLOCATOR if IA64_GENERIC || IA64_SGI_SN2
433 select SGI_GRU if (IA64_GENERIC || IA64_SGI_UV || X86_64) && SMP
432 ---help--- 434 ---help---
433 An SGI machine can be divided into multiple Single System 435 An SGI machine can be divided into multiple Single System
434 Images which act independently of each other and have 436 Images which act independently of each other and have
@@ -450,4 +452,27 @@ config HP_ILO
450 To compile this driver as a module, choose M here: the 452 To compile this driver as a module, choose M here: the
451 module will be called hpilo. 453 module will be called hpilo.
452 454
455config SGI_GRU
456 tristate "SGI GRU driver"
457 depends on (X86_64 || IA64_SGI_UV || IA64_GENERIC) && SMP
458 default n
459 select MMU_NOTIFIER
460 ---help---
461 The GRU is a hardware resource located in the system chipset. The GRU
462 contains memory that can be mmapped into the user address space. This memory is
463 used to communicate with the GRU to perform functions such as load/store,
464 scatter/gather, bcopy, AMOs, etc. The GRU is directly accessed by user
465 instructions using user virtual addresses. GRU instructions (ex., bcopy) use
466 user virtual addresses for operands.
467
468 If you are not running on a SGI UV system, say N.
469
470config SGI_GRU_DEBUG
471 bool "SGI GRU driver debug"
472 depends on SGI_GRU
473 default n
474 ---help---
475 This option enables addition debugging code for the SGI GRU driver. If
476 you are unsure, say N.
477
453endif # MISC_DEVICES 478endif # MISC_DEVICES
diff --git a/drivers/misc/Makefile b/drivers/misc/Makefile
index f5e273420c09..c6c13f60b452 100644
--- a/drivers/misc/Makefile
+++ b/drivers/misc/Makefile
@@ -28,4 +28,5 @@ obj-$(CONFIG_INTEL_MENLOW) += intel_menlow.o
28obj-$(CONFIG_ENCLOSURE_SERVICES) += enclosure.o 28obj-$(CONFIG_ENCLOSURE_SERVICES) += enclosure.o
29obj-$(CONFIG_KGDB_TESTS) += kgdbts.o 29obj-$(CONFIG_KGDB_TESTS) += kgdbts.o
30obj-$(CONFIG_SGI_XP) += sgi-xp/ 30obj-$(CONFIG_SGI_XP) += sgi-xp/
31obj-$(CONFIG_SGI_GRU) += sgi-gru/
31obj-$(CONFIG_HP_ILO) += hpilo.o 32obj-$(CONFIG_HP_ILO) += hpilo.o
diff --git a/drivers/misc/sgi-gru/Makefile b/drivers/misc/sgi-gru/Makefile
new file mode 100644
index 000000000000..d03597a521b0
--- /dev/null
+++ b/drivers/misc/sgi-gru/Makefile
@@ -0,0 +1,3 @@
1obj-$(CONFIG_SGI_GRU) := gru.o
2gru-y := grufile.o grumain.o grufault.o grutlbpurge.o gruprocfs.o grukservices.o
3
diff --git a/drivers/misc/sgi-gru/gru.h b/drivers/misc/sgi-gru/gru.h
new file mode 100644
index 000000000000..40df7cb3f0a5
--- /dev/null
+++ b/drivers/misc/sgi-gru/gru.h
@@ -0,0 +1,67 @@
1/*
2 * Copyright (c) 2008 Silicon Graphics, Inc. All Rights Reserved.
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU Lesser General Public License as published by
6 * the Free Software Foundation; either version 2.1 of the License, or
7 * (at your option) any later version.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU Lesser General Public License for more details.
13 *
14 * You should have received a copy of the GNU Lesser General Public License
15 * along with this program; if not, write to the Free Software
16 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
17 */
18
19#ifndef __GRU_H__
20#define __GRU_H__
21
22/*
23 * GRU architectural definitions
24 */
25#define GRU_CACHE_LINE_BYTES 64
26#define GRU_HANDLE_STRIDE 256
27#define GRU_CB_BASE 0
28#define GRU_DS_BASE 0x20000
29
30/*
31 * Size used to map GRU GSeg
32 */
33#if defined CONFIG_IA64
34#define GRU_GSEG_PAGESIZE (256 * 1024UL)
35#elif defined CONFIG_X86_64
36#define GRU_GSEG_PAGESIZE (256 * 1024UL) /* ZZZ 2MB ??? */
37#else
38#error "Unsupported architecture"
39#endif
40
41/*
42 * Structure for obtaining GRU resource information
43 */
44struct gru_chiplet_info {
45 int node;
46 int chiplet;
47 int blade;
48 int total_dsr_bytes;
49 int total_cbr;
50 int total_user_dsr_bytes;
51 int total_user_cbr;
52 int free_user_dsr_bytes;
53 int free_user_cbr;
54};
55
56/* Flags for GRU options on the gru_create_context() call */
57/* Select one of the follow 4 options to specify how TLB misses are handled */
58#define GRU_OPT_MISS_DEFAULT 0x0000 /* Use default mode */
59#define GRU_OPT_MISS_USER_POLL 0x0001 /* User will poll CB for faults */
60#define GRU_OPT_MISS_FMM_INTR 0x0002 /* Send interrupt to cpu to
61 handle fault */
62#define GRU_OPT_MISS_FMM_POLL 0x0003 /* Use system polling thread */
63#define GRU_OPT_MISS_MASK 0x0003 /* Mask for TLB MISS option */
64
65
66
67#endif /* __GRU_H__ */
diff --git a/drivers/misc/sgi-gru/gru_instructions.h b/drivers/misc/sgi-gru/gru_instructions.h
new file mode 100644
index 000000000000..0dc36225c7c6
--- /dev/null
+++ b/drivers/misc/sgi-gru/gru_instructions.h
@@ -0,0 +1,669 @@
1/*
2 * Copyright (c) 2008 Silicon Graphics, Inc. All Rights Reserved.
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU Lesser General Public License as published by
6 * the Free Software Foundation; either version 2.1 of the License, or
7 * (at your option) any later version.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU Lesser General Public License for more details.
13 *
14 * You should have received a copy of the GNU Lesser General Public License
15 * along with this program; if not, write to the Free Software
16 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
17 */
18
19#ifndef __GRU_INSTRUCTIONS_H__
20#define __GRU_INSTRUCTIONS_H__
21
22#define gru_flush_cache_hook(p)
23#define gru_emulator_wait_hook(p, w)
24
25/*
26 * Architecture dependent functions
27 */
28
29#if defined CONFIG_IA64
30#include <linux/compiler.h>
31#include <asm/intrinsics.h>
32#define __flush_cache(p) ia64_fc(p)
33/* Use volatile on IA64 to ensure ordering via st4.rel */
34#define gru_ordered_store_int(p,v) \
35 do { \
36 barrier(); \
37 *((volatile int *)(p)) = v; /* force st.rel */ \
38 } while (0)
39#elif defined CONFIG_X86_64
40#define __flush_cache(p) clflush(p)
41#define gru_ordered_store_int(p,v) \
42 do { \
43 barrier(); \
44 *(int *)p = v; \
45 } while (0)
46#else
47#error "Unsupported architecture"
48#endif
49
50/*
51 * Control block status and exception codes
52 */
53#define CBS_IDLE 0
54#define CBS_EXCEPTION 1
55#define CBS_ACTIVE 2
56#define CBS_CALL_OS 3
57
58/* CB substatus bitmasks */
59#define CBSS_MSG_QUEUE_MASK 7
60#define CBSS_IMPLICIT_ABORT_ACTIVE_MASK 8
61
62/* CB substatus message queue values (low 3 bits of substatus) */
63#define CBSS_NO_ERROR 0
64#define CBSS_LB_OVERFLOWED 1
65#define CBSS_QLIMIT_REACHED 2
66#define CBSS_PAGE_OVERFLOW 3
67#define CBSS_AMO_NACKED 4
68#define CBSS_PUT_NACKED 5
69
70/*
71 * Structure used to fetch exception detail for CBs that terminate with
72 * CBS_EXCEPTION
73 */
74struct control_block_extended_exc_detail {
75 unsigned long cb;
76 int opc;
77 int ecause;
78 int exopc;
79 long exceptdet0;
80 int exceptdet1;
81};
82
83/*
84 * Instruction formats
85 */
86
87/*
88 * Generic instruction format.
89 * This definition has precise bit field definitions.
90 */
91struct gru_instruction_bits {
92 /* DW 0 - low */
93 unsigned int icmd: 1;
94 unsigned char ima: 3; /* CB_DelRep, unmapped mode */
95 unsigned char reserved0: 4;
96 unsigned int xtype: 3;
97 unsigned int iaa0: 2;
98 unsigned int iaa1: 2;
99 unsigned char reserved1: 1;
100 unsigned char opc: 8; /* opcode */
101 unsigned char exopc: 8; /* extended opcode */
102 /* DW 0 - high */
103 unsigned int idef2: 22; /* TRi0 */
104 unsigned char reserved2: 2;
105 unsigned char istatus: 2;
106 unsigned char isubstatus:4;
107 unsigned char reserved3: 2;
108 /* DW 1 */
109 unsigned long idef4; /* 42 bits: TRi1, BufSize */
110 /* DW 2-6 */
111 unsigned long idef1; /* BAddr0 */
112 unsigned long idef5; /* Nelem */
113 unsigned long idef6; /* Stride, Operand1 */
114 unsigned long idef3; /* BAddr1, Value, Operand2 */
115 unsigned long reserved4;
116 /* DW 7 */
117 unsigned long avalue; /* AValue */
118};
119
120/*
121 * Generic instruction with friendlier names. This format is used
122 * for inline instructions.
123 */
124struct gru_instruction {
125 /* DW 0 */
126 unsigned int op32; /* icmd,xtype,iaa0,ima,opc */
127 unsigned int tri0;
128 unsigned long tri1_bufsize; /* DW 1 */
129 unsigned long baddr0; /* DW 2 */
130 unsigned long nelem; /* DW 3 */
131 unsigned long op1_stride; /* DW 4 */
132 unsigned long op2_value_baddr1; /* DW 5 */
133 unsigned long reserved0; /* DW 6 */
134 unsigned long avalue; /* DW 7 */
135};
136
137/* Some shifts and masks for the low 32 bits of a GRU command */
138#define GRU_CB_ICMD_SHFT 0
139#define GRU_CB_ICMD_MASK 0x1
140#define GRU_CB_XTYPE_SHFT 8
141#define GRU_CB_XTYPE_MASK 0x7
142#define GRU_CB_IAA0_SHFT 11
143#define GRU_CB_IAA0_MASK 0x3
144#define GRU_CB_IAA1_SHFT 13
145#define GRU_CB_IAA1_MASK 0x3
146#define GRU_CB_IMA_SHFT 1
147#define GRU_CB_IMA_MASK 0x3
148#define GRU_CB_OPC_SHFT 16
149#define GRU_CB_OPC_MASK 0xff
150#define GRU_CB_EXOPC_SHFT 24
151#define GRU_CB_EXOPC_MASK 0xff
152
153/* GRU instruction opcodes (opc field) */
154#define OP_NOP 0x00
155#define OP_BCOPY 0x01
156#define OP_VLOAD 0x02
157#define OP_IVLOAD 0x03
158#define OP_VSTORE 0x04
159#define OP_IVSTORE 0x05
160#define OP_VSET 0x06
161#define OP_IVSET 0x07
162#define OP_MESQ 0x08
163#define OP_GAMXR 0x09
164#define OP_GAMIR 0x0a
165#define OP_GAMIRR 0x0b
166#define OP_GAMER 0x0c
167#define OP_GAMERR 0x0d
168#define OP_BSTORE 0x0e
169#define OP_VFLUSH 0x0f
170
171
172/* Extended opcodes values (exopc field) */
173
174/* GAMIR - AMOs with implicit operands */
175#define EOP_IR_FETCH 0x01 /* Plain fetch of memory */
176#define EOP_IR_CLR 0x02 /* Fetch and clear */
177#define EOP_IR_INC 0x05 /* Fetch and increment */
178#define EOP_IR_DEC 0x07 /* Fetch and decrement */
179#define EOP_IR_QCHK1 0x0d /* Queue check, 64 byte msg */
180#define EOP_IR_QCHK2 0x0e /* Queue check, 128 byte msg */
181
182/* GAMIRR - Registered AMOs with implicit operands */
183#define EOP_IRR_FETCH 0x01 /* Registered fetch of memory */
184#define EOP_IRR_CLR 0x02 /* Registered fetch and clear */
185#define EOP_IRR_INC 0x05 /* Registered fetch and increment */
186#define EOP_IRR_DEC 0x07 /* Registered fetch and decrement */
187#define EOP_IRR_DECZ 0x0f /* Registered fetch and decrement, update on zero*/
188
189/* GAMER - AMOs with explicit operands */
190#define EOP_ER_SWAP 0x00 /* Exchange argument and memory */
191#define EOP_ER_OR 0x01 /* Logical OR with memory */
192#define EOP_ER_AND 0x02 /* Logical AND with memory */
193#define EOP_ER_XOR 0x03 /* Logical XOR with memory */
194#define EOP_ER_ADD 0x04 /* Add value to memory */
195#define EOP_ER_CSWAP 0x08 /* Compare with operand2, write operand1 if match*/
196#define EOP_ER_CADD 0x0c /* Queue check, operand1*64 byte msg */
197
198/* GAMERR - Registered AMOs with explicit operands */
199#define EOP_ERR_SWAP 0x00 /* Exchange argument and memory */
200#define EOP_ERR_OR 0x01 /* Logical OR with memory */
201#define EOP_ERR_AND 0x02 /* Logical AND with memory */
202#define EOP_ERR_XOR 0x03 /* Logical XOR with memory */
203#define EOP_ERR_ADD 0x04 /* Add value to memory */
204#define EOP_ERR_CSWAP 0x08 /* Compare with operand2, write operand1 if match*/
205#define EOP_ERR_EPOLL 0x09 /* Poll for equality */
206#define EOP_ERR_NPOLL 0x0a /* Poll for inequality */
207
208/* GAMXR - SGI Arithmetic unit */
209#define EOP_XR_CSWAP 0x0b /* Masked compare exchange */
210
211
212/* Transfer types (xtype field) */
213#define XTYPE_B 0x0 /* byte */
214#define XTYPE_S 0x1 /* short (2-byte) */
215#define XTYPE_W 0x2 /* word (4-byte) */
216#define XTYPE_DW 0x3 /* doubleword (8-byte) */
217#define XTYPE_CL 0x6 /* cacheline (64-byte) */
218
219
220/* Instruction access attributes (iaa0, iaa1 fields) */
221#define IAA_RAM 0x0 /* normal cached RAM access */
222#define IAA_NCRAM 0x2 /* noncoherent RAM access */
223#define IAA_MMIO 0x1 /* noncoherent memory-mapped I/O space */
224#define IAA_REGISTER 0x3 /* memory-mapped registers, etc. */
225
226
227/* Instruction mode attributes (ima field) */
228#define IMA_MAPPED 0x0 /* Virtual mode */
229#define IMA_CB_DELAY 0x1 /* hold read responses until status changes */
230#define IMA_UNMAPPED 0x2 /* bypass the TLBs (OS only) */
231#define IMA_INTERRUPT 0x4 /* Interrupt when instruction completes */
232
233/* CBE ecause bits */
234#define CBE_CAUSE_RI (1 << 0)
235#define CBE_CAUSE_INVALID_INSTRUCTION (1 << 1)
236#define CBE_CAUSE_UNMAPPED_MODE_FORBIDDEN (1 << 2)
237#define CBE_CAUSE_PE_CHECK_DATA_ERROR (1 << 3)
238#define CBE_CAUSE_IAA_GAA_MISMATCH (1 << 4)
239#define CBE_CAUSE_DATA_SEGMENT_LIMIT_EXCEPTION (1 << 5)
240#define CBE_CAUSE_OS_FATAL_TLB_FAULT (1 << 6)
241#define CBE_CAUSE_EXECUTION_HW_ERROR (1 << 7)
242#define CBE_CAUSE_TLBHW_ERROR (1 << 8)
243#define CBE_CAUSE_RA_REQUEST_TIMEOUT (1 << 9)
244#define CBE_CAUSE_HA_REQUEST_TIMEOUT (1 << 10)
245#define CBE_CAUSE_RA_RESPONSE_FATAL (1 << 11)
246#define CBE_CAUSE_RA_RESPONSE_NON_FATAL (1 << 12)
247#define CBE_CAUSE_HA_RESPONSE_FATAL (1 << 13)
248#define CBE_CAUSE_HA_RESPONSE_NON_FATAL (1 << 14)
249#define CBE_CAUSE_ADDRESS_SPACE_DECODE_ERROR (1 << 15)
250#define CBE_CAUSE_RESPONSE_DATA_ERROR (1 << 16)
251#define CBE_CAUSE_PROTOCOL_STATE_DATA_ERROR (1 << 17)
252
253/*
254 * Exceptions are retried for the following cases. If any OTHER bits are set
255 * in ecause, the exception is not retryable.
256 */
257#define EXCEPTION_RETRY_BITS (CBE_CAUSE_RESPONSE_DATA_ERROR | \
258 CBE_CAUSE_RA_REQUEST_TIMEOUT | \
259 CBE_CAUSE_TLBHW_ERROR | \
260 CBE_CAUSE_HA_REQUEST_TIMEOUT)
261
262/* Message queue head structure */
263union gru_mesqhead {
264 unsigned long val;
265 struct {
266 unsigned int head;
267 unsigned int limit;
268 };
269};
270
271
272/* Generate the low word of a GRU instruction */
273static inline unsigned int
274__opword(unsigned char opcode, unsigned char exopc, unsigned char xtype,
275 unsigned char iaa0, unsigned char iaa1,
276 unsigned char ima)
277{
278 return (1 << GRU_CB_ICMD_SHFT) |
279 (iaa0 << GRU_CB_IAA0_SHFT) |
280 (iaa1 << GRU_CB_IAA1_SHFT) |
281 (ima << GRU_CB_IMA_SHFT) |
282 (xtype << GRU_CB_XTYPE_SHFT) |
283 (opcode << GRU_CB_OPC_SHFT) |
284 (exopc << GRU_CB_EXOPC_SHFT);
285}
286
287/*
288 * Architecture specific intrinsics
289 */
290static inline void gru_flush_cache(void *p)
291{
292 __flush_cache(p);
293}
294
295/*
296 * Store the lower 32 bits of the command including the "start" bit. Then
297 * start the instruction executing.
298 */
299static inline void gru_start_instruction(struct gru_instruction *ins, int op32)
300{
301 gru_ordered_store_int(ins, op32);
302}
303
304
305/* Convert "hints" to IMA */
306#define CB_IMA(h) ((h) | IMA_UNMAPPED)
307
308/* Convert data segment cache line index into TRI0 / TRI1 value */
309#define GRU_DINDEX(i) ((i) * GRU_CACHE_LINE_BYTES)
310
311/* Inline functions for GRU instructions.
312 * Note:
313 * - nelem and stride are in elements
314 * - tri0/tri1 are byte offsets from the beginning of the data segment.
315 */
316static inline void gru_vload(void *cb, unsigned long mem_addr,
317 unsigned int tri0, unsigned char xtype, unsigned long nelem,
318 unsigned long stride, unsigned long hints)
319{
320 struct gru_instruction *ins = (struct gru_instruction *)cb;
321
322 ins->baddr0 = (long)mem_addr;
323 ins->nelem = nelem;
324 ins->tri0 = tri0;
325 ins->op1_stride = stride;
326 gru_start_instruction(ins, __opword(OP_VLOAD, 0, xtype, IAA_RAM, 0,
327 CB_IMA(hints)));
328}
329
330static inline void gru_vstore(void *cb, unsigned long mem_addr,
331 unsigned int tri0, unsigned char xtype, unsigned long nelem,
332 unsigned long stride, unsigned long hints)
333{
334 struct gru_instruction *ins = (void *)cb;
335
336 ins->baddr0 = (long)mem_addr;
337 ins->nelem = nelem;
338 ins->tri0 = tri0;
339 ins->op1_stride = stride;
340 gru_start_instruction(ins, __opword(OP_VSTORE, 0, xtype, IAA_RAM, 0,
341 CB_IMA(hints)));
342}
343
344static inline void gru_ivload(void *cb, unsigned long mem_addr,
345 unsigned int tri0, unsigned int tri1, unsigned char xtype,
346 unsigned long nelem, unsigned long hints)
347{
348 struct gru_instruction *ins = (void *)cb;
349
350 ins->baddr0 = (long)mem_addr;
351 ins->nelem = nelem;
352 ins->tri0 = tri0;
353 ins->tri1_bufsize = tri1;
354 gru_start_instruction(ins, __opword(OP_IVLOAD, 0, xtype, IAA_RAM, 0,
355 CB_IMA(hints)));
356}
357
358static inline void gru_ivstore(void *cb, unsigned long mem_addr,
359 unsigned int tri0, unsigned int tri1,
360 unsigned char xtype, unsigned long nelem, unsigned long hints)
361{
362 struct gru_instruction *ins = (void *)cb;
363
364 ins->baddr0 = (long)mem_addr;
365 ins->nelem = nelem;
366 ins->tri0 = tri0;
367 ins->tri1_bufsize = tri1;
368 gru_start_instruction(ins, __opword(OP_IVSTORE, 0, xtype, IAA_RAM, 0,
369 CB_IMA(hints)));
370}
371
372static inline void gru_vset(void *cb, unsigned long mem_addr,
373 unsigned long value, unsigned char xtype, unsigned long nelem,
374 unsigned long stride, unsigned long hints)
375{
376 struct gru_instruction *ins = (void *)cb;
377
378 ins->baddr0 = (long)mem_addr;
379 ins->op2_value_baddr1 = value;
380 ins->nelem = nelem;
381 ins->op1_stride = stride;
382 gru_start_instruction(ins, __opword(OP_VSET, 0, xtype, IAA_RAM, 0,
383 CB_IMA(hints)));
384}
385
386static inline void gru_ivset(void *cb, unsigned long mem_addr,
387 unsigned int tri1, unsigned long value, unsigned char xtype,
388 unsigned long nelem, unsigned long hints)
389{
390 struct gru_instruction *ins = (void *)cb;
391
392 ins->baddr0 = (long)mem_addr;
393 ins->op2_value_baddr1 = value;
394 ins->nelem = nelem;
395 ins->tri1_bufsize = tri1;
396 gru_start_instruction(ins, __opword(OP_IVSET, 0, xtype, IAA_RAM, 0,
397 CB_IMA(hints)));
398}
399
400static inline void gru_vflush(void *cb, unsigned long mem_addr,
401 unsigned long nelem, unsigned char xtype, unsigned long stride,
402 unsigned long hints)
403{
404 struct gru_instruction *ins = (void *)cb;
405
406 ins->baddr0 = (long)mem_addr;
407 ins->op1_stride = stride;
408 ins->nelem = nelem;
409 gru_start_instruction(ins, __opword(OP_VFLUSH, 0, xtype, IAA_RAM, 0,
410 CB_IMA(hints)));
411}
412
413static inline void gru_nop(void *cb, int hints)
414{
415 struct gru_instruction *ins = (void *)cb;
416
417 gru_start_instruction(ins, __opword(OP_NOP, 0, 0, 0, 0, CB_IMA(hints)));
418}
419
420
421static inline void gru_bcopy(void *cb, const unsigned long src,
422 unsigned long dest,
423 unsigned int tri0, unsigned int xtype, unsigned long nelem,
424 unsigned int bufsize, unsigned long hints)
425{
426 struct gru_instruction *ins = (void *)cb;
427
428 ins->baddr0 = (long)src;
429 ins->op2_value_baddr1 = (long)dest;
430 ins->nelem = nelem;
431 ins->tri0 = tri0;
432 ins->tri1_bufsize = bufsize;
433 gru_start_instruction(ins, __opword(OP_BCOPY, 0, xtype, IAA_RAM,
434 IAA_RAM, CB_IMA(hints)));
435}
436
437static inline void gru_bstore(void *cb, const unsigned long src,
438 unsigned long dest, unsigned int tri0, unsigned int xtype,
439 unsigned long nelem, unsigned long hints)
440{
441 struct gru_instruction *ins = (void *)cb;
442
443 ins->baddr0 = (long)src;
444 ins->op2_value_baddr1 = (long)dest;
445 ins->nelem = nelem;
446 ins->tri0 = tri0;
447 gru_start_instruction(ins, __opword(OP_BSTORE, 0, xtype, 0, IAA_RAM,
448 CB_IMA(hints)));
449}
450
451static inline void gru_gamir(void *cb, int exopc, unsigned long src,
452 unsigned int xtype, unsigned long hints)
453{
454 struct gru_instruction *ins = (void *)cb;
455
456 ins->baddr0 = (long)src;
457 gru_start_instruction(ins, __opword(OP_GAMIR, exopc, xtype, IAA_RAM, 0,
458 CB_IMA(hints)));
459}
460
461static inline void gru_gamirr(void *cb, int exopc, unsigned long src,
462 unsigned int xtype, unsigned long hints)
463{
464 struct gru_instruction *ins = (void *)cb;
465
466 ins->baddr0 = (long)src;
467 gru_start_instruction(ins, __opword(OP_GAMIRR, exopc, xtype, IAA_RAM, 0,
468 CB_IMA(hints)));
469}
470
471static inline void gru_gamer(void *cb, int exopc, unsigned long src,
472 unsigned int xtype,
473 unsigned long operand1, unsigned long operand2,
474 unsigned long hints)
475{
476 struct gru_instruction *ins = (void *)cb;
477
478 ins->baddr0 = (long)src;
479 ins->op1_stride = operand1;
480 ins->op2_value_baddr1 = operand2;
481 gru_start_instruction(ins, __opword(OP_GAMER, exopc, xtype, IAA_RAM, 0,
482 CB_IMA(hints)));
483}
484
485static inline void gru_gamerr(void *cb, int exopc, unsigned long src,
486 unsigned int xtype, unsigned long operand1,
487 unsigned long operand2, unsigned long hints)
488{
489 struct gru_instruction *ins = (void *)cb;
490
491 ins->baddr0 = (long)src;
492 ins->op1_stride = operand1;
493 ins->op2_value_baddr1 = operand2;
494 gru_start_instruction(ins, __opword(OP_GAMERR, exopc, xtype, IAA_RAM, 0,
495 CB_IMA(hints)));
496}
497
498static inline void gru_gamxr(void *cb, unsigned long src,
499 unsigned int tri0, unsigned long hints)
500{
501 struct gru_instruction *ins = (void *)cb;
502
503 ins->baddr0 = (long)src;
504 ins->nelem = 4;
505 gru_start_instruction(ins, __opword(OP_GAMXR, EOP_XR_CSWAP, XTYPE_DW,
506 IAA_RAM, 0, CB_IMA(hints)));
507}
508
509static inline void gru_mesq(void *cb, unsigned long queue,
510 unsigned long tri0, unsigned long nelem,
511 unsigned long hints)
512{
513 struct gru_instruction *ins = (void *)cb;
514
515 ins->baddr0 = (long)queue;
516 ins->nelem = nelem;
517 ins->tri0 = tri0;
518 gru_start_instruction(ins, __opword(OP_MESQ, 0, XTYPE_CL, IAA_RAM, 0,
519 CB_IMA(hints)));
520}
521
522static inline unsigned long gru_get_amo_value(void *cb)
523{
524 struct gru_instruction *ins = (void *)cb;
525
526 return ins->avalue;
527}
528
529static inline int gru_get_amo_value_head(void *cb)
530{
531 struct gru_instruction *ins = (void *)cb;
532
533 return ins->avalue & 0xffffffff;
534}
535
536static inline int gru_get_amo_value_limit(void *cb)
537{
538 struct gru_instruction *ins = (void *)cb;
539
540 return ins->avalue >> 32;
541}
542
543static inline union gru_mesqhead gru_mesq_head(int head, int limit)
544{
545 union gru_mesqhead mqh;
546
547 mqh.head = head;
548 mqh.limit = limit;
549 return mqh;
550}
551
552/*
553 * Get struct control_block_extended_exc_detail for CB.
554 */
555extern int gru_get_cb_exception_detail(void *cb,
556 struct control_block_extended_exc_detail *excdet);
557
558#define GRU_EXC_STR_SIZE 256
559
560extern int gru_check_status_proc(void *cb);
561extern int gru_wait_proc(void *cb);
562extern void gru_wait_abort_proc(void *cb);
563
564/*
565 * Control block definition for checking status
566 */
567struct gru_control_block_status {
568 unsigned int icmd :1;
569 unsigned int unused1 :31;
570 unsigned int unused2 :24;
571 unsigned int istatus :2;
572 unsigned int isubstatus :4;
573 unsigned int inused3 :2;
574};
575
576/* Get CB status */
577static inline int gru_get_cb_status(void *cb)
578{
579 struct gru_control_block_status *cbs = (void *)cb;
580
581 return cbs->istatus;
582}
583
584/* Get CB message queue substatus */
585static inline int gru_get_cb_message_queue_substatus(void *cb)
586{
587 struct gru_control_block_status *cbs = (void *)cb;
588
589 return cbs->isubstatus & CBSS_MSG_QUEUE_MASK;
590}
591
592/* Get CB substatus */
593static inline int gru_get_cb_substatus(void *cb)
594{
595 struct gru_control_block_status *cbs = (void *)cb;
596
597 return cbs->isubstatus;
598}
599
600/* Check the status of a CB. If the CB is in UPM mode, call the
601 * OS to handle the UPM status.
602 * Returns the CB status field value (0 for normal completion)
603 */
604static inline int gru_check_status(void *cb)
605{
606 struct gru_control_block_status *cbs = (void *)cb;
607 int ret = cbs->istatus;
608
609 if (ret == CBS_CALL_OS)
610 ret = gru_check_status_proc(cb);
611 return ret;
612}
613
614/* Wait for CB to complete.
615 * Returns the CB status field value (0 for normal completion)
616 */
617static inline int gru_wait(void *cb)
618{
619 struct gru_control_block_status *cbs = (void *)cb;
620	int ret = cbs->istatus;
621
622 if (ret != CBS_IDLE)
623 ret = gru_wait_proc(cb);
624 return ret;
625}
626
627/* Wait for CB to complete. Aborts program if error. (Note: error does NOT
628 * mean TLB miss - only fatal errors such as memory parity errors or user
629 * bugs will cause termination.)
630 */
631static inline void gru_wait_abort(void *cb)
632{
633 struct gru_control_block_status *cbs = (void *)cb;
634
635 if (cbs->istatus != CBS_IDLE)
636 gru_wait_abort_proc(cb);
637}
638
639
640/*
641 * Get a pointer to a control block
642 * gseg - GSeg address returned from gru_get_thread_gru_segment()
643 * index - index of desired CB
644 */
645static inline void *gru_get_cb_pointer(void *gseg,
646 int index)
647{
648 return gseg + GRU_CB_BASE + index * GRU_HANDLE_STRIDE;
649}
650
651/*
652 * Get a pointer to a cacheline in the data segment portion of a GSeg
653 * gseg - GSeg address returned from gru_get_thread_gru_segment()
654 * index - index of desired cache line
655 */
656static inline void *gru_get_data_pointer(void *gseg, int index)
657{
658 return gseg + GRU_DS_BASE + index * GRU_CACHE_LINE_BYTES;
659}
660
661/*
662 * Convert a vaddr into the tri index within the GSEG
663 * vaddr - virtual address within the gseg
664 */
665static inline int gru_get_tri(void *vaddr)
666{
667 return ((unsigned long)vaddr & (GRU_GSEG_PAGESIZE - 1)) - GRU_DS_BASE;
668}
669#endif /* __GRU_INSTRUCTIONS_H__ */
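Editorial sketch of how the inline helpers above fit together (not part of the patch; `gseg` is assumed to come from the user library's gru_get_thread_gru_segment() mentioned in the comments, and `src` is assumed to be a valid source buffer):

	struct control_block_extended_exc_detail excdet;
	void *cb  = gru_get_cb_pointer(gseg, 0);	/* control block 0 of the GSeg */
	void *dsr = gru_get_data_pointer(gseg, 0);	/* data segment cacheline 0 */

	/* Load one cacheline from src into DSR line 0, then block until done. */
	gru_vload(cb, (unsigned long)src, gru_get_tri(dsr), XTYPE_CL, 1, 1, 0);
	if (gru_wait(cb) != CBS_IDLE)			/* non-zero status => exception */
		gru_get_cb_exception_detail(cb, &excdet);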
diff --git a/drivers/misc/sgi-gru/grufault.c b/drivers/misc/sgi-gru/grufault.c
new file mode 100644
index 000000000000..3d33015bbf31
--- /dev/null
+++ b/drivers/misc/sgi-gru/grufault.c
@@ -0,0 +1,633 @@
1/*
2 * SN Platform GRU Driver
3 *
4 * FAULT HANDLER FOR GRU DETECTED TLB MISSES
5 *
6 * This file contains code that handles TLB misses within the GRU.
7 * These misses are reported either via interrupts or user polling of
8 * the user CB.
9 *
10 * Copyright (c) 2008 Silicon Graphics, Inc. All Rights Reserved.
11 *
12 * This program is free software; you can redistribute it and/or modify
13 * it under the terms of the GNU General Public License as published by
14 * the Free Software Foundation; either version 2 of the License, or
15 * (at your option) any later version.
16 *
17 * This program is distributed in the hope that it will be useful,
18 * but WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
20 * GNU General Public License for more details.
21 *
22 * You should have received a copy of the GNU General Public License
23 * along with this program; if not, write to the Free Software
24 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
25 */
26
27#include <linux/kernel.h>
28#include <linux/errno.h>
29#include <linux/spinlock.h>
30#include <linux/mm.h>
31#include <linux/hugetlb.h>
32#include <linux/device.h>
33#include <linux/io.h>
34#include <linux/uaccess.h>
35#include <asm/pgtable.h>
36#include "gru.h"
37#include "grutables.h"
38#include "grulib.h"
39#include "gru_instructions.h"
40#include <asm/uv/uv_hub.h>
41
42/*
43 * Test if a physical address is a valid GRU GSEG address
44 */
45static inline int is_gru_paddr(unsigned long paddr)
46{
47 return paddr >= gru_start_paddr && paddr < gru_end_paddr;
48}
49
50/*
51 * Find the vma of a GRU segment. Caller must hold mmap_sem.
52 */
53struct vm_area_struct *gru_find_vma(unsigned long vaddr)
54{
55 struct vm_area_struct *vma;
56
57 vma = find_vma(current->mm, vaddr);
58 if (vma && vma->vm_start <= vaddr && vma->vm_ops == &gru_vm_ops)
59 return vma;
60 return NULL;
61}
62
63/*
64 * Find and lock the gts that contains the specified user vaddr.
65 *
66 * Returns:
67 * - *gts with the mmap_sem locked for read and the GTS locked.
68 * - NULL if vaddr invalid OR is not a valid GSEG vaddr.
69 */
70
71static struct gru_thread_state *gru_find_lock_gts(unsigned long vaddr)
72{
73 struct mm_struct *mm = current->mm;
74 struct vm_area_struct *vma;
75 struct gru_thread_state *gts = NULL;
76
77 down_read(&mm->mmap_sem);
78 vma = gru_find_vma(vaddr);
79 if (vma)
80 gts = gru_find_thread_state(vma, TSID(vaddr, vma));
81 if (gts)
82 mutex_lock(&gts->ts_ctxlock);
83 else
84 up_read(&mm->mmap_sem);
85 return gts;
86}
87
88static struct gru_thread_state *gru_alloc_locked_gts(unsigned long vaddr)
89{
90 struct mm_struct *mm = current->mm;
91 struct vm_area_struct *vma;
92 struct gru_thread_state *gts = NULL;
93
94 down_write(&mm->mmap_sem);
95 vma = gru_find_vma(vaddr);
96 if (vma)
97 gts = gru_alloc_thread_state(vma, TSID(vaddr, vma));
98 if (gts) {
99 mutex_lock(&gts->ts_ctxlock);
100 downgrade_write(&mm->mmap_sem);
101 } else {
102 up_write(&mm->mmap_sem);
103 }
104
105 return gts;
106}
107
108/*
109 * Unlock a GTS that was previously locked with gru_find_lock_gts().
110 */
111static void gru_unlock_gts(struct gru_thread_state *gts)
112{
113 mutex_unlock(&gts->ts_ctxlock);
114 up_read(&current->mm->mmap_sem);
115}
116
117/*
118 * Set a CB.istatus to active using a user virtual address. This must be done
119 * just prior to a TFH RESTART. The new cb.istatus is an in-cache status ONLY.
120 * If the line is evicted, the status may be lost. The in-cache update
121 * is necessary to prevent the user from seeing a stale cb.istatus that will
122 * change as soon as the TFH restart is complete. Races may cause an
123 * occasional failure to clear the cb.istatus, but that is ok.
124 *
125 * If the cb address is not valid (should not happen, but...), nothing
126 * bad will happen. The get_user()/put_user() will fail but there
127 * are no bad side-effects.
128 */
129static void gru_cb_set_istatus_active(unsigned long __user *cb)
130{
131 union {
132 struct gru_instruction_bits bits;
133 unsigned long dw;
134 } u;
135
136 if (cb) {
137 get_user(u.dw, cb);
138 u.bits.istatus = CBS_ACTIVE;
139 put_user(u.dw, cb);
140 }
141}
142
143/*
144 * Convert an interrupt IRQ to a pointer to the GRU GTS that caused the
145 * interrupt. Interrupts are always sent to a cpu on the blade that contains the
146 * GRU (except for headless blades which are not currently supported). A blade
147 * has N grus; a block of N consecutive IRQs is assigned to the GRUs. The IRQ
148 * number uniquely identifies the GRU chiplet on the local blade that caused the
149 * interrupt. Always called in interrupt context.
150 */
151static inline struct gru_state *irq_to_gru(int irq)
152{
153 return &gru_base[uv_numa_blade_id()]->bs_grus[irq - IRQ_GRU];
154}
155
156/*
157 * Read & clear a TFM
158 *
159 * The GRU has an array of fault maps. A map is private to a cpu.
160 * Only one cpu will be accessing a cpu's fault map.
161 *
162 * This function scans the cpu-private fault map & clears all bits that
163 * are set. The function returns a bitmap that indicates the bits that
164 * were cleared. Note that since the maps may be updated asynchronously by
165 * the GRU, atomic operations must be used to clear bits.
166 */
167static void get_clear_fault_map(struct gru_state *gru,
168 struct gru_tlb_fault_map *map)
169{
170 unsigned long i, k;
171 struct gru_tlb_fault_map *tfm;
172
173 tfm = get_tfm_for_cpu(gru, gru_cpu_fault_map_id());
174 prefetchw(tfm); /* Helps on hardware, required for emulator */
175 for (i = 0; i < BITS_TO_LONGS(GRU_NUM_CBE); i++) {
176 k = tfm->fault_bits[i];
177 if (k)
178 k = xchg(&tfm->fault_bits[i], 0UL);
179 map->fault_bits[i] = k;
180 }
181
182 /*
183 * Not functionally required but helps performance. (Required
184 * on emulator)
185 */
186 gru_flush_cache(tfm);
187}
188
189/*
190 * Atomic (interrupt context) & non-atomic (user context) functions to
191 * convert a vaddr into a physical address. The size of the page
192 * is returned in pageshift.
193 * returns:
194 * 0 - successful
195 * < 0 - error code
196 * 1 - (atomic only) try again in non-atomic context
197 */
198static int non_atomic_pte_lookup(struct vm_area_struct *vma,
199 unsigned long vaddr, int write,
200 unsigned long *paddr, int *pageshift)
201{
202 struct page *page;
203
204 /* ZZZ Need to handle HUGE pages */
205 if (is_vm_hugetlb_page(vma))
206 return -EFAULT;
207 *pageshift = PAGE_SHIFT;
208 if (get_user_pages
209 (current, current->mm, vaddr, 1, write, 0, &page, NULL) <= 0)
210 return -EFAULT;
211 *paddr = page_to_phys(page);
212 put_page(page);
213 return 0;
214}
215
216/*
217 *
218 * atomic_pte_lookup
219 *
220 * Convert a user virtual address to a physical address
221 * Only supports Intel large pages (2MB only) on x86_64.
222 * ZZZ - hugepage support is incomplete
223 */
224static int atomic_pte_lookup(struct vm_area_struct *vma, unsigned long vaddr,
225 int write, unsigned long *paddr, int *pageshift)
226{
227 pgd_t *pgdp;
228 pmd_t *pmdp;
229 pud_t *pudp;
230 pte_t pte;
231
232 WARN_ON(irqs_disabled()); /* ZZZ debug */
233
234 local_irq_disable();
235 pgdp = pgd_offset(vma->vm_mm, vaddr);
236 if (unlikely(pgd_none(*pgdp)))
237 goto err;
238
239 pudp = pud_offset(pgdp, vaddr);
240 if (unlikely(pud_none(*pudp)))
241 goto err;
242
243 pmdp = pmd_offset(pudp, vaddr);
244 if (unlikely(pmd_none(*pmdp)))
245 goto err;
246#ifdef CONFIG_X86_64
247 if (unlikely(pmd_large(*pmdp)))
248 pte = *(pte_t *) pmdp;
249 else
250#endif
251 pte = *pte_offset_kernel(pmdp, vaddr);
252
253 local_irq_enable();
254
255 if (unlikely(!pte_present(pte) ||
256 (write && (!pte_write(pte) || !pte_dirty(pte)))))
257 return 1;
258
259 *paddr = pte_pfn(pte) << PAGE_SHIFT;
260 *pageshift = is_vm_hugetlb_page(vma) ? HPAGE_SHIFT : PAGE_SHIFT;
261 return 0;
262
263err:
264 local_irq_enable();
265 return 1;
266}
267
268/*
269 * Drop a TLB entry into the GRU. The fault is described by info in a TFH.
270 * Input:
271 * cb Address of user CBR. Null if not running in user context
272 * Return:
273 * 0 = dropin, exception, or switch to UPM successful
274 * 1 = range invalidate active
275 * < 0 = error code
276 *
277 */
278static int gru_try_dropin(struct gru_thread_state *gts,
279 struct gru_tlb_fault_handle *tfh,
280 unsigned long __user *cb)
281{
282 struct mm_struct *mm = gts->ts_mm;
283 struct vm_area_struct *vma;
284 int pageshift, asid, write, ret;
285 unsigned long paddr, gpa, vaddr;
286
287 /*
288 * NOTE: The GRU contains magic hardware that eliminates races between
289 * TLB invalidates and TLB dropins. If an invalidate occurs
290 * in the window between reading the TFH and the subsequent TLB dropin,
291 * the dropin is ignored. This eliminates the need for additional locks.
292 */
293
294 /*
295 * Error if TFH state is IDLE or FMM mode & the user is issuing a UPM call.
296 * Might be a hardware race OR a stupid user. Ignore FMM because FMM
297 * is a transient state.
298 */
299 if (tfh->state == TFHSTATE_IDLE)
300 goto failidle;
301 if (tfh->state == TFHSTATE_MISS_FMM && cb)
302 goto failfmm;
303
304 write = (tfh->cause & TFHCAUSE_TLB_MOD) != 0;
305 vaddr = tfh->missvaddr;
306 asid = tfh->missasid;
307 if (asid == 0)
308 goto failnoasid;
309
310 rmb(); /* TFH must be cache resident before reading ms_range_active */
311
312 /*
313 * TFH is cache resident - at least briefly. Fail the dropin
314 * if a range invalidate is active.
315 */
316 if (atomic_read(&gts->ts_gms->ms_range_active))
317 goto failactive;
318
319 vma = find_vma(mm, vaddr);
320 if (!vma)
321 goto failinval;
322
323 /*
324 * Atomic lookup is faster & usually works even if called in non-atomic
325 * context.
326 */
327 ret = atomic_pte_lookup(vma, vaddr, write, &paddr, &pageshift);
328 if (ret) {
329 if (!cb)
330 goto failupm;
331 if (non_atomic_pte_lookup(vma, vaddr, write, &paddr,
332 &pageshift))
333 goto failinval;
334 }
335 if (is_gru_paddr(paddr))
336 goto failinval;
337
338 paddr = paddr & ~((1UL << pageshift) - 1);
339 gpa = uv_soc_phys_ram_to_gpa(paddr);
340 gru_cb_set_istatus_active(cb);
341 tfh_write_restart(tfh, gpa, GAA_RAM, vaddr, asid, write,
342 GRU_PAGESIZE(pageshift));
343 STAT(tlb_dropin);
344 gru_dbg(grudev,
345 "%s: tfh 0x%p, vaddr 0x%lx, asid 0x%x, ps %d, gpa 0x%lx\n",
346 ret ? "non-atomic" : "atomic", tfh, vaddr, asid,
347 pageshift, gpa);
348 return 0;
349
350failnoasid:
351 /* No asid (delayed unload). */
352 STAT(tlb_dropin_fail_no_asid);
353 gru_dbg(grudev, "FAILED no_asid tfh: 0x%p, vaddr 0x%lx\n", tfh, vaddr);
354 if (!cb)
355 tfh_user_polling_mode(tfh);
356 else
357 gru_flush_cache(tfh);
358 return -EAGAIN;
359
360failupm:
361 /* Atomic failure switch CBR to UPM */
362 tfh_user_polling_mode(tfh);
363 STAT(tlb_dropin_fail_upm);
364 gru_dbg(grudev, "FAILED upm tfh: 0x%p, vaddr 0x%lx\n", tfh, vaddr);
365 return 1;
366
367failfmm:
368 /* FMM state on UPM call */
369 STAT(tlb_dropin_fail_fmm);
370 gru_dbg(grudev, "FAILED fmm tfh: 0x%p, state %d\n", tfh, tfh->state);
371 return 0;
372
373failidle:
374 /* TFH was idle - no miss pending */
375 gru_flush_cache(tfh);
376 if (cb)
377 gru_flush_cache(cb);
378 STAT(tlb_dropin_fail_idle);
379 gru_dbg(grudev, "FAILED idle tfh: 0x%p, state %d\n", tfh, tfh->state);
380 return 0;
381
382failinval:
383 /* All errors (atomic & non-atomic) switch CBR to EXCEPTION state */
384 tfh_exception(tfh);
385 STAT(tlb_dropin_fail_invalid);
386 gru_dbg(grudev, "FAILED inval tfh: 0x%p, vaddr 0x%lx\n", tfh, vaddr);
387 return -EFAULT;
388
389failactive:
390 /* Range invalidate active. Switch to UPM iff atomic */
391 if (!cb)
392 tfh_user_polling_mode(tfh);
393 else
394 gru_flush_cache(tfh);
395 STAT(tlb_dropin_fail_range_active);
396 gru_dbg(grudev, "FAILED range active: tfh 0x%p, vaddr 0x%lx\n",
397 tfh, vaddr);
398 return 1;
399}
400
401/*
402 * Process an external interrupt from the GRU. This interrupt is
403 * caused by a TLB miss.
404 * Note that this is the interrupt handler that is registered with the
405 * Linux interrupt subsystem.
406 */
407irqreturn_t gru_intr(int irq, void *dev_id)
408{
409 struct gru_state *gru;
410 struct gru_tlb_fault_map map;
411 struct gru_thread_state *gts;
412 struct gru_tlb_fault_handle *tfh = NULL;
413 int cbrnum, ctxnum;
414
415 STAT(intr);
416
417 gru = irq_to_gru(irq);
418 if (!gru) {
419 dev_err(grudev, "GRU: invalid interrupt: cpu %d, irq %d\n",
420 raw_smp_processor_id(), irq);
421 return IRQ_NONE;
422 }
423 get_clear_fault_map(gru, &map);
424 gru_dbg(grudev, "irq %d, gru %x, map 0x%lx\n", irq, gru->gs_gid,
425 map.fault_bits[0]);
426
427 for_each_cbr_in_tfm(cbrnum, map.fault_bits) {
428 tfh = get_tfh_by_index(gru, cbrnum);
429 prefetchw(tfh); /* Helps on hdw, required for emulator */
430
431 /*
432 * When hardware sets a bit in the faultmap, it implicitly
433 * locks the GRU context so that it cannot be unloaded.
434 * The gts cannot change until a TFH start/writestart command
435 * is issued.
436 */
437 ctxnum = tfh->ctxnum;
438 gts = gru->gs_gts[ctxnum];
439
440 /*
441 * This is running in interrupt context. Trylock the mmap_sem.
442 * If it fails, retry the fault in user context.
443 */
444 if (down_read_trylock(&gts->ts_mm->mmap_sem)) {
445 gru_try_dropin(gts, tfh, NULL);
446 up_read(&gts->ts_mm->mmap_sem);
447 } else {
448 tfh_user_polling_mode(tfh);
449 }
450 }
451 return IRQ_HANDLED;
452}
453
454
455static int gru_user_dropin(struct gru_thread_state *gts,
456 struct gru_tlb_fault_handle *tfh,
457 unsigned long __user *cb)
458{
459 struct gru_mm_struct *gms = gts->ts_gms;
460 int ret;
461
462 while (1) {
463 wait_event(gms->ms_wait_queue,
464 atomic_read(&gms->ms_range_active) == 0);
465 prefetchw(tfh); /* Helps on hdw, required for emulator */
466 ret = gru_try_dropin(gts, tfh, cb);
467 if (ret <= 0)
468 return ret;
469 STAT(call_os_wait_queue);
470 }
471}
472
473/*
474 * This interface is called as a result of a user detecting a "call OS" bit
475 * in a user CB. Normally means that a TLB fault has occurred.
476 * cb - user virtual address of the CB
477 */
478int gru_handle_user_call_os(unsigned long cb)
479{
480 struct gru_tlb_fault_handle *tfh;
481 struct gru_thread_state *gts;
482 unsigned long __user *cbp;
483 int ucbnum, cbrnum, ret = -EINVAL;
484
485 STAT(call_os);
486 gru_dbg(grudev, "address 0x%lx\n", cb);
487
488 /* sanity check the cb pointer */
489 ucbnum = get_cb_number((void *)cb);
490 if ((cb & (GRU_HANDLE_STRIDE - 1)) || ucbnum >= GRU_NUM_CB)
491 return -EINVAL;
492 cbp = (unsigned long *)cb;
493
494 gts = gru_find_lock_gts(cb);
495 if (!gts)
496 return -EINVAL;
497
498 if (ucbnum >= gts->ts_cbr_au_count * GRU_CBR_AU_SIZE) {
499 ret = -EINVAL;
500 goto exit;
501 }
502
503 /*
504 * If force_unload is set, the UPM TLB fault is phony. The task
505 * has migrated to another node and the GSEG must be moved. Just
506 * unload the context. The task will page fault and assign a new
507 * context.
508 */
509 ret = -EAGAIN;
510 cbrnum = thread_cbr_number(gts, ucbnum);
511 if (gts->ts_force_unload) {
512 gru_unload_context(gts, 1);
513 } else if (gts->ts_gru) {
514 tfh = get_tfh_by_index(gts->ts_gru, cbrnum);
515 ret = gru_user_dropin(gts, tfh, cbp);
516 }
517exit:
518 gru_unlock_gts(gts);
519 return ret;
520}
521
522/*
523 * Fetch the exception detail information for a CB that terminated with
524 * an exception.
525 */
526int gru_get_exception_detail(unsigned long arg)
527{
528 struct control_block_extended_exc_detail excdet;
529 struct gru_control_block_extended *cbe;
530 struct gru_thread_state *gts;
531 int ucbnum, cbrnum, ret;
532
533 STAT(user_exception);
534 if (copy_from_user(&excdet, (void __user *)arg, sizeof(excdet)))
535 return -EFAULT;
536
537 gru_dbg(grudev, "address 0x%lx\n", excdet.cb);
538 gts = gru_find_lock_gts(excdet.cb);
539 if (!gts)
540 return -EINVAL;
541
542 if (gts->ts_gru) {
543 ucbnum = get_cb_number((void *)excdet.cb);
544 cbrnum = thread_cbr_number(gts, ucbnum);
545 cbe = get_cbe_by_index(gts->ts_gru, cbrnum);
546 excdet.opc = cbe->opccpy;
547 excdet.exopc = cbe->exopccpy;
548 excdet.ecause = cbe->ecause;
549 excdet.exceptdet0 = cbe->idef1upd;
550 excdet.exceptdet1 = cbe->idef3upd;
551 ret = 0;
552 } else {
553 ret = -EAGAIN;
554 }
555 gru_unlock_gts(gts);
556
557 gru_dbg(grudev, "address 0x%lx, ecause 0x%x\n", excdet.cb,
558 excdet.ecause);
559 if (!ret && copy_to_user((void __user *)arg, &excdet, sizeof(excdet)))
560 ret = -EFAULT;
561 return ret;
562}
563
564/*
565 * User request to unload a context. Content is saved for possible reload.
566 */
567int gru_user_unload_context(unsigned long arg)
568{
569 struct gru_thread_state *gts;
570 struct gru_unload_context_req req;
571
572 STAT(user_unload_context);
573 if (copy_from_user(&req, (void __user *)arg, sizeof(req)))
574 return -EFAULT;
575
576 gru_dbg(grudev, "gseg 0x%lx\n", req.gseg);
577
578 gts = gru_find_lock_gts(req.gseg);
579 if (!gts)
580 return -EINVAL;
581
582 if (gts->ts_gru)
583 gru_unload_context(gts, 1);
584 gru_unlock_gts(gts);
585
586 return 0;
587}
588
589/*
590 * User request to flush a range of virtual addresses from the GRU TLB
591 * (Mainly for testing).
592 */
593int gru_user_flush_tlb(unsigned long arg)
594{
595 struct gru_thread_state *gts;
596 struct gru_flush_tlb_req req;
597
598 STAT(user_flush_tlb);
599 if (copy_from_user(&req, (void __user *)arg, sizeof(req)))
600 return -EFAULT;
601
602 gru_dbg(grudev, "gseg 0x%lx, vaddr 0x%lx, len 0x%lx\n", req.gseg,
603 req.vaddr, req.len);
604
605 gts = gru_find_lock_gts(req.gseg);
606 if (!gts)
607 return -EINVAL;
608
609 gru_flush_tlb_range(gts->ts_gms, req.vaddr, req.vaddr + req.len);
610 gru_unlock_gts(gts);
611
612 return 0;
613}
614
615/*
616 * Register the current task as the user of the GSEG slice.
617 * Needed for TLB fault interrupt targeting.
618 */
619int gru_set_task_slice(long address)
620{
621 struct gru_thread_state *gts;
622
623 STAT(set_task_slice);
624 gru_dbg(grudev, "address 0x%lx\n", address);
625 gts = gru_alloc_locked_gts(address);
626 if (!gts)
627 return -EINVAL;
628
629 gts->ts_tgid_owner = current->tgid;
630 gru_unlock_gts(gts);
631
632 return 0;
633}
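gru_get_exception_detail() above hands the raw ecause bits back to user space; whether the failed instruction is worth retrying can be judged against EXCEPTION_RETRY_BITS from gru_instructions.h. A minimal sketch (illustrative only, not part of the patch):

	struct control_block_extended_exc_detail excdet;
	int retryable = 0;

	if (gru_get_cb_exception_detail(cb, &excdet) == 0)
		/* retryable only if no cause bit outside the retry set is on */
		retryable = !(excdet.ecause & ~EXCEPTION_RETRY_BITS);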
diff --git a/drivers/misc/sgi-gru/grufile.c b/drivers/misc/sgi-gru/grufile.c
new file mode 100644
index 000000000000..23c91f5f6b61
--- /dev/null
+++ b/drivers/misc/sgi-gru/grufile.c
@@ -0,0 +1,485 @@
1/*
2 * SN Platform GRU Driver
3 *
4 * FILE OPERATIONS & DRIVER INITIALIZATION
5 *
6 * This file supports the user system calls for file open, close, mmap, etc.
7 * It also includes the driver initialization code.
8 *
9 * Copyright (c) 2008 Silicon Graphics, Inc. All Rights Reserved.
10 *
11 * This program is free software; you can redistribute it and/or modify
12 * it under the terms of the GNU General Public License as published by
13 * the Free Software Foundation; either version 2 of the License, or
14 * (at your option) any later version.
15 *
16 * This program is distributed in the hope that it will be useful,
17 * but WITHOUT ANY WARRANTY; without even the implied warranty of
18 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
19 * GNU General Public License for more details.
20 *
21 * You should have received a copy of the GNU General Public License
22 * along with this program; if not, write to the Free Software
23 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
24 */
25
26#include <linux/module.h>
27#include <linux/kernel.h>
28#include <linux/errno.h>
29#include <linux/slab.h>
30#include <linux/mm.h>
31#include <linux/io.h>
32#include <linux/smp_lock.h>
33#include <linux/spinlock.h>
34#include <linux/device.h>
35#include <linux/miscdevice.h>
36#include <linux/interrupt.h>
37#include <linux/proc_fs.h>
38#include <linux/uaccess.h>
39#include "gru.h"
40#include "grulib.h"
41#include "grutables.h"
42
43#if defined CONFIG_X86_64
44#include <asm/genapic.h>
45#include <asm/irq.h>
46#define IS_UV() is_uv_system()
47#elif defined CONFIG_IA64
48#include <asm/system.h>
49#include <asm/sn/simulator.h>
50/* temp support for running on hardware simulator */
51#define IS_UV()			(IS_MEDUSA() || ia64_platform_is("uv"))
52#else
53#define IS_UV() 0
54#endif
55
56#include <asm/uv/uv_hub.h>
57#include <asm/uv/uv_mmrs.h>
58
59struct gru_blade_state *gru_base[GRU_MAX_BLADES] __read_mostly;
60unsigned long gru_start_paddr, gru_end_paddr __read_mostly;
61struct gru_stats_s gru_stats;
62
63/* Guaranteed user available resources on each node */
64static int max_user_cbrs, max_user_dsr_bytes;
65
66static struct file_operations gru_fops;
67static struct miscdevice gru_miscdev;
68
69
70/*
71 * gru_vma_close
72 *
73 * Called when unmapping a device mapping. Frees all gru resources
74 * and tables belonging to the vma.
75 */
76static void gru_vma_close(struct vm_area_struct *vma)
77{
78 struct gru_vma_data *vdata;
79 struct gru_thread_state *gts;
80 struct list_head *entry, *next;
81
82 if (!vma->vm_private_data)
83 return;
84
85 vdata = vma->vm_private_data;
86 vma->vm_private_data = NULL;
87 gru_dbg(grudev, "vma %p, file %p, vdata %p\n", vma, vma->vm_file,
88 vdata);
89 list_for_each_safe(entry, next, &vdata->vd_head) {
90 gts =
91 list_entry(entry, struct gru_thread_state, ts_next);
92 list_del(&gts->ts_next);
93 mutex_lock(&gts->ts_ctxlock);
94 if (gts->ts_gru)
95 gru_unload_context(gts, 0);
96 mutex_unlock(&gts->ts_ctxlock);
97 gts_drop(gts);
98 }
99 kfree(vdata);
100 STAT(vdata_free);
101}
102
103/*
104 * gru_file_mmap
105 *
106 * Called when mmapping the device. Initializes the vma with a fault handler
107 * and private data structure necessary to allocate, track, and free the
108 * underlying pages.
109 */
110static int gru_file_mmap(struct file *file, struct vm_area_struct *vma)
111{
112 if ((vma->vm_flags & (VM_SHARED | VM_WRITE)) != (VM_SHARED | VM_WRITE))
113 return -EPERM;
114
115 if (vma->vm_start & (GRU_GSEG_PAGESIZE - 1) ||
116 vma->vm_end & (GRU_GSEG_PAGESIZE - 1))
117 return -EINVAL;
118
119 vma->vm_flags |=
120 (VM_IO | VM_DONTCOPY | VM_LOCKED | VM_DONTEXPAND | VM_PFNMAP |
121 VM_RESERVED);
122 vma->vm_page_prot = PAGE_SHARED;
123 vma->vm_ops = &gru_vm_ops;
124
125 vma->vm_private_data = gru_alloc_vma_data(vma, 0);
126 if (!vma->vm_private_data)
127 return -ENOMEM;
128
129 gru_dbg(grudev, "file %p, vaddr 0x%lx, vma %p, vdata %p\n",
130 file, vma->vm_start, vma, vma->vm_private_data);
131 return 0;
132}
133
134/*
135 * Create a new GRU context
136 */
137static int gru_create_new_context(unsigned long arg)
138{
139 struct gru_create_context_req req;
140 struct vm_area_struct *vma;
141 struct gru_vma_data *vdata;
142 int ret = -EINVAL;
143
144
145 if (copy_from_user(&req, (void __user *)arg, sizeof(req)))
146 return -EFAULT;
147
148 if (req.data_segment_bytes == 0 ||
149 req.data_segment_bytes > max_user_dsr_bytes)
150 return -EINVAL;
151 if (!req.control_blocks || !req.maximum_thread_count ||
152 req.control_blocks > max_user_cbrs)
153 return -EINVAL;
154
155 if (!(req.options & GRU_OPT_MISS_MASK))
156 req.options |= GRU_OPT_MISS_FMM_INTR;
157
158 down_write(&current->mm->mmap_sem);
159 vma = gru_find_vma(req.gseg);
160 if (vma) {
161 vdata = vma->vm_private_data;
162 vdata->vd_user_options = req.options;
163 vdata->vd_dsr_au_count =
164 GRU_DS_BYTES_TO_AU(req.data_segment_bytes);
165 vdata->vd_cbr_au_count = GRU_CB_COUNT_TO_AU(req.control_blocks);
166 ret = 0;
167 }
168 up_write(&current->mm->mmap_sem);
169
170 return ret;
171}
172
173/*
174 * Get GRU configuration info (temp - for emulator testing)
175 */
176static long gru_get_config_info(unsigned long arg)
177{
178 struct gru_config_info info;
179 int nodesperblade;
180
181 if (num_online_nodes() > 1 &&
182 (uv_node_to_blade_id(1) == uv_node_to_blade_id(0)))
183 nodesperblade = 2;
184 else
185 nodesperblade = 1;
186 info.cpus = num_online_cpus();
187 info.nodes = num_online_nodes();
188 info.blades = info.nodes / nodesperblade;
189 info.chiplets = GRU_CHIPLETS_PER_BLADE * info.blades;
190
191 if (copy_to_user((void __user *)arg, &info, sizeof(info)))
192 return -EFAULT;
193 return 0;
194}
195
196/*
197 * Get GRU chiplet status
198 */
199static long gru_get_chiplet_status(unsigned long arg)
200{
201 struct gru_state *gru;
202 struct gru_chiplet_info info;
203
204 if (copy_from_user(&info, (void __user *)arg, sizeof(info)))
205 return -EFAULT;
206
207 if (info.node == -1)
208 info.node = numa_node_id();
209 if (info.node >= num_possible_nodes() ||
210 info.chiplet >= GRU_CHIPLETS_PER_HUB ||
211 info.node < 0 || info.chiplet < 0)
212 return -EINVAL;
213
214 info.blade = uv_node_to_blade_id(info.node);
215 gru = get_gru(info.blade, info.chiplet);
216
217 info.total_dsr_bytes = GRU_NUM_DSR_BYTES;
218 info.total_cbr = GRU_NUM_CB;
219 info.total_user_dsr_bytes = GRU_NUM_DSR_BYTES -
220 gru->gs_reserved_dsr_bytes;
221 info.total_user_cbr = GRU_NUM_CB - gru->gs_reserved_cbrs;
222 info.free_user_dsr_bytes = hweight64(gru->gs_dsr_map) *
223 GRU_DSR_AU_BYTES;
224 info.free_user_cbr = hweight64(gru->gs_cbr_map) * GRU_CBR_AU_SIZE;
225
226 if (copy_to_user((void __user *)arg, &info, sizeof(info)))
227 return -EFAULT;
228 return 0;
229}
230
231/*
232 * gru_file_unlocked_ioctl
233 *
234 * Handles ioctl requests on the GRU device file.
235 */
236static long gru_file_unlocked_ioctl(struct file *file, unsigned int req,
237 unsigned long arg)
238{
239 int err = -EBADRQC;
240
241 gru_dbg(grudev, "file %p\n", file);
242
243 switch (req) {
244 case GRU_CREATE_CONTEXT:
245 err = gru_create_new_context(arg);
246 break;
247 case GRU_SET_TASK_SLICE:
248 err = gru_set_task_slice(arg);
249 break;
250 case GRU_USER_GET_EXCEPTION_DETAIL:
251 err = gru_get_exception_detail(arg);
252 break;
253 case GRU_USER_UNLOAD_CONTEXT:
254 err = gru_user_unload_context(arg);
255 break;
256 case GRU_GET_CHIPLET_STATUS:
257 err = gru_get_chiplet_status(arg);
258 break;
259 case GRU_USER_FLUSH_TLB:
260 err = gru_user_flush_tlb(arg);
261 break;
262 case GRU_USER_CALL_OS:
263 err = gru_handle_user_call_os(arg);
264 break;
265 case GRU_GET_CONFIG_INFO:
266 err = gru_get_config_info(arg);
267 break;
268 }
269 return err;
270}
271
272/*
273 * Called at init time to build tables for all GRUs that are present in the
274 * system.
275 */
276static void gru_init_chiplet(struct gru_state *gru, unsigned long paddr,
277 void *vaddr, int nid, int bid, int grunum)
278{
279 spin_lock_init(&gru->gs_lock);
280 spin_lock_init(&gru->gs_asid_lock);
281 gru->gs_gru_base_paddr = paddr;
282 gru->gs_gru_base_vaddr = vaddr;
283 gru->gs_gid = bid * GRU_CHIPLETS_PER_BLADE + grunum;
284 gru->gs_blade = gru_base[bid];
285 gru->gs_blade_id = bid;
286 gru->gs_cbr_map = (GRU_CBR_AU == 64) ? ~0 : (1UL << GRU_CBR_AU) - 1;
287 gru->gs_dsr_map = (1UL << GRU_DSR_AU) - 1;
288 gru_tgh_flush_init(gru);
289 gru_dbg(grudev, "bid %d, nid %d, gru %x, vaddr %p (0x%lx)\n",
290 bid, nid, gru->gs_gid, gru->gs_gru_base_vaddr,
291 gru->gs_gru_base_paddr);
292 gru_kservices_init(gru);
293}
294
295static int gru_init_tables(unsigned long gru_base_paddr, void *gru_base_vaddr)
296{
297 int pnode, nid, bid, chip;
298 int cbrs, dsrbytes, n;
299 int order = get_order(sizeof(struct gru_blade_state));
300 struct page *page;
301 struct gru_state *gru;
302 unsigned long paddr;
303 void *vaddr;
304
305 max_user_cbrs = GRU_NUM_CB;
306 max_user_dsr_bytes = GRU_NUM_DSR_BYTES;
307 for_each_online_node(nid) {
308 bid = uv_node_to_blade_id(nid);
309 pnode = uv_node_to_pnode(nid);
310 if (gru_base[bid])
311 continue;
312 page = alloc_pages_node(nid, GFP_KERNEL, order);
313 if (!page)
314 goto fail;
315 gru_base[bid] = page_address(page);
316 memset(gru_base[bid], 0, sizeof(struct gru_blade_state));
317 gru_base[bid]->bs_lru_gru = &gru_base[bid]->bs_grus[0];
318 spin_lock_init(&gru_base[bid]->bs_lock);
319
320 dsrbytes = 0;
321 cbrs = 0;
322 for (gru = gru_base[bid]->bs_grus, chip = 0;
323 chip < GRU_CHIPLETS_PER_BLADE;
324 chip++, gru++) {
325 paddr = gru_chiplet_paddr(gru_base_paddr, pnode, chip);
326 vaddr = gru_chiplet_vaddr(gru_base_vaddr, pnode, chip);
327			gru_init_chiplet(gru, paddr, vaddr, nid, bid, chip);
328 n = hweight64(gru->gs_cbr_map) * GRU_CBR_AU_SIZE;
329 cbrs = max(cbrs, n);
330 n = hweight64(gru->gs_dsr_map) * GRU_DSR_AU_BYTES;
331 dsrbytes = max(dsrbytes, n);
332 }
333 max_user_cbrs = min(max_user_cbrs, cbrs);
334 max_user_dsr_bytes = min(max_user_dsr_bytes, dsrbytes);
335 }
336
337 return 0;
338
339fail:
340 for (nid--; nid >= 0; nid--)
341 free_pages((unsigned long)gru_base[nid], order);
342 return -ENOMEM;
343}
344
345#ifdef CONFIG_IA64
346
347static int get_base_irq(void)
348{
349 return IRQ_GRU;
350}
351
352#elif defined CONFIG_X86_64
353
354static void noop(unsigned int irq)
355{
356}
357
358static struct irq_chip gru_chip = {
359 .name = "gru",
360 .mask = noop,
361 .unmask = noop,
362 .ack = noop,
363};
364
365static int get_base_irq(void)
366{
367 set_irq_chip(IRQ_GRU, &gru_chip);
368 set_irq_chip(IRQ_GRU + 1, &gru_chip);
369 return IRQ_GRU;
370}
371#endif
372
373/*
374 * gru_init
375 *
376 * Called at boot or module load time to initialize the GRUs.
377 */
378static int __init gru_init(void)
379{
380 int ret, irq, chip;
381 char id[10];
382 void *gru_start_vaddr;
383
384 if (!IS_UV())
385 return 0;
386
387#if defined CONFIG_IA64
388 gru_start_paddr = 0xd000000000UL; /* ZZZZZZZZZZZZZZZZZZZ fixme */
389#else
390 gru_start_paddr = uv_read_local_mmr(UVH_RH_GAM_GRU_OVERLAY_CONFIG_MMR) &
391 0x7fffffffffffUL;
392
393#endif
394 gru_start_vaddr = __va(gru_start_paddr);
395 gru_end_paddr = gru_start_paddr + MAX_NUMNODES * GRU_SIZE;
396 printk(KERN_INFO "GRU space: 0x%lx - 0x%lx\n",
397 gru_start_paddr, gru_end_paddr);
398 irq = get_base_irq();
399 for (chip = 0; chip < GRU_CHIPLETS_PER_BLADE; chip++) {
400 ret = request_irq(irq + chip, gru_intr, 0, id, NULL);
401 if (ret) {
402 printk(KERN_ERR "%s: request_irq failed\n",
403 GRU_DRIVER_ID_STR);
404 goto exit1;
405 }
406 }
407
408 ret = misc_register(&gru_miscdev);
409 if (ret) {
410 printk(KERN_ERR "%s: misc_register failed\n",
411 GRU_DRIVER_ID_STR);
412 goto exit1;
413 }
414
415 ret = gru_proc_init();
416 if (ret) {
417 printk(KERN_ERR "%s: proc init failed\n", GRU_DRIVER_ID_STR);
418 goto exit2;
419 }
420
421 ret = gru_init_tables(gru_start_paddr, gru_start_vaddr);
422 if (ret) {
423 printk(KERN_ERR "%s: init tables failed\n", GRU_DRIVER_ID_STR);
424 goto exit3;
425 }
426
427 printk(KERN_INFO "%s: v%s\n", GRU_DRIVER_ID_STR,
428 GRU_DRIVER_VERSION_STR);
429 return 0;
430
431exit3:
432 gru_proc_exit();
433exit2:
434 misc_deregister(&gru_miscdev);
435exit1:
436 for (--chip; chip >= 0; chip--)
437 free_irq(irq + chip, NULL);
438 return ret;
439
440}
441
442static void __exit gru_exit(void)
443{
444 int i, bid;
445 int order = get_order(sizeof(struct gru_state) *
446 GRU_CHIPLETS_PER_BLADE);
447
448 for (i = 0; i < GRU_CHIPLETS_PER_BLADE; i++)
449 free_irq(IRQ_GRU + i, NULL);
450
451 for (bid = 0; bid < GRU_MAX_BLADES; bid++)
452 free_pages((unsigned long)gru_base[bid], order);
453
454 misc_deregister(&gru_miscdev);
455 gru_proc_exit();
456}
457
458static struct file_operations gru_fops = {
459 .owner = THIS_MODULE,
460 .unlocked_ioctl = gru_file_unlocked_ioctl,
461 .mmap = gru_file_mmap,
462};
463
464static struct miscdevice gru_miscdev = {
465 .minor = MISC_DYNAMIC_MINOR,
466 .name = "gru",
467 .fops = &gru_fops,
468};
469
470struct vm_operations_struct gru_vm_ops = {
471 .close = gru_vma_close,
472 .fault = gru_fault,
473};
474
475module_init(gru_init);
476module_exit(gru_exit);
477
478module_param(gru_options, ulong, 0644);
479MODULE_PARM_DESC(gru_options, "Various debug options");
480
481MODULE_AUTHOR("Silicon Graphics, Inc.");
482MODULE_LICENSE("GPL");
483MODULE_DESCRIPTION(GRU_DRIVER_ID_STR GRU_DRIVER_VERSION_STR);
484MODULE_VERSION(GRU_DRIVER_VERSION_STR);
485
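For orientation, the file operations above imply roughly the following user-space sequence. This is a hedged sketch only: the ioctl request codes and struct gru_create_context_req live in grulib.h, which is outside this hunk (the field names follow their usage in gru_create_new_context() above), and the mapping must additionally land on a GRU_GSEG_PAGESIZE-aligned address for gru_file_mmap() to accept it:

	int fd = open("/dev/gru", O_RDWR);
	void *gseg = mmap(NULL, GRU_GSEG_PAGESIZE, PROT_READ | PROT_WRITE,
			  MAP_SHARED, fd, 0);		/* shared + writable is required */

	struct gru_create_context_req req = {
		.gseg			= (unsigned long)gseg,
		.data_segment_bytes	= GRU_NUM_USER_DSR_BYTES,
		.control_blocks		= 16,
		.maximum_thread_count	= 1,
		.options		= GRU_OPT_MISS_FMM_INTR,
	};
	ioctl(fd, GRU_CREATE_CONTEXT, &req);		/* dispatched by gru_file_unlocked_ioctl() */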
diff --git a/drivers/misc/sgi-gru/gruhandles.h b/drivers/misc/sgi-gru/gruhandles.h
new file mode 100644
index 000000000000..d16031d62673
--- /dev/null
+++ b/drivers/misc/sgi-gru/gruhandles.h
@@ -0,0 +1,663 @@
1/*
2 * SN Platform GRU Driver
3 *
4 * GRU HANDLE DEFINITION
5 *
6 * Copyright (c) 2008 Silicon Graphics, Inc. All Rights Reserved.
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation; either version 2 of the License, or
11 * (at your option) any later version.
12 *
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
17 *
18 * You should have received a copy of the GNU General Public License
19 * along with this program; if not, write to the Free Software
20 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
21 */
22
23#ifndef __GRUHANDLES_H__
24#define __GRUHANDLES_H__
25#include "gru_instructions.h"
26
27/*
28 * Manifest constants for GRU Memory Map
29 */
30#define GRU_GSEG0_BASE 0
31#define GRU_MCS_BASE (64 * 1024 * 1024)
32#define GRU_SIZE (128UL * 1024 * 1024)
33
34/* Handle & resource counts */
35#define GRU_NUM_CB 128
36#define GRU_NUM_DSR_BYTES (32 * 1024)
37#define GRU_NUM_TFM 16
38#define GRU_NUM_TGH 24
39#define GRU_NUM_CBE 128
40#define GRU_NUM_TFH 128
41#define GRU_NUM_CCH 16
42#define GRU_NUM_GSH 1
43
44/* Maximum resource counts that can be reserved by user programs */
45#define GRU_NUM_USER_CBR GRU_NUM_CBE
46#define GRU_NUM_USER_DSR_BYTES GRU_NUM_DSR_BYTES
47
48/* Bytes per handle & handle stride. Code assumes all cb, tfh, cbe handles
49 * are the same */
50#define GRU_HANDLE_BYTES 64
51#define GRU_HANDLE_STRIDE 256
52
53/* Base addresses of handles */
54#define GRU_TFM_BASE (GRU_MCS_BASE + 0x00000)
55#define GRU_TGH_BASE (GRU_MCS_BASE + 0x08000)
56#define GRU_CBE_BASE (GRU_MCS_BASE + 0x10000)
57#define GRU_TFH_BASE (GRU_MCS_BASE + 0x18000)
58#define GRU_CCH_BASE (GRU_MCS_BASE + 0x20000)
59#define GRU_GSH_BASE (GRU_MCS_BASE + 0x30000)
60
61/* User gseg constants */
62#define GRU_GSEG_STRIDE (4 * 1024 * 1024)
63#define GSEG_BASE(a) ((a) & ~(GRU_GSEG_PAGESIZE - 1))
64
65/* Data segment constants */
66#define GRU_DSR_AU_BYTES 1024
67#define GRU_DSR_CL (GRU_NUM_DSR_BYTES / GRU_CACHE_LINE_BYTES)
68#define GRU_DSR_AU_CL (GRU_DSR_AU_BYTES / GRU_CACHE_LINE_BYTES)
69#define GRU_DSR_AU (GRU_NUM_DSR_BYTES / GRU_DSR_AU_BYTES)
70
71/* Control block constants */
72#define GRU_CBR_AU_SIZE 2
73#define GRU_CBR_AU (GRU_NUM_CBE / GRU_CBR_AU_SIZE)
74
75/* Convert resource counts to the number of AU */
76#define GRU_DS_BYTES_TO_AU(n) DIV_ROUND_UP(n, GRU_DSR_AU_BYTES)
77#define GRU_CB_COUNT_TO_AU(n) DIV_ROUND_UP(n, GRU_CBR_AU_SIZE)
78
79/* UV limits */
80#define GRU_CHIPLETS_PER_HUB 2
81#define GRU_HUBS_PER_BLADE 1
82#define GRU_CHIPLETS_PER_BLADE (GRU_HUBS_PER_BLADE * GRU_CHIPLETS_PER_HUB)
83
84/* User GRU Gseg offsets */
85#define GRU_CB_BASE 0
86#define GRU_CB_LIMIT (GRU_CB_BASE + GRU_HANDLE_STRIDE * GRU_NUM_CBE)
87#define GRU_DS_BASE 0x20000
88#define GRU_DS_LIMIT (GRU_DS_BASE + GRU_NUM_DSR_BYTES)
89
90/* Convert a GRU physical address to the chiplet offset */
91#define GSEGPOFF(h) ((h) & (GRU_SIZE - 1))
92
93/* Convert an arbitrary handle address to the beginning of the GRU segment */
94#ifndef __PLUGIN__
95#define GRUBASE(h) ((void *)((unsigned long)(h) & ~(GRU_SIZE - 1)))
96#else
97extern void *gmu_grubase(void *h);
98#define GRUBASE(h) gmu_grubase(h)
99#endif
100
101/* General addressing macros. */
102static inline void *get_gseg_base_address(void *base, int ctxnum)
103{
104 return (void *)(base + GRU_GSEG0_BASE + GRU_GSEG_STRIDE * ctxnum);
105}
106
107static inline void *get_gseg_base_address_cb(void *base, int ctxnum, int line)
108{
109 return (void *)(get_gseg_base_address(base, ctxnum) +
110 GRU_CB_BASE + GRU_HANDLE_STRIDE * line);
111}
112
113static inline void *get_gseg_base_address_ds(void *base, int ctxnum, int line)
114{
115 return (void *)(get_gseg_base_address(base, ctxnum) + GRU_DS_BASE +
116 GRU_CACHE_LINE_BYTES * line);
117}
118
119static inline struct gru_tlb_fault_map *get_tfm(void *base, int ctxnum)
120{
121 return (struct gru_tlb_fault_map *)(base + GRU_TFM_BASE +
122 ctxnum * GRU_HANDLE_STRIDE);
123}
124
125static inline struct gru_tlb_global_handle *get_tgh(void *base, int ctxnum)
126{
127 return (struct gru_tlb_global_handle *)(base + GRU_TGH_BASE +
128 ctxnum * GRU_HANDLE_STRIDE);
129}
130
131static inline struct gru_control_block_extended *get_cbe(void *base, int ctxnum)
132{
133 return (struct gru_control_block_extended *)(base + GRU_CBE_BASE +
134 ctxnum * GRU_HANDLE_STRIDE);
135}
136
137static inline struct gru_tlb_fault_handle *get_tfh(void *base, int ctxnum)
138{
139 return (struct gru_tlb_fault_handle *)(base + GRU_TFH_BASE +
140 ctxnum * GRU_HANDLE_STRIDE);
141}
142
143static inline struct gru_context_configuration_handle *get_cch(void *base,
144 int ctxnum)
145{
146 return (struct gru_context_configuration_handle *)(base +
147 GRU_CCH_BASE + ctxnum * GRU_HANDLE_STRIDE);
148}
149
150static inline unsigned long get_cb_number(void *cb)
151{
152 return (((unsigned long)cb - GRU_CB_BASE) % GRU_GSEG_PAGESIZE) /
153 GRU_HANDLE_STRIDE;
154}
155
156/* Byte offset to a specific GRU chiplet. (p = pnode, c = chiplet (0 or 1)) */
157static inline unsigned long gru_chiplet_paddr(unsigned long paddr, int pnode,
158 int chiplet)
159{
160 return paddr + GRU_SIZE * (2 * pnode + chiplet);
161}
162
163static inline void *gru_chiplet_vaddr(void *vaddr, int pnode, int chiplet)
164{
165 return vaddr + GRU_SIZE * (2 * pnode + chiplet);
166}
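/*
 * Editor's example of the chiplet offset arithmetic above: with
 * GRU_SIZE = 128MB, each pnode owns two consecutive GRU_SIZE windows,
 * one per chiplet, e.g.
 *	gru_chiplet_paddr(paddr, 1, 0) = paddr + 2 * 128MB
 *	gru_chiplet_paddr(paddr, 1, 1) = paddr + 3 * 128MB
 */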
167
168
169
170/*
171 * Global TLB Fault Map
172 * Bitmap of outstanding TLB misses needing interrupt/polling service.
173 *
174 */
175struct gru_tlb_fault_map {
176 unsigned long fault_bits[BITS_TO_LONGS(GRU_NUM_CBE)];
177 unsigned long fill0[2];
178 unsigned long done_bits[BITS_TO_LONGS(GRU_NUM_CBE)];
179 unsigned long fill1[2];
180};
181
182/*
183 * TGH - TLB Global Handle
184 * Used for TLB flushing.
185 *
186 */
187struct gru_tlb_global_handle {
188 unsigned int cmd:1; /* DW 0 */
189 unsigned int delresp:1;
190 unsigned int opc:1;
191 unsigned int fill1:5;
192
193 unsigned int fill2:8;
194
195 unsigned int status:2;
196 unsigned long fill3:2;
197 unsigned int state:3;
198 unsigned long fill4:1;
199
200 unsigned int cause:3;
201 unsigned long fill5:37;
202
203 unsigned long vaddr:64; /* DW 1 */
204
205 unsigned int asid:24; /* DW 2 */
206 unsigned int fill6:8;
207
208 unsigned int pagesize:5;
209 unsigned int fill7:11;
210
211 unsigned int global:1;
212 unsigned int fill8:15;
213
214 unsigned long vaddrmask:39; /* DW 3 */
215 unsigned int fill9:9;
216 unsigned int n:10;
217 unsigned int fill10:6;
218
219 unsigned int ctxbitmap:16; /* DW4 */
220 unsigned long fill11[3];
221};
222
223enum gru_tgh_cmd {
224 TGHCMD_START
225};
226
227enum gru_tgh_opc {
228 TGHOP_TLBNOP,
229 TGHOP_TLBINV
230};
231
232enum gru_tgh_status {
233 TGHSTATUS_IDLE,
234 TGHSTATUS_EXCEPTION,
235 TGHSTATUS_ACTIVE
236};
237
238enum gru_tgh_state {
239 TGHSTATE_IDLE,
240 TGHSTATE_PE_INVAL,
241 TGHSTATE_INTERRUPT_INVAL,
242 TGHSTATE_WAITDONE,
243 TGHSTATE_RESTART_CTX,
244};
245
246/*
247 * TFH - TLB Fault Handle
248 * Used for TLB dropins into the GRU TLB.
249 *
250 */
251struct gru_tlb_fault_handle {
252 unsigned int cmd:1; /* DW 0 - low 32*/
253 unsigned int delresp:1;
254 unsigned int fill0:2;
255 unsigned int opc:3;
256 unsigned int fill1:9;
257
258 unsigned int status:2;
259 unsigned int fill2:1;
260 unsigned int color:1;
261 unsigned int state:3;
262 unsigned int fill3:1;
263
264 unsigned int cause:7; /* DW 0 - high 32 */
265 unsigned int fill4:1;
266
267 unsigned int indexway:12;
268 unsigned int fill5:4;
269
270 unsigned int ctxnum:4;
271 unsigned int fill6:12;
272
273 unsigned long missvaddr:64; /* DW 1 */
274
275 unsigned int missasid:24; /* DW 2 */
276 unsigned int fill7:8;
277 unsigned int fillasid:24;
278 unsigned int dirty:1;
279 unsigned int gaa:2;
280 unsigned long fill8:5;
281
282 unsigned long pfn:41; /* DW 3 */
283 unsigned int fill9:7;
284 unsigned int pagesize:5;
285 unsigned int fill10:11;
286
287 unsigned long fillvaddr:64; /* DW 4 */
288
289 unsigned long fill11[3];
290};
291
292enum gru_tfh_opc {
293 TFHOP_NOOP,
294 TFHOP_RESTART,
295 TFHOP_WRITE_ONLY,
296 TFHOP_WRITE_RESTART,
297 TFHOP_EXCEPTION,
298 TFHOP_USER_POLLING_MODE = 7,
299};
300
301enum tfh_status {
302 TFHSTATUS_IDLE,
303 TFHSTATUS_EXCEPTION,
304 TFHSTATUS_ACTIVE,
305};
306
307enum tfh_state {
308 TFHSTATE_INACTIVE,
309 TFHSTATE_IDLE,
310 TFHSTATE_MISS_UPM,
311 TFHSTATE_MISS_FMM,
312 TFHSTATE_HW_ERR,
313 TFHSTATE_WRITE_TLB,
314 TFHSTATE_RESTART_CBR,
315};
316
317/* TFH cause bits */
318enum tfh_cause {
319 TFHCAUSE_NONE,
320 TFHCAUSE_TLB_MISS,
321 TFHCAUSE_TLB_MOD,
322 TFHCAUSE_HW_ERROR_RR,
323 TFHCAUSE_HW_ERROR_MAIN_ARRAY,
324 TFHCAUSE_HW_ERROR_VALID,
325 TFHCAUSE_HW_ERROR_PAGESIZE,
326 TFHCAUSE_INSTRUCTION_EXCEPTION,
327 TFHCAUSE_UNCORRECTIBLE_ERROR,
328};
329
330/* GAA values */
331#define GAA_RAM 0x0
332#define GAA_NCRAM 0x2
333#define GAA_MMIO 0x1
334#define GAA_REGISTER 0x3
335
336/* GRU paddr shift for pfn. (NOTE: shift is NOT by actual pagesize) */
337#define GRU_PADDR_SHIFT 12
338
339/*
340 * Context Configuration handle
341 * Used to allocate resources to a GSEG context.
342 *
343 */
344struct gru_context_configuration_handle {
345 unsigned int cmd:1; /* DW0 */
346 unsigned int delresp:1;
347 unsigned int opc:3;
348 unsigned int unmap_enable:1;
349 unsigned int req_slice_set_enable:1;
350 unsigned int req_slice:2;
351 unsigned int cb_int_enable:1;
352 unsigned int tlb_int_enable:1;
353 unsigned int tfm_fault_bit_enable:1;
354 unsigned int tlb_int_select:4;
355
356 unsigned int status:2;
357 unsigned int state:2;
358 unsigned int reserved2:4;
359
360 unsigned int cause:4;
361 unsigned int tfm_done_bit_enable:1;
362 unsigned int unused:3;
363
364 unsigned int dsr_allocation_map;
365
366 unsigned long cbr_allocation_map; /* DW1 */
367
368 unsigned int asid[8]; /* DW 2 - 5 */
369 unsigned short sizeavail[8]; /* DW 6 - 7 */
370} __attribute__ ((packed));
371
372enum gru_cch_opc {
373 CCHOP_START = 1,
374 CCHOP_ALLOCATE,
375 CCHOP_INTERRUPT,
376 CCHOP_DEALLOCATE,
377 CCHOP_INTERRUPT_SYNC,
378};
379
380enum gru_cch_status {
381 CCHSTATUS_IDLE,
382 CCHSTATUS_EXCEPTION,
383 CCHSTATUS_ACTIVE,
384};
385
386enum gru_cch_state {
387 CCHSTATE_INACTIVE,
388 CCHSTATE_MAPPED,
389 CCHSTATE_ACTIVE,
390 CCHSTATE_INTERRUPTED,
391};
392
393/* CCH Exception cause */
394enum gru_cch_cause {
395 CCHCAUSE_REGION_REGISTER_WRITE_ERROR = 1,
396 CCHCAUSE_ILLEGAL_OPCODE = 2,
397 CCHCAUSE_INVALID_START_REQUEST = 3,
398 CCHCAUSE_INVALID_ALLOCATION_REQUEST = 4,
399 CCHCAUSE_INVALID_DEALLOCATION_REQUEST = 5,
400 CCHCAUSE_INVALID_INTERRUPT_REQUEST = 6,
401 CCHCAUSE_CCH_BUSY = 7,
402 CCHCAUSE_NO_CBRS_TO_ALLOCATE = 8,
403 CCHCAUSE_BAD_TFM_CONFIG = 9,
404 CCHCAUSE_CBR_RESOURCES_OVERSUBSCRIPED = 10,
405 CCHCAUSE_DSR_RESOURCES_OVERSUBSCRIPED = 11,
406 CCHCAUSE_CBR_DEALLOCATION_ERROR = 12,
407};
408/*
409 * CBE - Control Block Extended
410 * Maintains internal GRU state for active CBs.
411 *
412 */
413struct gru_control_block_extended {
414 unsigned int reserved0:1; /* DW 0 - low */
415 unsigned int imacpy:3;
416 unsigned int reserved1:4;
417 unsigned int xtypecpy:3;
418 unsigned int iaa0cpy:2;
419 unsigned int iaa1cpy:2;
420 unsigned int reserved2:1;
421 unsigned int opccpy:8;
422 unsigned int exopccpy:8;
423
424 unsigned int idef2cpy:22; /* DW 0 - high */
425 unsigned int reserved3:10;
426
427 unsigned int idef4cpy:22; /* DW 1 */
428 unsigned int reserved4:10;
429 unsigned int idef4upd:22;
430 unsigned int reserved5:10;
431
432 unsigned long idef1upd:64; /* DW 2 */
433
434 unsigned long idef5cpy:64; /* DW 3 */
435
436 unsigned long idef6cpy:64; /* DW 4 */
437
438 unsigned long idef3upd:64; /* DW 5 */
439
440 unsigned long idef5upd:64; /* DW 6 */
441
442 unsigned int idef2upd:22; /* DW 7 */
443 unsigned int reserved6:10;
444
445 unsigned int ecause:20;
446 unsigned int cbrstate:4;
447 unsigned int cbrexecstatus:8;
448};
449
450enum gru_cbr_state {
451 CBRSTATE_INACTIVE,
452 CBRSTATE_IDLE,
453 CBRSTATE_PE_CHECK,
454 CBRSTATE_QUEUED,
455 CBRSTATE_WAIT_RESPONSE,
456 CBRSTATE_INTERRUPTED,
457 CBRSTATE_INTERRUPTED_MISS_FMM,
458 CBRSTATE_BUSY_INTERRUPT_MISS_FMM,
459 CBRSTATE_INTERRUPTED_MISS_UPM,
460 CBRSTATE_BUSY_INTERRUPTED_MISS_UPM,
461 CBRSTATE_REQUEST_ISSUE,
462 CBRSTATE_BUSY_INTERRUPT,
463};
464
465/* CBE cbrexecstatus bits */
466#define CBR_EXS_ABORT_OCC_BIT 0
467#define CBR_EXS_INT_OCC_BIT 1
468#define CBR_EXS_PENDING_BIT 2
469#define CBR_EXS_QUEUED_BIT 3
470#define CBR_EXS_TLBHW_BIT 4
471#define CBR_EXS_EXCEPTION_BIT 5
472
473#define CBR_EXS_ABORT_OCC (1 << CBR_EXS_ABORT_OCC_BIT)
474#define CBR_EXS_INT_OCC (1 << CBR_EXS_INT_OCC_BIT)
475#define CBR_EXS_PENDING (1 << CBR_EXS_PENDING_BIT)
476#define CBR_EXS_QUEUED (1 << CBR_EXS_QUEUED_BIT)
477#define CBR_EXS_TLBHW (1 << CBR_EXS_TLBHW_BIT)
478#define CBR_EXS_EXCEPTION (1 << CBR_EXS_EXCEPTION_BIT)
479
480/* CBE ecause bits - defined in gru_instructions.h */
481
482/*
483 * Convert a processor pagesize into the strange encoded pagesize used by the
484 * GRU. Processor pagesize is encoded as the log2 of bytes per page (i.e. PAGE_SHIFT).
485 * pagesize log pagesize grupagesize
486 * 4k 12 0
487 * 16k 14 1
488 * 64k 16 2
489 * 256k 18 3
490 * 1m 20 4
491 * 2m 21 5
492 * 4m 22 6
493 * 16m 24 7
494 * 64m 26 8
495 * ...
496 */
497#define GRU_PAGESIZE(sh) ((((sh) > 20 ? (sh) + 2: (sh)) >> 1) - 6)
498#define GRU_SIZEAVAIL(sh) (1UL << GRU_PAGESIZE(sh))
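/*
 * Editor's worked example of the encoding above:
 *	GRU_PAGESIZE(12) = ((12 >> 1) - 6)       = 0	(4k)
 *	GRU_PAGESIZE(21) = (((21 + 2) >> 1) - 6) = 5	(2m)
 *	GRU_SIZEAVAIL(21) = 1UL << 5 = 0x20
 */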
499
500/* minimum TLB purge count to ensure a full purge */
501#define GRUMAXINVAL 1024UL
502
503
504/* Extract the status field from a kernel handle */
505#define GET_MSEG_HANDLE_STATUS(h) (((*(unsigned long *)(h)) >> 16) & 3)
506
507static inline void start_instruction(void *h)
508{
509 unsigned long *w0 = h;
510
511 wmb(); /* setting CMD bit must be last */
512 *w0 = *w0 | 1;
513 gru_flush_cache(h);
514}
515
516static inline int wait_instruction_complete(void *h)
517{
518 int status;
519
520 do {
521 cpu_relax();
522 barrier();
523 status = GET_MSEG_HANDLE_STATUS(h);
524 } while (status == CCHSTATUS_ACTIVE);
525 return status;
526}
527
528#if defined CONFIG_IA64
529static inline void cch_allocate_set_asids(
530 struct gru_context_configuration_handle *cch, int asidval)
531{
532 int i;
533
534 for (i = 0; i <= RGN_HPAGE; i++) { /* assume HPAGE is last region */
535 cch->asid[i] = (asidval++);
536#if 0
537 /* ZZZ hugepages not supported yet */
538 if (i == RGN_HPAGE)
539 cch->sizeavail[i] = GRU_SIZEAVAIL(hpage_shift);
540 else
541#endif
542 cch->sizeavail[i] = GRU_SIZEAVAIL(PAGE_SHIFT);
543 }
544}
545#elif defined CONFIG_X86_64
546static inline void cch_allocate_set_asids(
547 struct gru_context_configuration_handle *cch, int asidval)
548{
549 int i;
550
551 for (i = 0; i < 8; i++) {
552 cch->asid[i] = asidval++;
553 cch->sizeavail[i] = GRU_SIZEAVAIL(PAGE_SHIFT) |
554 GRU_SIZEAVAIL(21);
555 }
556}
557#endif
558
559static inline int cch_allocate(struct gru_context_configuration_handle *cch,
560 int asidval, unsigned long cbrmap,
561 unsigned long dsrmap)
562{
563 cch_allocate_set_asids(cch, asidval);
564 cch->dsr_allocation_map = dsrmap;
565 cch->cbr_allocation_map = cbrmap;
566 cch->opc = CCHOP_ALLOCATE;
567 start_instruction(cch);
568 return wait_instruction_complete(cch);
569}
570
571static inline int cch_start(struct gru_context_configuration_handle *cch)
572{
573 cch->opc = CCHOP_START;
574 start_instruction(cch);
575 return wait_instruction_complete(cch);
576}
577
578static inline int cch_interrupt(struct gru_context_configuration_handle *cch)
579{
580 cch->opc = CCHOP_INTERRUPT;
581 start_instruction(cch);
582 return wait_instruction_complete(cch);
583}
584
585static inline int cch_deallocate(struct gru_context_configuration_handle *cch)
586{
587 cch->opc = CCHOP_DEALLOCATE;
588 start_instruction(cch);
589 return wait_instruction_complete(cch);
590}
591
592static inline int cch_interrupt_sync(struct gru_context_configuration_handle
593 *cch)
594{
595 cch->opc = CCHOP_INTERRUPT_SYNC;
596 start_instruction(cch);
597 return wait_instruction_complete(cch);
598}
599
600static inline int tgh_invalidate(struct gru_tlb_global_handle *tgh,
601 unsigned long vaddr, unsigned long vaddrmask,
602 int asid, int pagesize, int global, int n,
603 unsigned short ctxbitmap)
604{
605 tgh->vaddr = vaddr;
606 tgh->asid = asid;
607 tgh->pagesize = pagesize;
608 tgh->n = n;
609 tgh->global = global;
610 tgh->vaddrmask = vaddrmask;
611 tgh->ctxbitmap = ctxbitmap;
612 tgh->opc = TGHOP_TLBINV;
613 start_instruction(tgh);
614 return wait_instruction_complete(tgh);
615}
616
617static inline void tfh_write_only(struct gru_tlb_fault_handle *tfh,
618 unsigned long pfn, unsigned long vaddr,
619 int asid, int dirty, int pagesize)
620{
621 tfh->fillasid = asid;
622 tfh->fillvaddr = vaddr;
623 tfh->pfn = pfn;
624 tfh->dirty = dirty;
625 tfh->pagesize = pagesize;
626 tfh->opc = TFHOP_WRITE_ONLY;
627 start_instruction(tfh);
628}
629
630static inline void tfh_write_restart(struct gru_tlb_fault_handle *tfh,
631 unsigned long paddr, int gaa,
632 unsigned long vaddr, int asid, int dirty,
633 int pagesize)
634{
635 tfh->fillasid = asid;
636 tfh->fillvaddr = vaddr;
637 tfh->pfn = paddr >> GRU_PADDR_SHIFT;
638 tfh->gaa = gaa;
639 tfh->dirty = dirty;
640 tfh->pagesize = pagesize;
641 tfh->opc = TFHOP_WRITE_RESTART;
642 start_instruction(tfh);
643}
644
645static inline void tfh_restart(struct gru_tlb_fault_handle *tfh)
646{
647 tfh->opc = TFHOP_RESTART;
648 start_instruction(tfh);
649}
650
651static inline void tfh_user_polling_mode(struct gru_tlb_fault_handle *tfh)
652{
653 tfh->opc = TFHOP_USER_POLLING_MODE;
654 start_instruction(tfh);
655}
656
657static inline void tfh_exception(struct gru_tlb_fault_handle *tfh)
658{
659 tfh->opc = TFHOP_EXCEPTION;
660 start_instruction(tfh);
661}
662
663#endif /* __GRUHANDLES_H__ */
diff --git a/drivers/misc/sgi-gru/grukservices.c b/drivers/misc/sgi-gru/grukservices.c
new file mode 100644
index 000000000000..dfd49af0fe18
--- /dev/null
+++ b/drivers/misc/sgi-gru/grukservices.c
@@ -0,0 +1,679 @@
1/*
2 * SN Platform GRU Driver
3 *
4 * KERNEL SERVICES THAT USE THE GRU
5 *
6 * Copyright (c) 2008 Silicon Graphics, Inc. All Rights Reserved.
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation; either version 2 of the License, or
11 * (at your option) any later version.
12 *
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
17 *
18 * You should have received a copy of the GNU General Public License
19 * along with this program; if not, write to the Free Software
20 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
21 */
22
23#include <linux/kernel.h>
24#include <linux/errno.h>
25#include <linux/slab.h>
26#include <linux/mm.h>
27#include <linux/smp_lock.h>
28#include <linux/spinlock.h>
29#include <linux/device.h>
30#include <linux/miscdevice.h>
31#include <linux/proc_fs.h>
32#include <linux/interrupt.h>
33#include <linux/uaccess.h>
34#include "gru.h"
35#include "grulib.h"
36#include "grutables.h"
37#include "grukservices.h"
38#include "gru_instructions.h"
39#include <asm/uv/uv_hub.h>
40
41/*
42 * Kernel GRU Usage
43 *
44 * The following is an interim algorithm for management of kernel GRU
45 * resources. This will likely be replaced when we better understand the
46 * kernel/user requirements.
47 *
48 * At boot time, the kernel permanently reserves a fixed number of
49 * CBRs/DSRs for each cpu to use. The resources are all taken from
50 * the GRU chiplet 1 on the blade. This leaves the full set of resources
51 * of chiplet 0 available to be allocated to a single user.
52 */
53
54/* Blade percpu resources PERMANENTLY reserved for kernel use */
55#define GRU_NUM_KERNEL_CBR 1
56#define GRU_NUM_KERNEL_DSR_BYTES 256
57#define KERNEL_CTXNUM 15
58
59/* GRU instruction attributes for all instructions */
60#define IMA IMA_CB_DELAY
61
62/* GRU cacheline size is always 64 bytes - even on arches with 128 byte lines */
63#define __gru_cacheline_aligned__ \
64 __attribute__((__aligned__(GRU_CACHE_LINE_BYTES)))
65
66#define MAGIC 0x1234567887654321UL
67
68/* Default retry count for GRU errors on kernel instructions */
69#define EXCEPTION_RETRY_LIMIT 3
70
71/* Status of message queue sections */
72#define MQS_EMPTY 0
73#define MQS_FULL 1
74#define MQS_NOOP 2
75
76/*----------------- RESOURCE MANAGEMENT -------------------------------------*/
77/* optimized for x86_64 */
78struct message_queue {
79 union gru_mesqhead head __gru_cacheline_aligned__; /* CL 0 */
80 int qlines; /* DW 1 */
81 long hstatus[2];
82 void *next __gru_cacheline_aligned__;/* CL 1 */
83 void *limit;
84 void *start;
85 void *start2;
86 char data ____cacheline_aligned; /* CL 2 */
87};
88
89/* First word in every message - used by mesq interface */
90struct message_header {
91 char present;
92 char present2;
93 char lines;
94 char fill;
95};
96
97#define QLINES(mq) ((mq) + offsetof(struct message_queue, qlines))
98#define HSTATUS(mq, h) ((mq) + offsetof(struct message_queue, hstatus[h]))
99
100static int gru_get_cpu_resources(int dsr_bytes, void **cb, void **dsr)
101{
102 struct gru_blade_state *bs;
103 int lcpu;
104
105 BUG_ON(dsr_bytes > GRU_NUM_KERNEL_DSR_BYTES);
106 preempt_disable();
107 bs = gru_base[uv_numa_blade_id()];
108 lcpu = uv_blade_processor_id();
109 *cb = bs->kernel_cb + lcpu * GRU_HANDLE_STRIDE;
110 *dsr = bs->kernel_dsr + lcpu * GRU_NUM_KERNEL_DSR_BYTES;
111 return 0;
112}
113
114static void gru_free_cpu_resources(void *cb, void *dsr)
115{
116 preempt_enable();
117}
118
119int gru_get_cb_exception_detail(void *cb,
120 struct control_block_extended_exc_detail *excdet)
121{
122 struct gru_control_block_extended *cbe;
123
124 cbe = get_cbe(GRUBASE(cb), get_cb_number(cb));
125 excdet->opc = cbe->opccpy;
126 excdet->exopc = cbe->exopccpy;
127 excdet->ecause = cbe->ecause;
128 excdet->exceptdet0 = cbe->idef1upd;
129 excdet->exceptdet1 = cbe->idef3upd;
130 return 0;
131}
132
133char *gru_get_cb_exception_detail_str(int ret, void *cb,
134 char *buf, int size)
135{
136 struct gru_control_block_status *gen = (void *)cb;
137 struct control_block_extended_exc_detail excdet;
138
139 if (ret > 0 && gen->istatus == CBS_EXCEPTION) {
140 gru_get_cb_exception_detail(cb, &excdet);
141 snprintf(buf, size,
142 "GRU exception: cb %p, opc %d, exopc %d, ecause 0x%x,"
143 " excdet0 0x%lx, excdet1 0x%x",
144 gen, excdet.opc, excdet.exopc, excdet.ecause,
145 excdet.exceptdet0, excdet.exceptdet1);
146 } else {
147 snprintf(buf, size, "No exception");
148 }
149 return buf;
150}
151
152static int gru_wait_idle_or_exception(struct gru_control_block_status *gen)
153{
154 while (gen->istatus >= CBS_ACTIVE) {
155 cpu_relax();
156 barrier();
157 }
158 return gen->istatus;
159}
160
161static int gru_retry_exception(void *cb)
162{
163 struct gru_control_block_status *gen = (void *)cb;
164 struct control_block_extended_exc_detail excdet;
165 int retry = EXCEPTION_RETRY_LIMIT;
166
167 while (1) {
168 if (gru_get_cb_message_queue_substatus(cb))
169 break;
170 if (gru_wait_idle_or_exception(gen) == CBS_IDLE)
171 return CBS_IDLE;
172
173 gru_get_cb_exception_detail(cb, &excdet);
174 if (excdet.ecause & ~EXCEPTION_RETRY_BITS)
175 break;
176 if (retry-- == 0)
177 break;
178 gen->icmd = 1;
179 gru_flush_cache(gen);
180 }
181 return CBS_EXCEPTION;
182}
183
184int gru_check_status_proc(void *cb)
185{
186 struct gru_control_block_status *gen = (void *)cb;
187 int ret;
188
189 ret = gen->istatus;
190 if (ret != CBS_EXCEPTION)
191 return ret;
192 return gru_retry_exception(cb);
193
194}
195
196int gru_wait_proc(void *cb)
197{
198 struct gru_control_block_status *gen = (void *)cb;
199 int ret;
200
201 ret = gru_wait_idle_or_exception(gen);
202 if (ret == CBS_EXCEPTION)
203 ret = gru_retry_exception(cb);
204
205 return ret;
206}
207
208void gru_abort(int ret, void *cb, char *str)
209{
210 char buf[GRU_EXC_STR_SIZE];
211
212 panic("GRU FATAL ERROR: %s - %s\n", str,
213 gru_get_cb_exception_detail_str(ret, cb, buf, sizeof(buf)));
214}
215
216void gru_wait_abort_proc(void *cb)
217{
218 int ret;
219
220 ret = gru_wait_proc(cb);
221 if (ret)
222 gru_abort(ret, cb, "gru_wait_abort");
223}
224
225
226/*------------------------------ MESSAGE QUEUES -----------------------------*/
227
228/* Internal status. These are NOT returned to the user. */
229#define MQIE_AGAIN -1 /* try again */
230
231
232/*
233 * Save/restore the "present" flag that is in the second line of 2-line
234 * messages
235 */
236static inline int get_present2(void *p)
237{
238 struct message_header *mhdr = p + GRU_CACHE_LINE_BYTES;
239 return mhdr->present;
240}
241
242static inline void restore_present2(void *p, int val)
243{
244 struct message_header *mhdr = p + GRU_CACHE_LINE_BYTES;
245 mhdr->present = val;
246}
247
248/*
249 * Create a message queue.
250 * bytes - size of message queue in bytes (includes the 2 cache line header).
251 */
252int gru_create_message_queue(void *p, unsigned int bytes)
253{
254 struct message_queue *mq = p;
255 unsigned int qlines;
256
257 qlines = bytes / GRU_CACHE_LINE_BYTES - 2;
258 memset(mq, 0, bytes);
259 mq->start = &mq->data;
260 mq->start2 = &mq->data + (qlines / 2 - 1) * GRU_CACHE_LINE_BYTES;
261 mq->next = &mq->data;
262 mq->limit = &mq->data + (qlines - 2) * GRU_CACHE_LINE_BYTES;
263 mq->qlines = qlines;
264 mq->hstatus[0] = 0;
265 mq->hstatus[1] = 1;
266 mq->head = gru_mesq_head(2, qlines / 2 + 1);
267 return 0;
268}
269EXPORT_SYMBOL_GPL(gru_create_message_queue);
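/*
 * Editor's layout example (hypothetical 4096 byte queue, not from the
 * original source): qlines = 4096/64 - 2 = 62, so start2 points 30 cache
 * lines past data, limit points 60 cache lines past data, and the initial
 * head is gru_mesq_head(2, qlines/2 + 1) = gru_mesq_head(2, 32).
 */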
270
271/*
272 * Send a NOOP message to a message queue
273 * Returns:
274 * 0 - if queue is full after the send. This is the normal case
275 * but various races can change this.
276 * -1 - if mesq sent successfully but queue not full
277 * >0 - unexpected error. MQE_xxx returned
278 */
279static int send_noop_message(void *cb,
280 unsigned long mq, void *mesg)
281{
282 const struct message_header noop_header = {
283 .present = MQS_NOOP, .lines = 1};
284 unsigned long m;
285 int substatus, ret;
286 struct message_header save_mhdr, *mhdr = mesg;
287
288 STAT(mesq_noop);
289 save_mhdr = *mhdr;
290 *mhdr = noop_header;
291 gru_mesq(cb, mq, gru_get_tri(mhdr), 1, IMA);
292 ret = gru_wait(cb);
293
294 if (ret) {
295 substatus = gru_get_cb_message_queue_substatus(cb);
296 switch (substatus) {
297 case CBSS_NO_ERROR:
298 STAT(mesq_noop_unexpected_error);
299 ret = MQE_UNEXPECTED_CB_ERR;
300 break;
301 case CBSS_LB_OVERFLOWED:
302 STAT(mesq_noop_lb_overflow);
303 ret = MQE_CONGESTION;
304 break;
305 case CBSS_QLIMIT_REACHED:
306 STAT(mesq_noop_qlimit_reached);
307 ret = 0;
308 break;
309 case CBSS_AMO_NACKED:
310 STAT(mesq_noop_amo_nacked);
311 ret = MQE_CONGESTION;
312 break;
313 case CBSS_PUT_NACKED:
314 STAT(mesq_noop_put_nacked);
315 m = mq + (gru_get_amo_value_head(cb) << 6);
316 gru_vstore(cb, m, gru_get_tri(mesg), XTYPE_CL, 1, 1,
317 IMA);
318 if (gru_wait(cb) == CBS_IDLE)
319 ret = MQIE_AGAIN;
320 else
321 ret = MQE_UNEXPECTED_CB_ERR;
322 break;
323 case CBSS_PAGE_OVERFLOW:
324 default:
325 BUG();
326 }
327 }
328 *mhdr = save_mhdr;
329 return ret;
330}
331
332/*
333 * Handle a gru_mesq queue-full condition.
334 */
335static int send_message_queue_full(void *cb,
336 unsigned long mq, void *mesg, int lines)
337{
338 union gru_mesqhead mqh;
339 unsigned int limit, head;
340 unsigned long avalue;
341 int half, qlines, save;
342
343 /* Determine if switching to first/second half of q */
344 avalue = gru_get_amo_value(cb);
345 head = gru_get_amo_value_head(cb);
346 limit = gru_get_amo_value_limit(cb);
347
348 /*
349 * Fetch "qlines" from the queue header. Since the queue may be
350 * in memory that can't be accessed using socket addresses, use
351 * the GRU to access the data. Use DSR space from the message.
352 */
353 save = *(int *)mesg;
354 gru_vload(cb, QLINES(mq), gru_get_tri(mesg), XTYPE_W, 1, 1, IMA);
355 if (gru_wait(cb) != CBS_IDLE)
356 goto cberr;
357 qlines = *(int *)mesg;
358 *(int *)mesg = save;
359 half = (limit != qlines);
360
361 if (half)
362 mqh = gru_mesq_head(qlines / 2 + 1, qlines);
363 else
364 mqh = gru_mesq_head(2, qlines / 2 + 1);
365
366 /* Try to get lock for switching head pointer */
367 gru_gamir(cb, EOP_IR_CLR, HSTATUS(mq, half), XTYPE_DW, IMA);
368 if (gru_wait(cb) != CBS_IDLE)
369 goto cberr;
370 if (!gru_get_amo_value(cb)) {
371 STAT(mesq_qf_locked);
372 return MQE_QUEUE_FULL;
373 }
374
375 /* Got the lock. Send optional NOP if queue not full. */
376 if (head != limit) {
377 if (send_noop_message(cb, mq, mesg)) {
378 gru_gamir(cb, EOP_IR_INC, HSTATUS(mq, half),
379 XTYPE_DW, IMA);
380 if (gru_wait(cb) != CBS_IDLE)
381 goto cberr;
382 STAT(mesq_qf_noop_not_full);
383 return MQIE_AGAIN;
384 }
385 avalue++;
386 }
387
388 /* Then flip queuehead to other half of queue. */
389 gru_gamer(cb, EOP_ERR_CSWAP, mq, XTYPE_DW, mqh.val, avalue, IMA);
390 if (gru_wait(cb) != CBS_IDLE)
391 goto cberr;
392
393 /* If the queue head swap was not successful, clear the hstatus lock */
394 if (gru_get_amo_value(cb) != avalue) {
395 STAT(mesq_qf_switch_head_failed);
396 gru_gamir(cb, EOP_IR_INC, HSTATUS(mq, half), XTYPE_DW, IMA);
397 if (gru_wait(cb) != CBS_IDLE)
398 goto cberr;
399 }
400 return MQIE_AGAIN;
401cberr:
402 STAT(mesq_qf_unexpected_error);
403 return MQE_UNEXPECTED_CB_ERR;
404}
405
406
407/*
408 * Handle a gru_mesq failure. Some of these failures are software recoverable
409 * or retryable.
410 */
411static int send_message_failure(void *cb,
412 unsigned long mq,
413 void *mesg,
414 int lines)
415{
416 int substatus, ret = 0;
417 unsigned long m;
418
419 substatus = gru_get_cb_message_queue_substatus(cb);
420 switch (substatus) {
421 case CBSS_NO_ERROR:
422 STAT(mesq_send_unexpected_error);
423 ret = MQE_UNEXPECTED_CB_ERR;
424 break;
425 case CBSS_LB_OVERFLOWED:
426 STAT(mesq_send_lb_overflow);
427 ret = MQE_CONGESTION;
428 break;
429 case CBSS_QLIMIT_REACHED:
430 STAT(mesq_send_qlimit_reached);
431 ret = send_message_queue_full(cb, mq, mesg, lines);
432 break;
433 case CBSS_AMO_NACKED:
434 STAT(mesq_send_amo_nacked);
435 ret = MQE_CONGESTION;
436 break;
437 case CBSS_PUT_NACKED:
438 STAT(mesq_send_put_nacked);
439 m = mq + (gru_get_amo_value_head(cb) << 6);
440 gru_vstore(cb, m, gru_get_tri(mesg), XTYPE_CL, lines, 1, IMA);
441 if (gru_wait(cb) == CBS_IDLE)
442 ret = MQE_OK;
443 else
444 ret = MQE_UNEXPECTED_CB_ERR;
445 break;
446 default:
447 BUG();
448 }
449 return ret;
450}
451
452/*
453 * Send a message to a message queue
454 * mq message queue (a UV global physical address)
455 * mesg pointer to message. Must be 64-bit aligned
456 * (the send uses a per-cpu kernel CB/DSR internally)
457 * bytes message size (<= 2 CL)
458 */
459int gru_send_message_gpa(unsigned long mq, void *mesg, unsigned int bytes)
460{
461 struct message_header *mhdr;
462 void *cb;
463 void *dsr;
464 int istatus, clines, ret;
465
466 STAT(mesq_send);
467 BUG_ON(bytes < sizeof(int) || bytes > 2 * GRU_CACHE_LINE_BYTES);
468
469 clines = (bytes + GRU_CACHE_LINE_BYTES - 1) / GRU_CACHE_LINE_BYTES;
470 if (gru_get_cpu_resources(bytes, &cb, &dsr))
471 return MQE_BUG_NO_RESOURCES;
472 memcpy(dsr, mesg, bytes);
473 mhdr = dsr;
474 mhdr->present = MQS_FULL;
475 mhdr->lines = clines;
476 if (clines == 2) {
477 mhdr->present2 = get_present2(mhdr);
478 restore_present2(mhdr, MQS_FULL);
479 }
480
481 do {
482 ret = MQE_OK;
483 gru_mesq(cb, mq, gru_get_tri(mhdr), clines, IMA);
484 istatus = gru_wait(cb);
485 if (istatus != CBS_IDLE)
486 ret = send_message_failure(cb, mq, dsr, clines);
487 } while (ret == MQIE_AGAIN);
488 gru_free_cpu_resources(cb, dsr);
489
490 if (ret)
491 STAT(mesq_send_failed);
492 return ret;
493}
494EXPORT_SYMBOL_GPL(gru_send_message_gpa);
495
496/*
497 * Advance the receive pointer for the queue to the next message.
498 */
499void gru_free_message(void *rmq, void *mesg)
500{
501 struct message_queue *mq = rmq;
502 struct message_header *mhdr = mq->next;
503 void *next, *pnext;
504 int half = -1;
505 int lines = mhdr->lines;
506
507 if (lines == 2)
508 restore_present2(mhdr, MQS_EMPTY);
509 mhdr->present = MQS_EMPTY;
510
511 pnext = mq->next;
512 next = pnext + GRU_CACHE_LINE_BYTES * lines;
513 if (next == mq->limit) {
514 next = mq->start;
515 half = 1;
516 } else if (pnext < mq->start2 && next >= mq->start2) {
517 half = 0;
518 }
519
520 if (half >= 0)
521 mq->hstatus[half] = 1;
522 mq->next = next;
523}
524EXPORT_SYMBOL_GPL(gru_free_message);
525
526/*
527 * Get next message from message queue. Return NULL if no message
528 * present. User must call gru_free_message() to move to next message.
529 * rmq message queue
530 */
531void *gru_get_next_message(void *rmq)
532{
533 struct message_queue *mq = rmq;
534 struct message_header *mhdr = mq->next;
535 int present = mhdr->present;
536
537 /* skip NOOP messages */
538 STAT(mesq_receive);
539 while (present == MQS_NOOP) {
540 gru_free_message(rmq, mhdr);
541 mhdr = mq->next;
542 present = mhdr->present;
543 }
544
545 /* Wait for both halves of 2 line messages */
546 if (present == MQS_FULL && mhdr->lines == 2 &&
547 get_present2(mhdr) == MQS_EMPTY)
548 present = MQS_EMPTY;
549
550 if (!present) {
551 STAT(mesq_receive_none);
552 return NULL;
553 }
554
555 if (mhdr->lines == 2)
556 restore_present2(mhdr, mhdr->present2);
557
558 return mhdr;
559}
560EXPORT_SYMBOL_GPL(gru_get_next_message);
561
562/* ---------------------- GRU DATA COPY FUNCTIONS ---------------------------*/
563
564/*
565 * Copy a block of data using the GRU resources
566 */
567int gru_copy_gpa(unsigned long dest_gpa, unsigned long src_gpa,
568 unsigned int bytes)
569{
570 void *cb;
571 void *dsr;
572 int ret;
573
574 STAT(copy_gpa);
575 if (gru_get_cpu_resources(GRU_NUM_KERNEL_DSR_BYTES, &cb, &dsr))
576 return MQE_BUG_NO_RESOURCES;
577 gru_bcopy(cb, src_gpa, dest_gpa, gru_get_tri(dsr),
578 XTYPE_B, bytes, GRU_NUM_KERNEL_DSR_BYTES, IMA);
579 ret = gru_wait(cb);
580 gru_free_cpu_resources(cb, dsr);
581 return ret;
582}
583EXPORT_SYMBOL_GPL(gru_copy_gpa);
584
585/* ------------------- KERNEL QUICKTESTS RUN AT STARTUP ----------------*/
586/* Temp - will delete after we gain confidence in the GRU */
587static __cacheline_aligned unsigned long word0;
588static __cacheline_aligned unsigned long word1;
589
590static int quicktest(struct gru_state *gru)
591{
592 void *cb;
593 void *ds;
594 unsigned long *p;
595
596 cb = get_gseg_base_address_cb(gru->gs_gru_base_vaddr, KERNEL_CTXNUM, 0);
597 ds = get_gseg_base_address_ds(gru->gs_gru_base_vaddr, KERNEL_CTXNUM, 0);
598 p = ds;
599 word0 = MAGIC;
600
601 gru_vload(cb, uv_gpa(&word0), 0, XTYPE_DW, 1, 1, IMA);
602 if (gru_wait(cb) != CBS_IDLE)
603 BUG();
604
605 if (*(unsigned long *)ds != MAGIC)
606 BUG();
607 gru_vstore(cb, uv_gpa(&word1), 0, XTYPE_DW, 1, 1, IMA);
608 if (gru_wait(cb) != CBS_IDLE)
609 BUG();
610
611 if (word0 != word1 || word0 != MAGIC) {
612 printk
613 ("GRU quicktest err: gru %d, found 0x%lx, expected 0x%lx\n",
614 gru->gs_gid, word1, MAGIC);
615 BUG(); /* ZZZ should not be fatal */
616 }
617
618 return 0;
619}
620
621
622int gru_kservices_init(struct gru_state *gru)
623{
624 struct gru_blade_state *bs;
625 struct gru_context_configuration_handle *cch;
626 unsigned long cbr_map, dsr_map;
627 int err, num, cpus_possible;
628
629 /*
630 * Currently, resources are reserved ONLY on the second chiplet
631 * on each blade. This leaves ALL resources on chiplet 0 available
632 * for user code.
633 */
634 bs = gru->gs_blade;
635 if (gru != &bs->bs_grus[1])
636 return 0;
637
638 cpus_possible = uv_blade_nr_possible_cpus(gru->gs_blade_id);
639
640 num = GRU_NUM_KERNEL_CBR * cpus_possible;
641 cbr_map = gru_reserve_cb_resources(gru, GRU_CB_COUNT_TO_AU(num), NULL);
642 gru->gs_reserved_cbrs += num;
643
644 num = GRU_NUM_KERNEL_DSR_BYTES * cpus_possible;
645 dsr_map = gru_reserve_ds_resources(gru, GRU_DS_BYTES_TO_AU(num), NULL);
646 gru->gs_reserved_dsr_bytes += num;
647
648 gru->gs_active_contexts++;
649 __set_bit(KERNEL_CTXNUM, &gru->gs_context_map);
650 cch = get_cch(gru->gs_gru_base_vaddr, KERNEL_CTXNUM);
651
652 bs->kernel_cb = get_gseg_base_address_cb(gru->gs_gru_base_vaddr,
653 KERNEL_CTXNUM, 0);
654 bs->kernel_dsr = get_gseg_base_address_ds(gru->gs_gru_base_vaddr,
655 KERNEL_CTXNUM, 0);
656
657 lock_cch_handle(cch);
658 cch->tfm_fault_bit_enable = 0;
659 cch->tlb_int_enable = 0;
660 cch->tfm_done_bit_enable = 0;
661 cch->unmap_enable = 1;
662 err = cch_allocate(cch, 0, cbr_map, dsr_map);
663 if (err) {
664 gru_dbg(grudev,
665 "Unable to allocate kernel CCH: gru %d, err %d\n",
666 gru->gs_gid, err);
667 BUG();
668 }
669 if (cch_start(cch)) {
670 gru_dbg(grudev, "Unable to start kernel CCH: gru %d\n",
671 gru->gs_gid);
672 BUG();
673 }
674 unlock_cch_handle(cch);
675
676 if (gru_options & GRU_QUICKLOOK)
677 quicktest(gru);
678 return 0;
679}
diff --git a/drivers/misc/sgi-gru/grukservices.h b/drivers/misc/sgi-gru/grukservices.h
new file mode 100644
index 000000000000..eb17e0a3ac61
--- /dev/null
+++ b/drivers/misc/sgi-gru/grukservices.h
@@ -0,0 +1,134 @@
1
2/*
3 * Copyright (c) 2008 Silicon Graphics, Inc. All Rights Reserved.
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation; either version 2 of the License, or
8 * (at your option) any later version.
9 *
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program; if not, write to the Free Software
17 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
18 */
19#ifndef __GRU_KSERVICES_H_
20#define __GRU_KSERVICES_H_
21
22
23/*
24 * Message queues using the GRU to send/receive messages.
25 *
26 * These functions allow the user to create a message queue for
27 * sending/receiving 1 or 2 cacheline messages using the GRU.
28 *
29 * Processes SENDING messages will use a kernel CBR/DSR to send
30 * the message. This is transparent to the caller.
31 *
32 * The receiver does not use any GRU resources.
33 *
34 * The functions support:
35 * - single receiver
36 * - multiple senders
37 * - cross partition message
38 *
39 * Missing features ZZZ:
40 * - user options for dealing with timeouts, queue full, etc.
41 * - gru_create_message_queue() needs interrupt vector info
42 */
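/*
 * Editor's usage sketch (illustrative only; "buf" and "struct my_msg" are
 * hypothetical names and error handling is omitted).  The queue memory
 * must be physically contiguous and cacheline aligned, which a single
 * kmalloc() buffer satisfies:
 *
 *	void *buf = kmalloc(32 * 64, GFP_KERNEL);	// 32 cache lines
 *	struct my_msg msg = { ... };	// first 32 bits used by transport
 *
 *	gru_create_message_queue(buf, 32 * 64);
 *	gru_send_message_gpa(uv_gpa(buf), &msg, sizeof(msg));
 *
 *	void *m = gru_get_next_message(buf);
 *	if (m)
 *		gru_free_message(buf, m);
 */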
43
44/*
45 * Initialize a user allocated chunk of memory to be used as
46 * a message queue. The caller must ensure that the queue is
47 * in contiguous physical memory and is cacheline aligned.
48 *
49 * Message queue size is the total number of bytes allocated
50 * to the queue including a 2 cacheline header that is used
51 * to manage the queue.
52 *
53 * Input:
54 * p pointer to user allocated memory.
55 * bytes size of message queue in bytes
56 *
57 * Errors:
58 * 0 OK
59 * >0 error
60 */
61extern int gru_create_message_queue(void *p, unsigned int bytes);
62
63/*
64 * Send a message to a message queue.
65 *
66 * Note: The message queue transport mechanism uses the first 32
67 * bits of the message. Users should avoid using these bits.
68 *
69 *
70 * Input:
71 * mq_gpa message queue - must be a UV global physical address
72 * mesg pointer to message. Must be 64-bit aligned
73 * bytes size of message in bytes
74 *
75 * Output:
76 * 0 message sent
77 * >0 Send failure - see error codes below
78 *
79 */
80extern int gru_send_message_gpa(unsigned long mq_gpa, void *mesg,
81 unsigned int bytes);
82
83/* Status values for gru_send_message() */
84#define MQE_OK 0 /* message sent successfully */
85#define MQE_CONGESTION 1 /* temporary congestion, try again */
86#define MQE_QUEUE_FULL 2 /* queue is full */
87#define MQE_UNEXPECTED_CB_ERR 3 /* unexpected CB error */
88#define MQE_PAGE_OVERFLOW 10 /* BUG - queue overflowed a page */
89#define MQE_BUG_NO_RESOURCES 11 /* BUG - could not alloc GRU cb/dsr */
90
91/*
92 * Advance the receive pointer for the message queue to the next message.
93 * Note: current API requires messages to be gotten & freed in order. Future
94 * API extensions may allow for out-of-order freeing.
95 *
96 * Input
97 * mq message queue
98 * mesq message being freed
99 */
100extern void gru_free_message(void *mq, void *mesq);
101
102/*
103 * Get next message from message queue. Returns pointer to
104 * message OR NULL if no message present.
105 * User must call gru_free_message() after message is processed
106 * in order to move the queue pointers to next message.
107 *
108 * Input
109 * mq message queue
110 *
111 * Output:
112 * p pointer to message
113 * NULL no message available
114 */
115extern void *gru_get_next_message(void *mq);
116
117
118/*
119 * Copy data using the GRU. Source or destination can be located in a remote
120 * partition.
121 *
122 * Input:
123 * dest_gpa destination global physical address
124 * src_gpa source global physical address
125 * bytes number of bytes to copy
126 *
127 * Output:
128 * 0 OK
129 * >0 error
130 */
131extern int gru_copy_gpa(unsigned long dest_gpa, unsigned long src_gpa,
132 unsigned int bytes);
133
134#endif /* __GRU_KSERVICES_H_ */
diff --git a/drivers/misc/sgi-gru/grulib.h b/drivers/misc/sgi-gru/grulib.h
new file mode 100644
index 000000000000..e56e196a6998
--- /dev/null
+++ b/drivers/misc/sgi-gru/grulib.h
@@ -0,0 +1,97 @@
1/*
2 * Copyright (c) 2008 Silicon Graphics, Inc. All Rights Reserved.
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU Lesser General Public License as published by
6 * the Free Software Foundation; either version 2.1 of the License, or
7 * (at your option) any later version.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU Lesser General Public License for more details.
13 *
14 * You should have received a copy of the GNU Lesser General Public License
15 * along with this program; if not, write to the Free Software
16 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
17 */
18
19#ifndef __GRULIB_H__
20#define __GRULIB_H__
21
22#define GRU_BASENAME "gru"
23#define GRU_FULLNAME "/dev/gru"
24#define GRU_IOCTL_NUM 'G'
25
26/*
27 * Maximum number of GRU segments that a user can have open
28 * ZZZ temp - set high for testing. Revisit.
29 */
30#define GRU_MAX_OPEN_CONTEXTS 32
31
32/* Set Number of Request Blocks */
33#define GRU_CREATE_CONTEXT _IOWR(GRU_IOCTL_NUM, 1, void *)
34
35/* Register task as using the slice */
36#define GRU_SET_TASK_SLICE _IOWR(GRU_IOCTL_NUM, 5, void *)
37
38/* Fetch exception detail */
39#define GRU_USER_GET_EXCEPTION_DETAIL _IOWR(GRU_IOCTL_NUM, 6, void *)
40
41/* For user call_os handling - normally a TLB fault */
42#define GRU_USER_CALL_OS _IOWR(GRU_IOCTL_NUM, 8, void *)
43
44/* For user unload context */
45#define GRU_USER_UNLOAD_CONTEXT _IOWR(GRU_IOCTL_NUM, 9, void *)
46
47/* For fetching GRU chiplet status */
48#define GRU_GET_CHIPLET_STATUS _IOWR(GRU_IOCTL_NUM, 10, void *)
49
50/* For user TLB flushing (primarily for tests) */
51#define GRU_USER_FLUSH_TLB _IOWR(GRU_IOCTL_NUM, 50, void *)
52
53/* Get some config options (primarily for tests & emulator) */
54#define GRU_GET_CONFIG_INFO _IOWR(GRU_IOCTL_NUM, 51, void *)
55
56#define CONTEXT_WINDOW_BYTES(th) (GRU_GSEG_PAGESIZE * (th))
57#define THREAD_POINTER(p, th) (p + GRU_GSEG_PAGESIZE * (th))
58
59/*
60 * Structure used to pass context creation parameters to the driver
61 */
62struct gru_create_context_req {
63 unsigned long gseg;
64 unsigned int data_segment_bytes;
65 unsigned int control_blocks;
66 unsigned int maximum_thread_count;
67 unsigned int options;
68};
69
70/*
71 * Structure used to pass unload context parameters to the driver
72 */
73struct gru_unload_context_req {
74 unsigned long gseg;
75};
76
77/*
78 * Structure used to pass TLB flush parameters to the driver
79 */
80struct gru_flush_tlb_req {
81 unsigned long gseg;
82 unsigned long vaddr;
83 size_t len;
84};
85
86/*
87 * GRU configuration info (temp - for testing)
88 */
89struct gru_config_info {
90 int cpus;
91 int blades;
92 int nodes;
93 int chiplets;
94 int fill[16];
95};
96
97#endif /* __GRULIB_H__ */
diff --git a/drivers/misc/sgi-gru/grumain.c b/drivers/misc/sgi-gru/grumain.c
new file mode 100644
index 000000000000..0eeb8dddd2f5
--- /dev/null
+++ b/drivers/misc/sgi-gru/grumain.c
@@ -0,0 +1,802 @@
1/*
2 * SN Platform GRU Driver
3 *
4 * DRIVER TABLE MANAGER + GRU CONTEXT LOAD/UNLOAD
5 *
6 * This file is subject to the terms and conditions of the GNU General Public
7 * License. See the file "COPYING" in the main directory of this archive
8 * for more details.
9 *
10 * Copyright (c) 2008 Silicon Graphics, Inc. All Rights Reserved.
11 */
12
13#include <linux/kernel.h>
14#include <linux/slab.h>
15#include <linux/mm.h>
16#include <linux/spinlock.h>
17#include <linux/sched.h>
18#include <linux/device.h>
19#include <linux/list.h>
20#include <asm/uv/uv_hub.h>
21#include "gru.h"
22#include "grutables.h"
23#include "gruhandles.h"
24
25unsigned long gru_options __read_mostly;
26
27static struct device_driver gru_driver = {
28 .name = "gru"
29};
30
31static struct device gru_device = {
32 .bus_id = {0},
33 .driver = &gru_driver,
34};
35
36struct device *grudev = &gru_device;
37
38/*
39 * Select a gru fault map to be used by the current cpu. Note that
40 * multiple cpus may be using the same map.
41 * ZZZ should "shift" be used?? Depends on HT cpu numbering
42 * ZZZ should be inline but did not work on emulator
43 */
44int gru_cpu_fault_map_id(void)
45{
46 return uv_blade_processor_id() % GRU_NUM_TFM;
47}
48
49/*--------- ASID Management -------------------------------------------
50 *
51 * Initially, assign asids sequentially from MIN_ASID .. MAX_ASID.
52 * Once MAX is reached, flush the TLB & start over. However,
53 * some asids may still be in use. There won't be many (percentage wise) still
54 * in use. Search active contexts & determine the value of the first
55 * asid in use ("x"s below). Set "limit" to this value.
56 * This defines a block of assignable asids.
57 *
58 * When "limit" is reached, search forward from limit+1 and determine the
59 * next block of assignable asids.
60 *
61 * Repeat until MAX_ASID is reached, then start over again.
62 *
63 * Each time MAX_ASID is reached, increment the asid generation. Since
64 * the search for in-use asids only checks contexts with GRUs currently
65 * assigned, asids in some contexts will be missed. Prior to loading
66 * a context, the asid generation of the GTS asid is rechecked. If it
67 * doesn't match the current generation, a new asid will be assigned.
68 *
69 * 0---------------x------------x---------------------x----|
70 * ^-next ^-limit ^-MAX_ASID
71 *
72 * All asid manipulation & context loading/unloading is protected by the
73 * gs_lock.
74 */
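/*
 * Editor's worked example of the scheme above (hypothetical asid values):
 * suppose the active contexts currently hold asids 0x7 and 0x20.  When
 * "next" reaches 0x7, the rescan advances past the collision and sets
 * limit to the next in-use asid (0x20), making 0x8 .. 0x1f the next block
 * of assignable asids; when 0x20 is reached the search repeats from there.
 */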
75
76/* Hit the asid limit. Start over */
77static int gru_wrap_asid(struct gru_state *gru)
78{
79 gru_dbg(grudev, "gru %p\n", gru);
80 STAT(asid_wrap);
81 gru->gs_asid_gen++;
82 gru_flush_all_tlb(gru);
83 return MIN_ASID;
84}
85
86/* Find the next chunk of unused asids */
87static int gru_reset_asid_limit(struct gru_state *gru, int asid)
88{
89 int i, gid, inuse_asid, limit;
90
91 gru_dbg(grudev, "gru %p, asid 0x%x\n", gru, asid);
92 STAT(asid_next);
93 limit = MAX_ASID;
94 if (asid >= limit)
95 asid = gru_wrap_asid(gru);
96 gid = gru->gs_gid;
97again:
98 for (i = 0; i < GRU_NUM_CCH; i++) {
99 if (!gru->gs_gts[i])
100 continue;
101 inuse_asid = gru->gs_gts[i]->ts_gms->ms_asids[gid].mt_asid;
102 gru_dbg(grudev, "gru %p, inuse_asid 0x%x, ctxnum %d, gts %p\n",
103 gru, inuse_asid, i, gru->gs_gts[i]);
104 if (inuse_asid == asid) {
105 asid += ASID_INC;
106 if (asid >= limit) {
107 /*
108 * empty range: reset the range limit and
109 * start over
110 */
111 limit = MAX_ASID;
112 if (asid >= MAX_ASID)
113 asid = gru_wrap_asid(gru);
114 goto again;
115 }
116 }
117
118 if ((inuse_asid > asid) && (inuse_asid < limit))
119 limit = inuse_asid;
120 }
121 gru->gs_asid_limit = limit;
122 gru->gs_asid = asid;
123 gru_dbg(grudev, "gru %p, new asid 0x%x, new_limit 0x%x\n", gru, asid,
124 limit);
125 return asid;
126}
127
128/* Assign a new ASID to a thread context. */
129static int gru_assign_asid(struct gru_state *gru)
130{
131 int asid;
132
133 spin_lock(&gru->gs_asid_lock);
134 gru->gs_asid += ASID_INC;
135 asid = gru->gs_asid;
136 if (asid >= gru->gs_asid_limit)
137 asid = gru_reset_asid_limit(gru, asid);
138 spin_unlock(&gru->gs_asid_lock);
139
140 gru_dbg(grudev, "gru %p, asid 0x%x\n", gru, asid);
141 return asid;
142}
143
144/*
145 * Clear n bits in a word. Return a word indicating the bits that were cleared.
146 * Optionally, build an array of chars that contain the bit numbers allocated.
147 */
148static unsigned long reserve_resources(unsigned long *p, int n, int mmax,
149 char *idx)
150{
151 unsigned long bits = 0;
152 int i;
153
154 do {
155 i = find_first_bit(p, mmax);
156 if (i == mmax)
157 BUG();
158 __clear_bit(i, p);
159 __set_bit(i, &bits);
160 if (idx)
161 *idx++ = i;
162 } while (--n);
163 return bits;
164}
165
166unsigned long gru_reserve_cb_resources(struct gru_state *gru, int cbr_au_count,
167 char *cbmap)
168{
169 return reserve_resources(&gru->gs_cbr_map, cbr_au_count, GRU_CBR_AU,
170 cbmap);
171}
172
173unsigned long gru_reserve_ds_resources(struct gru_state *gru, int dsr_au_count,
174 char *dsmap)
175{
176 return reserve_resources(&gru->gs_dsr_map, dsr_au_count, GRU_DSR_AU,
177 dsmap);
178}
179
180static void reserve_gru_resources(struct gru_state *gru,
181 struct gru_thread_state *gts)
182{
183 gru->gs_active_contexts++;
184 gts->ts_cbr_map =
185 gru_reserve_cb_resources(gru, gts->ts_cbr_au_count,
186 gts->ts_cbr_idx);
187 gts->ts_dsr_map =
188 gru_reserve_ds_resources(gru, gts->ts_dsr_au_count, NULL);
189}
190
191static void free_gru_resources(struct gru_state *gru,
192 struct gru_thread_state *gts)
193{
194 gru->gs_active_contexts--;
195 gru->gs_cbr_map |= gts->ts_cbr_map;
196 gru->gs_dsr_map |= gts->ts_dsr_map;
197}
198
199/*
200 * Check if a GRU has sufficient free resources to satisfy an allocation
201 * request. Note: GRU locks may or may not be held when this is called. If
202 * not held, recheck after acquiring the appropriate locks.
203 *
204 * Returns 1 if sufficient resources, 0 if not
205 */
206static int check_gru_resources(struct gru_state *gru, int cbr_au_count,
207 int dsr_au_count, int max_active_contexts)
208{
209 return hweight64(gru->gs_cbr_map) >= cbr_au_count
210 && hweight64(gru->gs_dsr_map) >= dsr_au_count
211 && gru->gs_active_contexts < max_active_contexts;
212}
213
214/*
215 * TLB management requires tracking all GRU chiplets that have loaded a GSEG
216 * context.
217 */
218static int gru_load_mm_tracker(struct gru_state *gru, struct gru_mm_struct *gms,
219 int ctxnum)
220{
221 struct gru_mm_tracker *asids = &gms->ms_asids[gru->gs_gid];
222 unsigned short ctxbitmap = (1 << ctxnum);
223 int asid;
224
225 spin_lock(&gms->ms_asid_lock);
226 asid = asids->mt_asid;
227
228 if (asid == 0 || asids->mt_asid_gen != gru->gs_asid_gen) {
229 asid = gru_assign_asid(gru);
230 asids->mt_asid = asid;
231 asids->mt_asid_gen = gru->gs_asid_gen;
232 STAT(asid_new);
233 } else {
234 STAT(asid_reuse);
235 }
236
237 BUG_ON(asids->mt_ctxbitmap & ctxbitmap);
238 asids->mt_ctxbitmap |= ctxbitmap;
239 if (!test_bit(gru->gs_gid, gms->ms_asidmap))
240 __set_bit(gru->gs_gid, gms->ms_asidmap);
241 spin_unlock(&gms->ms_asid_lock);
242
243 gru_dbg(grudev,
244 "gru %x, gms %p, ctxnum %d, asid 0x%x, asidmap 0x%lx\n",
245 gru->gs_gid, gms, ctxnum, asid, gms->ms_asidmap[0]);
246 return asid;
247}
248
249static void gru_unload_mm_tracker(struct gru_state *gru,
250 struct gru_mm_struct *gms, int ctxnum)
251{
252 struct gru_mm_tracker *asids;
253 unsigned short ctxbitmap;
254
255 asids = &gms->ms_asids[gru->gs_gid];
256 ctxbitmap = (1 << ctxnum);
257 spin_lock(&gms->ms_asid_lock);
258 BUG_ON((asids->mt_ctxbitmap & ctxbitmap) != ctxbitmap);
259 asids->mt_ctxbitmap ^= ctxbitmap;
260 gru_dbg(grudev, "gru %x, gms %p, ctxnum %d, asidmap 0x%lx\n",
261 gru->gs_gid, gms, ctxnum, gms->ms_asidmap[0]);
262 spin_unlock(&gms->ms_asid_lock);
263}
264
265/*
266 * Decrement the reference count on a GTS structure. Free the structure
267 * if the reference count goes to zero.
268 */
269void gts_drop(struct gru_thread_state *gts)
270{
271 if (gts && atomic_dec_return(&gts->ts_refcnt) == 0) {
272 gru_drop_mmu_notifier(gts->ts_gms);
273 kfree(gts);
274 STAT(gts_free);
275 }
276}
277
278/*
279 * Locate the GTS structure for the current thread.
280 */
281static struct gru_thread_state *gru_find_current_gts_nolock(struct gru_vma_data
282 *vdata, int tsid)
283{
284 struct gru_thread_state *gts;
285
286 list_for_each_entry(gts, &vdata->vd_head, ts_next)
287 if (gts->ts_tsid == tsid)
288 return gts;
289 return NULL;
290}
291
292/*
293 * Allocate a thread state structure.
294 */
295static struct gru_thread_state *gru_alloc_gts(struct vm_area_struct *vma,
296 struct gru_vma_data *vdata,
297 int tsid)
298{
299 struct gru_thread_state *gts;
300 int bytes;
301
302 bytes = DSR_BYTES(vdata->vd_dsr_au_count) +
303 CBR_BYTES(vdata->vd_cbr_au_count);
304 bytes += sizeof(struct gru_thread_state);
305 gts = kzalloc(bytes, GFP_KERNEL);
306 if (!gts)
307 return NULL;
308
309 STAT(gts_alloc);
310 atomic_set(&gts->ts_refcnt, 1);
311 mutex_init(&gts->ts_ctxlock);
312 gts->ts_cbr_au_count = vdata->vd_cbr_au_count;
313 gts->ts_dsr_au_count = vdata->vd_dsr_au_count;
314 gts->ts_user_options = vdata->vd_user_options;
315 gts->ts_tsid = tsid;
316 gts->ts_user_options = vdata->vd_user_options;
317 gts->ts_ctxnum = NULLCTX;
318 gts->ts_mm = current->mm;
319 gts->ts_vma = vma;
320 gts->ts_tlb_int_select = -1;
321 gts->ts_gms = gru_register_mmu_notifier();
322 if (!gts->ts_gms)
323 goto err;
324
325 gru_dbg(grudev, "alloc vdata %p, new gts %p\n", vdata, gts);
326 return gts;
327
328err:
329 gts_drop(gts);
330 return NULL;
331}
332
333/*
334 * Allocate a vma private data structure.
335 */
336struct gru_vma_data *gru_alloc_vma_data(struct vm_area_struct *vma, int tsid)
337{
338 struct gru_vma_data *vdata = NULL;
339
340 vdata = kmalloc(sizeof(*vdata), GFP_KERNEL);
341 if (!vdata)
342 return NULL;
343
344 INIT_LIST_HEAD(&vdata->vd_head);
345 spin_lock_init(&vdata->vd_lock);
346 gru_dbg(grudev, "alloc vdata %p\n", vdata);
347 return vdata;
348}
349
350/*
351 * Find the thread state structure for the current thread.
352 */
353struct gru_thread_state *gru_find_thread_state(struct vm_area_struct *vma,
354 int tsid)
355{
356 struct gru_vma_data *vdata = vma->vm_private_data;
357 struct gru_thread_state *gts;
358
359 spin_lock(&vdata->vd_lock);
360 gts = gru_find_current_gts_nolock(vdata, tsid);
361 spin_unlock(&vdata->vd_lock);
362 gru_dbg(grudev, "vma %p, gts %p\n", vma, gts);
363 return gts;
364}
365
366/*
367 * Allocate a new thread state for a GSEG. Note that races may allow
368 * another thread to create the gts first.
369 */
370struct gru_thread_state *gru_alloc_thread_state(struct vm_area_struct *vma,
371 int tsid)
372{
373 struct gru_vma_data *vdata = vma->vm_private_data;
374 struct gru_thread_state *gts, *ngts;
375
376 gts = gru_alloc_gts(vma, vdata, tsid);
377 if (!gts)
378 return NULL;
379
380 spin_lock(&vdata->vd_lock);
381 ngts = gru_find_current_gts_nolock(vdata, tsid);
382 if (ngts) {
383 gts_drop(gts);
384 gts = ngts;
385 STAT(gts_double_allocate);
386 } else {
387 list_add(&gts->ts_next, &vdata->vd_head);
388 }
389 spin_unlock(&vdata->vd_lock);
390 gru_dbg(grudev, "vma %p, gts %p\n", vma, gts);
391 return gts;
392}
393
394/*
395 * Free the GRU context assigned to the thread state.
396 */
397static void gru_free_gru_context(struct gru_thread_state *gts)
398{
399 struct gru_state *gru;
400
401 gru = gts->ts_gru;
402 gru_dbg(grudev, "gts %p, gru %p\n", gts, gru);
403
404 spin_lock(&gru->gs_lock);
405 gru->gs_gts[gts->ts_ctxnum] = NULL;
406 free_gru_resources(gru, gts);
407 BUG_ON(test_bit(gts->ts_ctxnum, &gru->gs_context_map) == 0);
408 __clear_bit(gts->ts_ctxnum, &gru->gs_context_map);
409 gts->ts_ctxnum = NULLCTX;
410 gts->ts_gru = NULL;
411 spin_unlock(&gru->gs_lock);
412
413 gts_drop(gts);
414 STAT(free_context);
415}
416
417/*
418 * Prefetching cachelines helps hardware performance.
419 * (Strictly a performance enhancement. Not functionally required).
420 */
421static void prefetch_data(void *p, int num, int stride)
422{
423 while (num-- > 0) {
424 prefetchw(p);
425 p += stride;
426 }
427}
428
429static inline long gru_copy_handle(void *d, void *s)
430{
431 memcpy(d, s, GRU_HANDLE_BYTES);
432 return GRU_HANDLE_BYTES;
433}
434
435/* rewrite in assembly & use lots of prefetch */
436static void gru_load_context_data(void *save, void *grubase, int ctxnum,
437 unsigned long cbrmap, unsigned long dsrmap)
438{
439 void *gseg, *cb, *cbe;
440 unsigned long length;
441 int i, scr;
442
443 gseg = grubase + ctxnum * GRU_GSEG_STRIDE;
444 length = hweight64(dsrmap) * GRU_DSR_AU_BYTES;
445 prefetch_data(gseg + GRU_DS_BASE, length / GRU_CACHE_LINE_BYTES,
446 GRU_CACHE_LINE_BYTES);
447
448 cb = gseg + GRU_CB_BASE;
449 cbe = grubase + GRU_CBE_BASE;
450 for_each_cbr_in_allocation_map(i, &cbrmap, scr) {
451 prefetch_data(cb, 1, GRU_CACHE_LINE_BYTES);
452 prefetch_data(cbe + i * GRU_HANDLE_STRIDE, 1,
453 GRU_CACHE_LINE_BYTES);
454 cb += GRU_HANDLE_STRIDE;
455 }
456
457 cb = gseg + GRU_CB_BASE;
458 for_each_cbr_in_allocation_map(i, &cbrmap, scr) {
459 save += gru_copy_handle(cb, save);
460 save += gru_copy_handle(cbe + i * GRU_HANDLE_STRIDE, save);
461 cb += GRU_HANDLE_STRIDE;
462 }
463
464 memcpy(gseg + GRU_DS_BASE, save, length);
465}
466
467static void gru_unload_context_data(void *save, void *grubase, int ctxnum,
468 unsigned long cbrmap, unsigned long dsrmap)
469{
470 void *gseg, *cb, *cbe;
471 unsigned long length;
472 int i, scr;
473
474 gseg = grubase + ctxnum * GRU_GSEG_STRIDE;
475
476 cb = gseg + GRU_CB_BASE;
477 cbe = grubase + GRU_CBE_BASE;
478 for_each_cbr_in_allocation_map(i, &cbrmap, scr) {
479 save += gru_copy_handle(save, cb);
480 save += gru_copy_handle(save, cbe + i * GRU_HANDLE_STRIDE);
481 cb += GRU_HANDLE_STRIDE;
482 }
483 length = hweight64(dsrmap) * GRU_DSR_AU_BYTES;
484 memcpy(save, gseg + GRU_DS_BASE, length);
485}
486
487void gru_unload_context(struct gru_thread_state *gts, int savestate)
488{
489 struct gru_state *gru = gts->ts_gru;
490 struct gru_context_configuration_handle *cch;
491 int ctxnum = gts->ts_ctxnum;
492
493 zap_vma_ptes(gts->ts_vma, UGRUADDR(gts), GRU_GSEG_PAGESIZE);
494 cch = get_cch(gru->gs_gru_base_vaddr, ctxnum);
495
496 lock_cch_handle(cch);
497 if (cch_interrupt_sync(cch))
498 BUG();
499 gru_dbg(grudev, "gts %p\n", gts);
500
501 gru_unload_mm_tracker(gru, gts->ts_gms, gts->ts_ctxnum);
502 if (savestate)
503 gru_unload_context_data(gts->ts_gdata, gru->gs_gru_base_vaddr,
504 ctxnum, gts->ts_cbr_map,
505 gts->ts_dsr_map);
506
507 if (cch_deallocate(cch))
508 BUG();
509 gts->ts_force_unload = 0; /* ts_force_unload locked by CCH lock */
510 unlock_cch_handle(cch);
511
512 gru_free_gru_context(gts);
513 STAT(unload_context);
514}
515
516/*
517 * Load a GRU context by copying it from the thread data structure in memory
518 * to the GRU.
519 */
520static void gru_load_context(struct gru_thread_state *gts)
521{
522 struct gru_state *gru = gts->ts_gru;
523 struct gru_context_configuration_handle *cch;
524 int err, asid, ctxnum = gts->ts_ctxnum;
525
526 gru_dbg(grudev, "gts %p\n", gts);
527 cch = get_cch(gru->gs_gru_base_vaddr, ctxnum);
528
529 lock_cch_handle(cch);
530 asid = gru_load_mm_tracker(gru, gts->ts_gms, gts->ts_ctxnum);
531 cch->tfm_fault_bit_enable =
532 (gts->ts_user_options == GRU_OPT_MISS_FMM_POLL
533 || gts->ts_user_options == GRU_OPT_MISS_FMM_INTR);
534 cch->tlb_int_enable = (gts->ts_user_options == GRU_OPT_MISS_FMM_INTR);
535 if (cch->tlb_int_enable) {
536 gts->ts_tlb_int_select = gru_cpu_fault_map_id();
537 cch->tlb_int_select = gts->ts_tlb_int_select;
538 }
539 cch->tfm_done_bit_enable = 0;
540 err = cch_allocate(cch, asid, gts->ts_cbr_map, gts->ts_dsr_map);
541 if (err) {
542 gru_dbg(grudev,
543 "err %d: cch %p, gts %p, cbr 0x%lx, dsr 0x%lx\n",
544 err, cch, gts, gts->ts_cbr_map, gts->ts_dsr_map);
545 BUG();
546 }
547
548 gru_load_context_data(gts->ts_gdata, gru->gs_gru_base_vaddr, ctxnum,
549 gts->ts_cbr_map, gts->ts_dsr_map);
550
551 if (cch_start(cch))
552 BUG();
553 unlock_cch_handle(cch);
554
555 STAT(load_context);
556}
557
558/*
559 * Update fields in an active CCH:
560 * - retarget interrupts on local blade
561 * - force a delayed context unload by clearing the CCH asids. This
562 * forces TLB misses for new GRU instructions. The context is unloaded
563 * when the next TLB miss occurs.
564 */
565static int gru_update_cch(struct gru_thread_state *gts, int int_select)
566{
567 struct gru_context_configuration_handle *cch;
568 struct gru_state *gru = gts->ts_gru;
569 int i, ctxnum = gts->ts_ctxnum, ret = 0;
570
571 cch = get_cch(gru->gs_gru_base_vaddr, ctxnum);
572
573 lock_cch_handle(cch);
574 if (cch->state == CCHSTATE_ACTIVE) {
575 if (gru->gs_gts[gts->ts_ctxnum] != gts)
576 goto exit;
577 if (cch_interrupt(cch))
578 BUG();
579 if (int_select >= 0) {
580 gts->ts_tlb_int_select = int_select;
581 cch->tlb_int_select = int_select;
582 } else {
583 for (i = 0; i < 8; i++)
584 cch->asid[i] = 0;
585 cch->tfm_fault_bit_enable = 0;
586 cch->tlb_int_enable = 0;
587 gts->ts_force_unload = 1;
588 }
589 if (cch_start(cch))
590 BUG();
591 ret = 1;
592 }
593exit:
594 unlock_cch_handle(cch);
595 return ret;
596}
597
598/*
599 * Update CCH tlb interrupt select. Required when all the following is true:
600 * - task's GRU context is loaded into a GRU
601 * - task is using interrupt notification for TLB faults
602 * - task has migrated to a different cpu on the same blade where
603 * it was previously running.
604 */
605static int gru_retarget_intr(struct gru_thread_state *gts)
606{
607 if (gts->ts_tlb_int_select < 0
608 || gts->ts_tlb_int_select == gru_cpu_fault_map_id())
609 return 0;
610
611 gru_dbg(grudev, "retarget from %d to %d\n", gts->ts_tlb_int_select,
612 gru_cpu_fault_map_id());
613 return gru_update_cch(gts, gru_cpu_fault_map_id());
614}
615
616
617/*
618 * Insufficient GRU resources available on the local blade. Steal a context from
619 * a process. This is a hack until a _real_ resource scheduler is written....
620 */
621#define next_ctxnum(n) ((n) < GRU_NUM_CCH - 2 ? (n) + 1 : 0)
622#define next_gru(b, g) (((g) < &(b)->bs_grus[GRU_CHIPLETS_PER_BLADE - 1]) ? \
623 ((g)+1) : &(b)->bs_grus[0])
624
625static void gru_steal_context(struct gru_thread_state *gts)
626{
627 struct gru_blade_state *blade;
628 struct gru_state *gru, *gru0;
629 struct gru_thread_state *ngts = NULL;
630 int ctxnum, ctxnum0, flag = 0, cbr, dsr;
631
632 cbr = gts->ts_cbr_au_count;
633 dsr = gts->ts_dsr_au_count;
634
635 preempt_disable();
636 blade = gru_base[uv_numa_blade_id()];
637 spin_lock(&blade->bs_lock);
638
639 ctxnum = next_ctxnum(blade->bs_lru_ctxnum);
640 gru = blade->bs_lru_gru;
641 if (ctxnum == 0)
642 gru = next_gru(blade, gru);
643 ctxnum0 = ctxnum;
644 gru0 = gru;
645 while (1) {
646 if (check_gru_resources(gru, cbr, dsr, GRU_NUM_CCH))
647 break;
648 spin_lock(&gru->gs_lock);
649 for (; ctxnum < GRU_NUM_CCH; ctxnum++) {
650 if (flag && gru == gru0 && ctxnum == ctxnum0)
651 break;
652 ngts = gru->gs_gts[ctxnum];
653 /*
654 * We are grabbing locks out of order, so trylock is
655 * needed. GTSs are usually not locked, so the odds of
656 * success are high. If trylock fails, try to steal a
657 * different GSEG.
658 */
659 if (ngts && mutex_trylock(&ngts->ts_ctxlock))
660 break;
661 ngts = NULL;
662 flag = 1;
663 }
664 spin_unlock(&gru->gs_lock);
665 if (ngts || (flag && gru == gru0 && ctxnum == ctxnum0))
666 break;
667 ctxnum = 0;
668 gru = next_gru(blade, gru);
669 }
670 blade->bs_lru_gru = gru;
671 blade->bs_lru_ctxnum = ctxnum;
672 spin_unlock(&blade->bs_lock);
673 preempt_enable();
674
675 if (ngts) {
676 STAT(steal_context);
677 ngts->ts_steal_jiffies = jiffies;
678 gru_unload_context(ngts, 1);
679 mutex_unlock(&ngts->ts_ctxlock);
680 } else {
681 STAT(steal_context_failed);
682 }
683 gru_dbg(grudev,
684 "stole gru %x, ctxnum %d from gts %p. Need cb %d, ds %d;"
685 " avail cb %ld, ds %ld\n",
686 gru->gs_gid, ctxnum, ngts, cbr, dsr, hweight64(gru->gs_cbr_map),
687 hweight64(gru->gs_dsr_map));
688}
689
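For illustration, the scan above starts just past the blade's last-stolen (gru, ctxnum) slot, walks the remaining contexts of that chiplet, wraps to the next chiplet, and gives up after one full cycle. The sketch below simulates only the visiting order in user space, using 2 chiplets per blade and 16 contexts per chiplet as assumed example sizes (consistent with the architecture comment in grutables.h, though the real GRU_NUM_CCH and GRU_CHIPLETS_PER_BLADE come from the headers).

#include <stdio.h>

#define GRU_NUM_CCH            16   /* assumed example value */
#define GRU_CHIPLETS_PER_BLADE  2   /* assumed example value */

#define next_ctxnum(n) ((n) < GRU_NUM_CCH - 2 ? (n) + 1 : 0)
#define next_gru(g)    (((g) + 1) % GRU_CHIPLETS_PER_BLADE)

int main(void)
{
        int lru_gru = 1, lru_ctxnum = 14;       /* example LRU position */
        int ctxnum = next_ctxnum(lru_ctxnum);
        int gru = lru_gru;
        int gru0, ctxnum0, visited = 0;

        if (ctxnum == 0)                        /* wrapped: move to next chiplet */
                gru = next_gru(gru);
        gru0 = gru;
        ctxnum0 = ctxnum;

        do {
                printf("try gru %d ctxnum %d\n", gru, ctxnum);
                visited++;
                if (++ctxnum == GRU_NUM_CCH) {
                        ctxnum = 0;
                        gru = next_gru(gru);
                }
        } while (gru != gru0 || ctxnum != ctxnum0);

        printf("gave up after %d candidate slots\n", visited);
        return 0;
}
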
690/*
691 * Scan the GRUs on the local blade & assign a GRU context.
692 */
693static struct gru_state *gru_assign_gru_context(struct gru_thread_state *gts)
694{
695 struct gru_state *gru, *grux;
696 int i, max_active_contexts;
697
698 preempt_disable();
699
700again:
701 gru = NULL;
702 max_active_contexts = GRU_NUM_CCH;
703 for_each_gru_on_blade(grux, uv_numa_blade_id(), i) {
704 if (check_gru_resources(grux, gts->ts_cbr_au_count,
705 gts->ts_dsr_au_count,
706 max_active_contexts)) {
707 gru = grux;
708 max_active_contexts = grux->gs_active_contexts;
709 if (max_active_contexts == 0)
710 break;
711 }
712 }
713
714 if (gru) {
715 spin_lock(&gru->gs_lock);
716 if (!check_gru_resources(gru, gts->ts_cbr_au_count,
717 gts->ts_dsr_au_count, GRU_NUM_CCH)) {
718 spin_unlock(&gru->gs_lock);
719 goto again;
720 }
721 reserve_gru_resources(gru, gts);
722 gts->ts_gru = gru;
723 gts->ts_ctxnum =
724 find_first_zero_bit(&gru->gs_context_map, GRU_NUM_CCH);
725 BUG_ON(gts->ts_ctxnum == GRU_NUM_CCH);
726 atomic_inc(&gts->ts_refcnt);
727 gru->gs_gts[gts->ts_ctxnum] = gts;
728 __set_bit(gts->ts_ctxnum, &gru->gs_context_map);
729 spin_unlock(&gru->gs_lock);
730
731 STAT(assign_context);
732 gru_dbg(grudev,
733 "gseg %p, gts %p, gru %x, ctx %d, cbr %d, dsr %d\n",
734 gseg_virtual_address(gts->ts_gru, gts->ts_ctxnum), gts,
735 gts->ts_gru->gs_gid, gts->ts_ctxnum,
736 gts->ts_cbr_au_count, gts->ts_dsr_au_count);
737 } else {
738 gru_dbg(grudev, "failed to allocate a GTS %s\n", "");
739 STAT(assign_context_failed);
740 }
741
742 preempt_enable();
743 return gru;
744}
745
746/*
747 * gru_fault
748 *
749 * Map the user's GRU segment
750 *
751 * Note: gru segments are always mmapped on GRU_GSEG_PAGESIZE boundaries.
752 */
753int gru_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
754{
755 struct gru_thread_state *gts;
756 unsigned long paddr, vaddr;
757
758 vaddr = (unsigned long)vmf->virtual_address;
759 gru_dbg(grudev, "vma %p, vaddr 0x%lx (0x%lx)\n",
760 vma, vaddr, GSEG_BASE(vaddr));
761 STAT(nopfn);
762
763 /* The following check ensures vaddr is a valid address in the VMA */
764 gts = gru_find_thread_state(vma, TSID(vaddr, vma));
765 if (!gts)
766 return VM_FAULT_SIGBUS;
767
768again:
769 preempt_disable();
770 mutex_lock(&gts->ts_ctxlock);
771 if (gts->ts_gru) {
772 if (gts->ts_gru->gs_blade_id != uv_numa_blade_id()) {
773 STAT(migrated_nopfn_unload);
774 gru_unload_context(gts, 1);
775 } else {
776 if (gru_retarget_intr(gts))
777 STAT(migrated_nopfn_retarget);
778 }
779 }
780
781 if (!gts->ts_gru) {
782 if (!gru_assign_gru_context(gts)) {
783 mutex_unlock(&gts->ts_ctxlock);
784 preempt_enable();
785 schedule_timeout(GRU_ASSIGN_DELAY); /* true hack ZZZ */
786 if (gts->ts_steal_jiffies + GRU_STEAL_DELAY < jiffies)
787 gru_steal_context(gts);
788 goto again;
789 }
790 gru_load_context(gts);
791 paddr = gseg_physical_address(gts->ts_gru, gts->ts_ctxnum);
792 remap_pfn_range(vma, vaddr & ~(GRU_GSEG_PAGESIZE - 1),
793 paddr >> PAGE_SHIFT, GRU_GSEG_PAGESIZE,
794 vma->vm_page_prot);
795 }
796
797 mutex_unlock(&gts->ts_ctxlock);
798 preempt_enable();
799
800 return VM_FAULT_NOPAGE;
801}
802
diff --git a/drivers/misc/sgi-gru/gruprocfs.c b/drivers/misc/sgi-gru/gruprocfs.c
new file mode 100644
index 000000000000..533923f83f1a
--- /dev/null
+++ b/drivers/misc/sgi-gru/gruprocfs.c
@@ -0,0 +1,336 @@
1/*
2 * SN Platform GRU Driver
3 *
4 * PROC INTERFACES
5 *
6 * This file supports the /proc interfaces for the GRU driver
7 *
8 * Copyright (c) 2008 Silicon Graphics, Inc. All Rights Reserved.
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU General Public License as published by
12 * the Free Software Foundation; either version 2 of the License, or
13 * (at your option) any later version.
14 *
15 * This program is distributed in the hope that it will be useful,
16 * but WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
18 * GNU General Public License for more details.
19 *
20 * You should have received a copy of the GNU General Public License
21 * along with this program; if not, write to the Free Software
22 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
23 */
24
25#include <linux/proc_fs.h>
26#include <linux/device.h>
27#include <linux/seq_file.h>
28#include <linux/uaccess.h>
29#include "gru.h"
30#include "grulib.h"
31#include "grutables.h"
32
33#define printstat(s, f) printstat_val(s, &gru_stats.f, #f)
34
35static void printstat_val(struct seq_file *s, atomic_long_t *v, char *id)
36{
37 unsigned long val = atomic_long_read(v);
38
39 if (val)
40 seq_printf(s, "%16lu %s\n", val, id);
41}
42
43static int statistics_show(struct seq_file *s, void *p)
44{
45 printstat(s, vdata_alloc);
46 printstat(s, vdata_free);
47 printstat(s, gts_alloc);
48 printstat(s, gts_free);
49 printstat(s, vdata_double_alloc);
50 printstat(s, gts_double_allocate);
51 printstat(s, assign_context);
52 printstat(s, assign_context_failed);
53 printstat(s, free_context);
54 printstat(s, load_context);
55 printstat(s, unload_context);
56 printstat(s, steal_context);
57 printstat(s, steal_context_failed);
58 printstat(s, nopfn);
59 printstat(s, break_cow);
60 printstat(s, asid_new);
61 printstat(s, asid_next);
62 printstat(s, asid_wrap);
63 printstat(s, asid_reuse);
64 printstat(s, intr);
65 printstat(s, call_os);
66 printstat(s, call_os_check_for_bug);
67 printstat(s, call_os_wait_queue);
68 printstat(s, user_flush_tlb);
69 printstat(s, user_unload_context);
70 printstat(s, user_exception);
71 printstat(s, set_task_slice);
72 printstat(s, migrate_check);
73 printstat(s, migrated_retarget);
74 printstat(s, migrated_unload);
75 printstat(s, migrated_unload_delay);
76 printstat(s, migrated_nopfn_retarget);
77 printstat(s, migrated_nopfn_unload);
78 printstat(s, tlb_dropin);
79 printstat(s, tlb_dropin_fail_no_asid);
80 printstat(s, tlb_dropin_fail_upm);
81 printstat(s, tlb_dropin_fail_invalid);
82 printstat(s, tlb_dropin_fail_range_active);
83 printstat(s, tlb_dropin_fail_idle);
84 printstat(s, tlb_dropin_fail_fmm);
85 printstat(s, mmu_invalidate_range);
86 printstat(s, mmu_invalidate_page);
87 printstat(s, mmu_clear_flush_young);
88 printstat(s, flush_tlb);
89 printstat(s, flush_tlb_gru);
90 printstat(s, flush_tlb_gru_tgh);
91 printstat(s, flush_tlb_gru_zero_asid);
92 printstat(s, copy_gpa);
93 printstat(s, mesq_receive);
94 printstat(s, mesq_receive_none);
95 printstat(s, mesq_send);
96 printstat(s, mesq_send_failed);
97 printstat(s, mesq_noop);
98 printstat(s, mesq_send_unexpected_error);
99 printstat(s, mesq_send_lb_overflow);
100 printstat(s, mesq_send_qlimit_reached);
101 printstat(s, mesq_send_amo_nacked);
102 printstat(s, mesq_send_put_nacked);
103 printstat(s, mesq_qf_not_full);
104 printstat(s, mesq_qf_locked);
105 printstat(s, mesq_qf_noop_not_full);
106 printstat(s, mesq_qf_switch_head_failed);
107 printstat(s, mesq_qf_unexpected_error);
108 printstat(s, mesq_noop_unexpected_error);
109 printstat(s, mesq_noop_lb_overflow);
110 printstat(s, mesq_noop_qlimit_reached);
111 printstat(s, mesq_noop_amo_nacked);
112 printstat(s, mesq_noop_put_nacked);
113 return 0;
114}
115
116static ssize_t statistics_write(struct file *file, const char __user *userbuf,
117 size_t count, loff_t *data)
118{
119 memset(&gru_stats, 0, sizeof(gru_stats));
120 return count;
121}
122
123static int options_show(struct seq_file *s, void *p)
124{
125 seq_printf(s, "0x%lx\n", gru_options);
126 return 0;
127}
128
129static ssize_t options_write(struct file *file, const char __user *userbuf,
130 size_t count, loff_t *data)
131{
132 unsigned long val;
133 char buf[80];
134
135 if (copy_from_user
136 (buf, userbuf, count < sizeof(buf) ? count : sizeof(buf)))
137 return -EFAULT;
138 if (!strict_strtoul(buf, 10, &val))
139 gru_options = val;
140
141 return count;
142}
143
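For illustration only: the files registered by gru_proc_init() later in this file land under /proc/sgi_uv/gru/. The minimal user-space snippet below enables debug printing through debug_options (options_write() above parses the value with strict_strtoul() in base 10; the OPT_DPRINT value of 1 comes from grutables.h), assuming the driver is loaded and the proc files exist.

#include <stdio.h>

int main(void)
{
        FILE *f = fopen("/proc/sgi_uv/gru/debug_options", "w");

        if (!f) {
                perror("debug_options");
                return 1;
        }
        /* OPT_DPRINT (1): let gru_dbg() messages through */
        fprintf(f, "1\n");
        fclose(f);
        return 0;
}
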
144static int cch_seq_show(struct seq_file *file, void *data)
145{
146 long gid = *(long *)data;
147 int i;
148 struct gru_state *gru = GID_TO_GRU(gid);
149 struct gru_thread_state *ts;
150 const char *mode[] = { "??", "UPM", "INTR", "OS_POLL" };
151
152 if (gid == 0)
153 seq_printf(file, "#%5s%5s%6s%9s%6s%8s%8s\n", "gid", "bid",
154 "ctx#", "pid", "cbrs", "dsbytes", "mode");
155 if (gru)
156 for (i = 0; i < GRU_NUM_CCH; i++) {
157 ts = gru->gs_gts[i];
158 if (!ts)
159 continue;
160 seq_printf(file, " %5d%5d%6d%9d%6d%8d%8s\n",
161 gru->gs_gid, gru->gs_blade_id, i,
162 ts->ts_tgid_owner,
163 ts->ts_cbr_au_count * GRU_CBR_AU_SIZE,
164 ts->ts_dsr_au_count * GRU_DSR_AU_BYTES,
165 mode[ts->ts_user_options &
166 GRU_OPT_MISS_MASK]);
167 }
168
169 return 0;
170}
171
172static int gru_seq_show(struct seq_file *file, void *data)
173{
174 long gid = *(long *)data, ctxfree, cbrfree, dsrfree;
175 struct gru_state *gru = GID_TO_GRU(gid);
176
177 if (gid == 0) {
178 seq_printf(file, "#%5s%5s%7s%6s%6s%8s%6s%6s\n", "gid", "nid",
179 "ctx", "cbr", "dsr", "ctx", "cbr", "dsr");
180 seq_printf(file, "#%5s%5s%7s%6s%6s%8s%6s%6s\n", "", "", "busy",
181 "busy", "busy", "free", "free", "free");
182 }
183 if (gru) {
184 ctxfree = GRU_NUM_CCH - gru->gs_active_contexts;
185 cbrfree = hweight64(gru->gs_cbr_map) * GRU_CBR_AU_SIZE;
186 dsrfree = hweight64(gru->gs_dsr_map) * GRU_DSR_AU_BYTES;
187 seq_printf(file, " %5d%5d%7ld%6ld%6ld%8ld%6ld%6ld\n",
188 gru->gs_gid, gru->gs_blade_id, GRU_NUM_CCH - ctxfree,
189 GRU_NUM_CBE - cbrfree, GRU_NUM_DSR_BYTES - dsrfree,
190 ctxfree, cbrfree, dsrfree);
191 }
192
193 return 0;
194}
195
196static void seq_stop(struct seq_file *file, void *data)
197{
198}
199
200static void *seq_start(struct seq_file *file, loff_t *gid)
201{
202 if (*gid < GRU_MAX_GRUS)
203 return gid;
204 return NULL;
205}
206
207static void *seq_next(struct seq_file *file, void *data, loff_t *gid)
208{
209 (*gid)++;
210 if (*gid < GRU_MAX_GRUS)
211 return gid;
212 return NULL;
213}
214
215static const struct seq_operations cch_seq_ops = {
216 .start = seq_start,
217 .next = seq_next,
218 .stop = seq_stop,
219 .show = cch_seq_show
220};
221
222static const struct seq_operations gru_seq_ops = {
223 .start = seq_start,
224 .next = seq_next,
225 .stop = seq_stop,
226 .show = gru_seq_show
227};
228
229static int statistics_open(struct inode *inode, struct file *file)
230{
231 return single_open(file, statistics_show, NULL);
232}
233
234static int options_open(struct inode *inode, struct file *file)
235{
236 return single_open(file, options_show, NULL);
237}
238
239static int cch_open(struct inode *inode, struct file *file)
240{
241 return seq_open(file, &cch_seq_ops);
242}
243
244static int gru_open(struct inode *inode, struct file *file)
245{
246 return seq_open(file, &gru_seq_ops);
247}
248
249/* *INDENT-OFF* */
250static const struct file_operations statistics_fops = {
251 .open = statistics_open,
252 .read = seq_read,
253 .write = statistics_write,
254 .llseek = seq_lseek,
255 .release = single_release,
256};
257
258static const struct file_operations options_fops = {
259 .open = options_open,
260 .read = seq_read,
261 .write = options_write,
262 .llseek = seq_lseek,
263 .release = single_release,
264};
265
266static const struct file_operations cch_fops = {
267 .open = cch_open,
268 .read = seq_read,
269 .llseek = seq_lseek,
270 .release = seq_release,
271};
272static const struct file_operations gru_fops = {
273 .open = gru_open,
274 .read = seq_read,
275 .llseek = seq_lseek,
276 .release = seq_release,
277};
278
279static struct proc_entry {
280 char *name;
281 int mode;
282 const struct file_operations *fops;
283 struct proc_dir_entry *entry;
284} proc_files[] = {
285 {"statistics", 0644, &statistics_fops},
286 {"debug_options", 0644, &options_fops},
287 {"cch_status", 0444, &cch_fops},
288 {"gru_status", 0444, &gru_fops},
289 {NULL}
290};
291/* *INDENT-ON* */
292
293static struct proc_dir_entry *proc_gru __read_mostly;
294
295static int create_proc_file(struct proc_entry *p)
296{
297 p->entry = create_proc_entry(p->name, p->mode, proc_gru);
298 if (!p->entry)
299 return -1;
300 p->entry->proc_fops = p->fops;
301 return 0;
302}
303
304static void delete_proc_files(void)
305{
306 struct proc_entry *p;
307
308 if (proc_gru) {
309 for (p = proc_files; p->name; p++)
310 if (p->entry)
311 remove_proc_entry(p->name, proc_gru);
312 remove_proc_entry("gru", NULL);
313 }
314}
315
316int gru_proc_init(void)
317{
318 struct proc_entry *p;
319
320 proc_mkdir("sgi_uv", NULL);
321 proc_gru = proc_mkdir("sgi_uv/gru", NULL);
322
323 for (p = proc_files; p->name; p++)
324 if (create_proc_file(p))
325 goto err;
326 return 0;
327
328err:
329 delete_proc_files();
330 return -1;
331}
332
333void gru_proc_exit(void)
334{
335 delete_proc_files();
336}
diff --git a/drivers/misc/sgi-gru/grutables.h b/drivers/misc/sgi-gru/grutables.h
new file mode 100644
index 000000000000..4251018f70ff
--- /dev/null
+++ b/drivers/misc/sgi-gru/grutables.h
@@ -0,0 +1,609 @@
1/*
2 * SN Platform GRU Driver
3 *
4 * GRU DRIVER TABLES, MACROS, externs, etc
5 *
6 * Copyright (c) 2008 Silicon Graphics, Inc. All Rights Reserved.
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation; either version 2 of the License, or
11 * (at your option) any later version.
12 *
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
17 *
18 * You should have received a copy of the GNU General Public License
19 * along with this program; if not, write to the Free Software
20 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
21 */
22
23#ifndef __GRUTABLES_H__
24#define __GRUTABLES_H__
25
26/*
27 * GRU Chiplet:
28 * The GRU is a user addressable memory accelerator. It provides
29 * several forms of load, store, memset, bcopy instructions. In addition, it
30 * contains special instructions for AMOs, sending messages to message
31 * queues, etc.
32 *
33 * The GRU is an integral part of the node controller. It connects
34 * directly to the cpu socket. In its current implementation, there are 2
35 * GRU chiplets in the node controller on each blade (~node).
36 *
37 * The entire GRU memory space is fully coherent and cacheable by the cpus.
38 *
39 * Each GRU chiplet has a physical memory map that looks like the following:
40 *
41 * +-----------------+
42 * |/////////////////|
43 * |/////////////////|
44 * |/////////////////|
45 * |/////////////////|
46 * |/////////////////|
47 * |/////////////////|
48 * |/////////////////|
49 * |/////////////////|
50 * +-----------------+
51 * | system control |
52 * +-----------------+ _______ +-------------+
53 * |/////////////////| / | |
54 * |/////////////////| / | |
55 * |/////////////////| / | instructions|
56 * |/////////////////| / | |
57 * |/////////////////| / | |
58 * |/////////////////| / |-------------|
59 * |/////////////////| / | |
60 * +-----------------+ | |
61 * | context 15 | | data |
62 * +-----------------+ | |
63 * | ...... | \ | |
64 * +-----------------+ \____________ +-------------+
65 * | context 1 |
66 * +-----------------+
67 * | context 0 |
68 * +-----------------+
69 *
70 * Each of the "contexts" is a chunk of memory that can be mmapped into user
71 * space. The context consists of 2 parts:
72 *
73 * - an instruction space that can be directly accessed by the user
74 * to issue GRU instructions and to check instruction status.
75 *
76 * - a data area that acts as normal RAM.
77 *
78 * User instructions contain virtual addresses of data to be accessed by the
79 * GRU. The GRU contains a TLB that is used to convert these user virtual
80 * addresses to physical addresses.
81 *
82 * The "system control" area of the GRU chiplet is used by the kernel driver
83 * to manage user contexts and to perform functions such as TLB dropin and
84 * purging.
85 *
86 * One context may be reserved for the kernel and used for cross-partition
87 * communication. The GRU will also be used to asynchronously zero out
88 * large blocks of memory (not currently implemented).
89 *
90 *
91 * Tables:
92 *
93 * VDATA-VMA Data - Holds a few parameters. Head of linked list of
94 * GTS tables for threads using the GSEG
95 * GTS - Gru Thread State - contains info for managing a GSEG context. A
96 * GTS is allocated for each thread accessing a
97 * GSEG.
98 * GTD - GRU Thread Data - contains shadow copy of GRU data when GSEG is
99 * not loaded into a GRU
100 * GMS - GRU Memory Struct - Used to manage TLB shootdowns. Tracks GRUs
101 * where a GSEG has been loaded. Similar to
102 * an mm_struct but for GRU.
103 *
104 * GS - GRU State - Used to manage the state of a GRU chiplet
105 * BS - Blade State - Used to manage state of all GRU chiplets
106 * on a blade
107 *
108 *
109 * Normal task tables for task using GRU.
110 * - 2 threads in process
111 * - 2 GSEGs open in process
112 * - GSEG1 is being used by both threads
113 * - GSEG2 is used only by thread 2
114 *
115 * task -->|
116 * task ---+---> mm ->------ (notifier) -------+-> gms
117 * | |
118 * |--> vma -> vdata ---> gts--->| GSEG1 (thread1)
119 * | | |
120 * | +-> gts--->| GSEG1 (thread2)
121 * | |
122 * |--> vma -> vdata ---> gts--->| GSEG2 (thread2)
123 * .
124 * .
125 *
126 * GSEGs are marked DONTCOPY on fork
127 *
128 * At open
129 * file.private_data -> NULL
130 *
131 * At mmap,
132 * vma -> vdata
133 *
134 * After gseg reference
135 * vma -> vdata ->gts
136 *
137 * After fork
138 * parent
139 * vma -> vdata -> gts
140 * child
141 * (vma is not copied)
142 *
143 */
144
145#include <linux/rmap.h>
146#include <linux/interrupt.h>
147#include <linux/mutex.h>
148#include <linux/wait.h>
149#include <linux/mmu_notifier.h>
150#include "gru.h"
151#include "gruhandles.h"
152
153extern struct gru_stats_s gru_stats;
154extern struct gru_blade_state *gru_base[];
155extern unsigned long gru_start_paddr, gru_end_paddr;
156
157#define GRU_MAX_BLADES MAX_NUMNODES
158#define GRU_MAX_GRUS (GRU_MAX_BLADES * GRU_CHIPLETS_PER_BLADE)
159
160#define GRU_DRIVER_ID_STR "SGI GRU Device Driver"
161#define GRU_DRIVER_VERSION_STR "0.80"
162
163/*
164 * GRU statistics.
165 */
166struct gru_stats_s {
167 atomic_long_t vdata_alloc;
168 atomic_long_t vdata_free;
169 atomic_long_t gts_alloc;
170 atomic_long_t gts_free;
171 atomic_long_t vdata_double_alloc;
172 atomic_long_t gts_double_allocate;
173 atomic_long_t assign_context;
174 atomic_long_t assign_context_failed;
175 atomic_long_t free_context;
176 atomic_long_t load_context;
177 atomic_long_t unload_context;
178 atomic_long_t steal_context;
179 atomic_long_t steal_context_failed;
180 atomic_long_t nopfn;
181 atomic_long_t break_cow;
182 atomic_long_t asid_new;
183 atomic_long_t asid_next;
184 atomic_long_t asid_wrap;
185 atomic_long_t asid_reuse;
186 atomic_long_t intr;
187 atomic_long_t call_os;
188 atomic_long_t call_os_check_for_bug;
189 atomic_long_t call_os_wait_queue;
190 atomic_long_t user_flush_tlb;
191 atomic_long_t user_unload_context;
192 atomic_long_t user_exception;
193 atomic_long_t set_task_slice;
194 atomic_long_t migrate_check;
195 atomic_long_t migrated_retarget;
196 atomic_long_t migrated_unload;
197 atomic_long_t migrated_unload_delay;
198 atomic_long_t migrated_nopfn_retarget;
199 atomic_long_t migrated_nopfn_unload;
200 atomic_long_t tlb_dropin;
201 atomic_long_t tlb_dropin_fail_no_asid;
202 atomic_long_t tlb_dropin_fail_upm;
203 atomic_long_t tlb_dropin_fail_invalid;
204 atomic_long_t tlb_dropin_fail_range_active;
205 atomic_long_t tlb_dropin_fail_idle;
206 atomic_long_t tlb_dropin_fail_fmm;
207 atomic_long_t mmu_invalidate_range;
208 atomic_long_t mmu_invalidate_page;
209 atomic_long_t mmu_clear_flush_young;
210 atomic_long_t flush_tlb;
211 atomic_long_t flush_tlb_gru;
212 atomic_long_t flush_tlb_gru_tgh;
213 atomic_long_t flush_tlb_gru_zero_asid;
214
215 atomic_long_t copy_gpa;
216
217 atomic_long_t mesq_receive;
218 atomic_long_t mesq_receive_none;
219 atomic_long_t mesq_send;
220 atomic_long_t mesq_send_failed;
221 atomic_long_t mesq_noop;
222 atomic_long_t mesq_send_unexpected_error;
223 atomic_long_t mesq_send_lb_overflow;
224 atomic_long_t mesq_send_qlimit_reached;
225 atomic_long_t mesq_send_amo_nacked;
226 atomic_long_t mesq_send_put_nacked;
227 atomic_long_t mesq_qf_not_full;
228 atomic_long_t mesq_qf_locked;
229 atomic_long_t mesq_qf_noop_not_full;
230 atomic_long_t mesq_qf_switch_head_failed;
231 atomic_long_t mesq_qf_unexpected_error;
232 atomic_long_t mesq_noop_unexpected_error;
233 atomic_long_t mesq_noop_lb_overflow;
234 atomic_long_t mesq_noop_qlimit_reached;
235 atomic_long_t mesq_noop_amo_nacked;
236 atomic_long_t mesq_noop_put_nacked;
237
238};
239
240#define OPT_DPRINT 1
241#define OPT_STATS 2
242#define GRU_QUICKLOOK 4
243
244
245#define IRQ_GRU 110 /* Starting IRQ number for interrupts */
246
247/* Delay in jiffies between attempts to assign a GRU context */
248#define GRU_ASSIGN_DELAY ((HZ * 20) / 1000)
249
250/*
251 * If a process has its context stolen, min delay in jiffies before trying to
252 * steal a context from another process.
253 */
254#define GRU_STEAL_DELAY ((HZ * 200) / 1000)
255
256#define STAT(id) do { \
257 if (gru_options & OPT_STATS) \
258 atomic_long_inc(&gru_stats.id); \
259 } while (0)
260
261#ifdef CONFIG_SGI_GRU_DEBUG
262#define gru_dbg(dev, fmt, x...) \
263 do { \
264 if (gru_options & OPT_DPRINT) \
265 dev_dbg(dev, "%s: " fmt, __func__, x); \
266 } while (0)
267#else
268#define gru_dbg(x...)
269#endif
270
271/*-----------------------------------------------------------------------------
272 * ASID management
273 */
274#define MAX_ASID 0xfffff0
275#define MIN_ASID 8
276#define ASID_INC 8 /* number of regions */
277
278/* Generate a GRU asid value from a GRU base asid & a virtual address. */
279#if defined CONFIG_IA64
280#define VADDR_HI_BIT 64
281#define GRUREGION(addr) ((addr) >> (VADDR_HI_BIT - 3) & 3)
282#elif defined __x86_64
283#define VADDR_HI_BIT 48
284#define GRUREGION(addr) (0) /* ZZZ could do better */
285#else
286#error "Unsupported architecture"
287#endif
288#define GRUASID(asid, addr) ((asid) + GRUREGION(addr))
289
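As a worked example of the two macros above (IA64 case, VADDR_HI_BIT = 64): GRUREGION() extracts bits 62:61 of the virtual address, so a region-1 user address yields 1 and a region-7 kernel address yields 3, and GRUASID() adds that offset to the base ASID; since base ASIDs advance in steps of ASID_INC = 8, the per-region offsets never collide. A stand-alone sketch of the same arithmetic:

#include <stdio.h>
#include <stdint.h>

#define VADDR_HI_BIT 64
#define GRUREGION(addr)     (((addr) >> (VADDR_HI_BIT - 3)) & 3)
#define GRUASID(asid, addr) ((asid) + GRUREGION(addr))

int main(void)
{
        uint64_t user_addr   = 0x2000000000001000ULL;  /* IA64 region 1 */
        uint64_t kernel_addr = 0xe000000000001000ULL;  /* IA64 region 7 */
        unsigned int base_asid = 0x100;                /* example base ASID */

        printf("user:   region %u, asid 0x%x\n",
               (unsigned int)GRUREGION(user_addr),
               (unsigned int)GRUASID(base_asid, user_addr));
        printf("kernel: region %u, asid 0x%x\n",
               (unsigned int)GRUREGION(kernel_addr),
               (unsigned int)GRUASID(base_asid, kernel_addr));
        return 0;
}
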
290/*------------------------------------------------------------------------------
291 * File & VMS Tables
292 */
293
294struct gru_state;
295
296/*
297 * This structure is pointed to from the mm_struct via the notifier pointer.
298 * There is one of these per address space.
299 */
300struct gru_mm_tracker {
301 unsigned int mt_asid_gen; /* ASID wrap count */
302 int mt_asid; /* current base ASID for gru */
303 unsigned short mt_ctxbitmap; /* bitmap of contexts using
304 asid */
305};
306
307struct gru_mm_struct {
308 struct mmu_notifier ms_notifier;
309 atomic_t ms_refcnt;
310 spinlock_t ms_asid_lock; /* protects ASID assignment */
311 atomic_t ms_range_active;/* num range_invals active */
312 char ms_released;
313 wait_queue_head_t ms_wait_queue;
314 DECLARE_BITMAP(ms_asidmap, GRU_MAX_GRUS);
315 struct gru_mm_tracker ms_asids[GRU_MAX_GRUS];
316};
317
318/*
319 * One of these structures is allocated when a GSEG is mmapped. The
320 * structure is pointed to by the vma->vm_private_data field in the vma struct.
321 */
322struct gru_vma_data {
323 spinlock_t vd_lock; /* Serialize access to vma */
324 struct list_head vd_head; /* head of linked list of gts */
325 long vd_user_options;/* misc user option flags */
326 int vd_cbr_au_count;
327 int vd_dsr_au_count;
328};
329
330/*
331 * One of these is allocated for each thread accessing an mmapped GRU. A linked
332 * list of these structures is hung off the struct gru_vma_data in the mm_struct.
333 */
334struct gru_thread_state {
335 struct list_head ts_next; /* list - head at vma-private */
336 struct mutex ts_ctxlock; /* load/unload CTX lock */
337 struct mm_struct *ts_mm; /* mm currently mapped to
338 context */
339 struct vm_area_struct *ts_vma; /* vma of GRU context */
340 struct gru_state *ts_gru; /* GRU where the context is
341 loaded */
342 struct gru_mm_struct *ts_gms; /* asid & ioproc struct */
343 unsigned long ts_cbr_map; /* map of allocated CBRs */
344 unsigned long ts_dsr_map; /* map of allocated DATA
345 resources */
346 unsigned long ts_steal_jiffies;/* jiffies when context last
347 stolen */
348 long ts_user_options;/* misc user option flags */
349 pid_t ts_tgid_owner; /* task that is using the
350 context - for migration */
351 int ts_tsid; /* thread that owns the
352 structure */
353 int ts_tlb_int_select;/* target cpu if interrupts
354 enabled */
355 int ts_ctxnum; /* context number where the
356 context is loaded */
357 atomic_t ts_refcnt; /* reference count GTS */
358 unsigned char ts_dsr_au_count;/* Number of DSR resources
359 required for context */
360 unsigned char ts_cbr_au_count;/* Number of CBR resources
361 required for context */
362 char ts_force_unload;/* force context to be unloaded
363 after migration */
364 char ts_cbr_idx[GRU_CBR_AU];/* CBR numbers of each
365 allocated CB */
366 unsigned long ts_gdata[0]; /* save area for GRU data (CB,
367 DS, CBE) */
368};
369
370/*
371 * Threaded programs actually allocate an array of GSEGs when a context is
372 * created. Each thread uses a separate GSEG. TSID is the index into the GSEG
373 * array.
374 */
375#define TSID(a, v) (((a) - (v)->vm_start) / GRU_GSEG_PAGESIZE)
376#define UGRUADDR(gts) ((gts)->ts_vma->vm_start + \
377 (gts)->ts_tsid * GRU_GSEG_PAGESIZE)
378
379#define NULLCTX (-1) /* if context not loaded into GRU */
380
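A short worked example of the TSID()/UGRUADDR() arithmetic above, with GRU_GSEG_PAGESIZE assumed to be 2 MB purely for illustration (the real value is defined in gru.h): an address two GSEG strides past vm_start belongs to tsid 2, and the owning GSEG base is recovered by the reverse calculation.

#include <stdio.h>

#define GRU_GSEG_PAGESIZE (2UL * 1024 * 1024)   /* assumed example value */

int main(void)
{
        unsigned long vm_start = 0x40000000UL;  /* example mmap base */
        unsigned long vaddr = vm_start + 2 * GRU_GSEG_PAGESIZE + 0x100;
        unsigned long tsid = (vaddr - vm_start) / GRU_GSEG_PAGESIZE;   /* TSID() */

        /* UGRUADDR(): start of the GSEG that this thread owns */
        printf("tsid %lu, gseg base 0x%lx\n",
               tsid, vm_start + tsid * GRU_GSEG_PAGESIZE);
        return 0;
}
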
381/*-----------------------------------------------------------------------------
382 * GRU State Tables
383 */
384
385/*
386 * One of these exists for each GRU chiplet.
387 */
388struct gru_state {
389 struct gru_blade_state *gs_blade; /* GRU state for entire
390 blade */
391 unsigned long gs_gru_base_paddr; /* Physical address of
392 gru segments (64) */
393 void *gs_gru_base_vaddr; /* Virtual address of
394 gru segments (64) */
395 unsigned char gs_gid; /* unique GRU number */
396 unsigned char gs_tgh_local_shift; /* used to pick TGH for
397 local flush */
398 unsigned char gs_tgh_first_remote; /* starting TGH# for
399 remote flush */
400 unsigned short gs_blade_id; /* blade of GRU */
401 spinlock_t gs_asid_lock; /* lock used for
402 assigning asids */
403 spinlock_t gs_lock; /* lock used for
404 assigning contexts */
405
406 /* -- the following are protected by the gs_asid_lock spinlock ---- */
407 unsigned int gs_asid; /* Next available ASID */
408 unsigned int gs_asid_limit; /* Limit of available
409 ASIDs */
410 unsigned int gs_asid_gen; /* asid generation.
411 Inc on wrap */
412
413 /* --- the following fields are protected by the gs_lock spinlock --- */
414 unsigned long gs_context_map; /* bitmap to manage
415 contexts in use */
416 unsigned long gs_cbr_map; /* bitmap to manage CB
417 resources */
418 unsigned long gs_dsr_map; /* bitmap used to manage
419 DATA resources */
420 unsigned int gs_reserved_cbrs; /* Number of kernel-
421 reserved cbrs */
422 unsigned int gs_reserved_dsr_bytes; /* Bytes of kernel-
423 reserved dsrs */
424 unsigned short gs_active_contexts; /* number of contexts
425 in use */
426 struct gru_thread_state *gs_gts[GRU_NUM_CCH]; /* GTS currently using
427 the context */
428};
429
430/*
431 * This structure contains the GRU state for all the GRUs on a blade.
432 */
433struct gru_blade_state {
434 void *kernel_cb; /* First kernel
435 reserved cb */
436 void *kernel_dsr; /* First kernel
437 reserved DSR */
438 /* ---- the following are protected by the bs_lock spinlock ---- */
439 spinlock_t bs_lock; /* lock used for
440 stealing contexts */
441 int bs_lru_ctxnum; /* STEAL - last context
442 stolen */
443 struct gru_state *bs_lru_gru; /* STEAL - last gru
444 stolen */
445
446 struct gru_state bs_grus[GRU_CHIPLETS_PER_BLADE];
447};
448
449/*-----------------------------------------------------------------------------
450 * Address Primitives
451 */
452#define get_tfm_for_cpu(g, c) \
453 ((struct gru_tlb_fault_map *)get_tfm((g)->gs_gru_base_vaddr, (c)))
454#define get_tfh_by_index(g, i) \
455 ((struct gru_tlb_fault_handle *)get_tfh((g)->gs_gru_base_vaddr, (i)))
456#define get_tgh_by_index(g, i) \
457 ((struct gru_tlb_global_handle *)get_tgh((g)->gs_gru_base_vaddr, (i)))
458#define get_cbe_by_index(g, i) \
459 ((struct gru_control_block_extended *)get_cbe((g)->gs_gru_base_vaddr,\
460 (i)))
461
462/*-----------------------------------------------------------------------------
463 * Useful Macros
464 */
465
466/* Given a blade# & chiplet#, get a pointer to the GRU */
467#define get_gru(b, c) (&gru_base[b]->bs_grus[c])
468
469/* Number of bytes to save/restore when unloading/loading GRU contexts */
470#define DSR_BYTES(dsr) ((dsr) * GRU_DSR_AU_BYTES)
471#define CBR_BYTES(cbr) ((cbr) * GRU_HANDLE_BYTES * GRU_CBR_AU_SIZE * 2)
472
473/* Convert a user CB number to the actual CBRNUM */
474#define thread_cbr_number(gts, n) ((gts)->ts_cbr_idx[(n) / GRU_CBR_AU_SIZE] \
475 * GRU_CBR_AU_SIZE + (n) % GRU_CBR_AU_SIZE)
476
477/* Convert a gid to a pointer to the GRU */
478#define GID_TO_GRU(gid) \
479 (gru_base[(gid) / GRU_CHIPLETS_PER_BLADE] ? \
480 (&gru_base[(gid) / GRU_CHIPLETS_PER_BLADE]-> \
481 bs_grus[(gid) % GRU_CHIPLETS_PER_BLADE]) : \
482 NULL)
483
484/* Scan all active GRUs in a GRU bitmap */
485#define for_each_gru_in_bitmap(gid, map) \
486 for ((gid) = find_first_bit((map), GRU_MAX_GRUS); (gid) < GRU_MAX_GRUS;\
487 (gid)++, (gid) = find_next_bit((map), GRU_MAX_GRUS, (gid)))
488
489/* Scan all active GRUs on a specific blade */
490#define for_each_gru_on_blade(gru, nid, i) \
491 for ((gru) = gru_base[nid]->bs_grus, (i) = 0; \
492 (i) < GRU_CHIPLETS_PER_BLADE; \
493 (i)++, (gru)++)
494
495/* Scan all active GTSs on a gru. Note: must hold gs_lock to use this macro. */
496#define for_each_gts_on_gru(gts, gru, ctxnum) \
497 for ((ctxnum) = 0; (ctxnum) < GRU_NUM_CCH; (ctxnum)++) \
498 if (((gts) = (gru)->gs_gts[ctxnum]))
499
500/* Scan each CBR whose bit is set in a TFM (or copy of) */
501#define for_each_cbr_in_tfm(i, map) \
502 for ((i) = find_first_bit(map, GRU_NUM_CBE); \
503 (i) < GRU_NUM_CBE; \
504 (i)++, (i) = find_next_bit(map, GRU_NUM_CBE, i))
505
506/* Scan each CBR in a CBR bitmap. Note: multiple CBRs in an allocation unit */
507#define for_each_cbr_in_allocation_map(i, map, k) \
508 for ((k) = find_first_bit(map, GRU_CBR_AU); (k) < GRU_CBR_AU; \
509 (k) = find_next_bit(map, GRU_CBR_AU, (k) + 1)) \
510 for ((i) = (k)*GRU_CBR_AU_SIZE; \
511 (i) < ((k) + 1) * GRU_CBR_AU_SIZE; (i)++)
512
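The macro above is a nested bit scan: the outer loop walks the set bits of the CBR allocation-unit map, and the inner loop expands each unit into its individual CBR numbers. A stand-alone equivalent, with GRU_CBR_AU and GRU_CBR_AU_SIZE given assumed example values (the real constants come from gruhandles.h):

#include <stdio.h>

#define GRU_CBR_AU       32   /* assumed: allocation units per chiplet */
#define GRU_CBR_AU_SIZE   2   /* assumed: CBRs per allocation unit */

int main(void)
{
        unsigned long cbrmap = 0x15UL;  /* allocation units 0, 2 and 4 in use */
        int i, k;

        for (k = 0; k < GRU_CBR_AU; k++) {
                if (!(cbrmap & (1UL << k)))
                        continue;
                for (i = k * GRU_CBR_AU_SIZE;
                     i < (k + 1) * GRU_CBR_AU_SIZE; i++)
                        printf("CBR %d (allocation unit %d)\n", i, k);
        }
        return 0;
}
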
513/* Scan each DSR in a DSR bitmap. Note: multiple DSRs in an allocation unit */
514#define for_each_dsr_in_allocation_map(i, map, k) \
515 for ((k) = find_first_bit((const unsigned long *)map, GRU_DSR_AU);\
516 (k) < GRU_DSR_AU; \
517 (k) = find_next_bit((const unsigned long *)map, \
518 GRU_DSR_AU, (k) + 1)) \
519 for ((i) = (k) * GRU_DSR_AU_CL; \
520 (i) < ((k) + 1) * GRU_DSR_AU_CL; (i)++)
521
522#define gseg_physical_address(gru, ctxnum) \
523 ((gru)->gs_gru_base_paddr + ctxnum * GRU_GSEG_STRIDE)
524#define gseg_virtual_address(gru, ctxnum) \
525 ((gru)->gs_gru_base_vaddr + ctxnum * GRU_GSEG_STRIDE)
526
527/*-----------------------------------------------------------------------------
528 * Lock / Unlock GRU handles
529 * Use the "delresp" bit in the handle as a "lock" bit.
530 */
531
532/* Lock hierarchy checking enabled only in emulator */
533
534static inline void __lock_handle(void *h)
535{
536 while (test_and_set_bit(1, h))
537 cpu_relax();
538}
539
540static inline void __unlock_handle(void *h)
541{
542 clear_bit(1, h);
543}
544
545static inline void lock_cch_handle(struct gru_context_configuration_handle *cch)
546{
547 __lock_handle(cch);
548}
549
550static inline void unlock_cch_handle(struct gru_context_configuration_handle
551 *cch)
552{
553 __unlock_handle(cch);
554}
555
556static inline void lock_tgh_handle(struct gru_tlb_global_handle *tgh)
557{
558 __lock_handle(tgh);
559}
560
561static inline void unlock_tgh_handle(struct gru_tlb_global_handle *tgh)
562{
563 __unlock_handle(tgh);
564}
565
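The handle "lock" above is simply a spin on one bit of the handle itself (the delresp bit, per the comment). Below is a user-space analogue of the same pattern using C11 atomics, purely illustrative; the kernel versions rely on test_and_set_bit()/clear_bit() and their implied barriers.

#include <stdatomic.h>
#include <stdio.h>

#define HANDLE_LOCK_MASK (1UL << 1)     /* bit 1, as in __lock_handle() */

static void lock_handle(atomic_ulong *h)
{
        while (atomic_fetch_or(h, HANDLE_LOCK_MASK) & HANDLE_LOCK_MASK)
                ;       /* spin; the kernel uses cpu_relax() here */
}

static void unlock_handle(atomic_ulong *h)
{
        atomic_fetch_and(h, ~HANDLE_LOCK_MASK);
}

int main(void)
{
        atomic_ulong handle = 0;

        lock_handle(&handle);
        printf("locked, handle word 0x%lx\n", (unsigned long)handle);
        unlock_handle(&handle);
        printf("unlocked, handle word 0x%lx\n", (unsigned long)handle);
        return 0;
}
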
566/*-----------------------------------------------------------------------------
567 * Function prototypes & externs
568 */
569struct gru_unload_context_req;
570
571extern struct vm_operations_struct gru_vm_ops;
572extern struct device *grudev;
573
574extern struct gru_vma_data *gru_alloc_vma_data(struct vm_area_struct *vma,
575 int tsid);
576extern struct gru_thread_state *gru_find_thread_state(struct vm_area_struct
577 *vma, int tsid);
578extern struct gru_thread_state *gru_alloc_thread_state(struct vm_area_struct
579 *vma, int tsid);
580extern void gru_unload_context(struct gru_thread_state *gts, int savestate);
581extern void gts_drop(struct gru_thread_state *gts);
582extern void gru_tgh_flush_init(struct gru_state *gru);
583extern int gru_kservices_init(struct gru_state *gru);
584extern irqreturn_t gru_intr(int irq, void *dev_id);
585extern int gru_handle_user_call_os(unsigned long address);
586extern int gru_user_flush_tlb(unsigned long arg);
587extern int gru_user_unload_context(unsigned long arg);
588extern int gru_get_exception_detail(unsigned long arg);
589extern int gru_set_task_slice(long address);
590extern int gru_cpu_fault_map_id(void);
591extern struct vm_area_struct *gru_find_vma(unsigned long vaddr);
592extern void gru_flush_all_tlb(struct gru_state *gru);
593extern int gru_proc_init(void);
594extern void gru_proc_exit(void);
595
596extern unsigned long gru_reserve_cb_resources(struct gru_state *gru,
597 int cbr_au_count, char *cbmap);
598extern unsigned long gru_reserve_ds_resources(struct gru_state *gru,
599 int dsr_au_count, char *dsmap);
600extern int gru_fault(struct vm_area_struct *, struct vm_fault *vmf);
601extern struct gru_mm_struct *gru_register_mmu_notifier(void);
602extern void gru_drop_mmu_notifier(struct gru_mm_struct *gms);
603
604extern void gru_flush_tlb_range(struct gru_mm_struct *gms, unsigned long start,
605 unsigned long len);
606
607extern unsigned long gru_options;
608
609#endif /* __GRUTABLES_H__ */
diff --git a/drivers/misc/sgi-gru/grutlbpurge.c b/drivers/misc/sgi-gru/grutlbpurge.c
new file mode 100644
index 000000000000..c84496a77691
--- /dev/null
+++ b/drivers/misc/sgi-gru/grutlbpurge.c
@@ -0,0 +1,371 @@
1/*
2 * SN Platform GRU Driver
3 *
4 * MMUOPS callbacks + TLB flushing
5 *
6 * This file handles mmu notifier callbacks from the core kernel. The callbacks
7 * are used to update the TLB in the GRU as a result of changes in the
8 * state of a process address space. This file also handles TLB invalidates
9 * from the GRU driver.
10 *
11 * Copyright (c) 2008 Silicon Graphics, Inc. All Rights Reserved.
12 *
13 * This program is free software; you can redistribute it and/or modify
14 * it under the terms of the GNU General Public License as published by
15 * the Free Software Foundation; either version 2 of the License, or
16 * (at your option) any later version.
17 *
18 * This program is distributed in the hope that it will be useful,
19 * but WITHOUT ANY WARRANTY; without even the implied warranty of
20 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
21 * GNU General Public License for more details.
22 *
23 * You should have received a copy of the GNU General Public License
24 * along with this program; if not, write to the Free Software
25 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
26 */
27
28#include <linux/kernel.h>
29#include <linux/list.h>
30#include <linux/spinlock.h>
31#include <linux/mm.h>
32#include <linux/slab.h>
33#include <linux/device.h>
34#include <linux/hugetlb.h>
35#include <linux/delay.h>
36#include <linux/timex.h>
37#include <linux/srcu.h>
38#include <asm/processor.h>
39#include "gru.h"
40#include "grutables.h"
41#include <asm/uv/uv_hub.h>
42
43#define gru_random() get_cycles()
44
45/* ---------------------------------- TLB Invalidation functions --------
46 * get_tgh_handle
47 *
48 * Find a TGH to use for issuing a TLB invalidate. For GRUs that are on the
49 * local blade, use a fixed TGH that is a function of the blade-local cpu
50 * number. Normally, this TGH is private to the cpu & no contention occurs for
51 * the TGH. For offblade GRUs, select a random TGH in the range above the
52 * private TGHs. A spinlock is required to access this TGH & the lock must be
53 * released when the invalidate completes. This sucks, but it is the best we
54 * can do.
55 *
56 * Note that the spinlock is IN the TGH handle so locking does not involve
57 * additional cache lines.
58 *
59 */
60static inline int get_off_blade_tgh(struct gru_state *gru)
61{
62 int n;
63
64 n = GRU_NUM_TGH - gru->gs_tgh_first_remote;
65 n = gru_random() % n;
66 n += gru->gs_tgh_first_remote;
67 return n;
68}
69
70static inline int get_on_blade_tgh(struct gru_state *gru)
71{
72 return uv_blade_processor_id() >> gru->gs_tgh_local_shift;
73}
74
75static struct gru_tlb_global_handle *get_lock_tgh_handle(struct gru_state
76 *gru)
77{
78 struct gru_tlb_global_handle *tgh;
79 int n;
80
81 preempt_disable();
82 if (uv_numa_blade_id() == gru->gs_blade_id)
83 n = get_on_blade_tgh(gru);
84 else
85 n = get_off_blade_tgh(gru);
86 tgh = get_tgh_by_index(gru, n);
87 lock_tgh_handle(tgh);
88
89 return tgh;
90}
91
92static void get_unlock_tgh_handle(struct gru_tlb_global_handle *tgh)
93{
94 unlock_tgh_handle(tgh);
95 preempt_enable();
96}
97
98/*
99 * gru_flush_tlb_range
100 *
101 * General purpose TLB invalidation function. This function scans every GRU in
102 * the ENTIRE system (partition) looking for GRUs where the specified MM has
103 * been accessed by the GRU. For each GRU found, the TLB must be invalidated OR
104 * the ASID invalidated. Invalidating an ASID causes a new ASID to be assigned
105 * on the next fault. This effectively flushes the ENTIRE TLB for the MM at the
106 * cost of (possibly) a large number of future TLBmisses.
107 *
108 * The current algorithm is optimized based on the following (somewhat true)
109 * assumptions:
110 * - GRU contexts are not loaded into a GRU unless a reference is made to
111 * the data segment or control block (this is true, not an assumption).
112 * If a DS/CB is referenced, the user will also issue instructions that
113 * cause TLBmisses. It is not necessary to optimize for the case where
114 * contexts are loaded but no instructions cause TLB misses. (I know
115 * this will happen but I'm not optimizing for it).
116 * - GRU instructions to invalidate TLB entries are SLOOOOWWW - normally
117 * a few usec but in unusual cases, it could be longer. Avoid if
118 * possible.
119 * - intrablade process migration between cpus is not frequent but is
120 * common.
121 * - a GRU context is not typically migrated to a different GRU on the
122 * blade because of intrablade migration
123 * - interblade migration is rare. Processes migrate their GRU context to
124 * the new blade.
125 * - if interblade migration occurs, migration back to the original blade
126 * is very very rare (ie., no optimization for this case)
127 * - most GRU instructions operate on a subset of the user REGIONS. Code
128 * & shared library regions are not likely targets of GRU instructions.
129 *
130 * To help improve the efficiency of TLB invalidation, the GMS data
131 * structure is maintained for EACH address space (MM struct). The GMS is
132 * also the structure that contains the pointer to the mmu callout
133 * functions. This structure is linked to the mm_struct for the address space
134 * using the mmu "register" function. The mmu interfaces are used to
135 * provide the callbacks for TLB invalidation. The GMS contains:
136 *
137 * - asid[maxgrus] array. ASIDs are assigned to a GRU when a context is
138 * loaded into the GRU.
139 * - asidmap[maxgrus]. bitmap to make it easier to find non-zero asids in
140 * the above array
141 * - ctxbitmap[maxgrus]. Indicates the contexts that are currently active
142 * in the GRU for the address space. This bitmap must be passed to the
143 * GRU to do an invalidate.
144 *
145 * The current algorithm for invalidating TLBs is:
146 * - scan the asidmap for GRUs where the context has been loaded, ie,
147 * asid is non-zero.
148 * - for each gru found:
149 * - if the ctxtmap is non-zero, there are active contexts in the
150 * GRU. TLB invalidate instructions must be issued to the GRU.
151 * - if the ctxtmap is zero, no context is active. Set the ASID to
152 * zero to force a full TLB invalidation. This is fast but will
153 * cause a lot of TLB misses if the context is reloaded onto the
154 * GRU
155 *
156 */
157
158void gru_flush_tlb_range(struct gru_mm_struct *gms, unsigned long start,
159 unsigned long len)
160{
161 struct gru_state *gru;
162 struct gru_mm_tracker *asids;
163 struct gru_tlb_global_handle *tgh;
164 unsigned long num;
165 int grupagesize, pagesize, pageshift, gid, asid;
166
167 /* ZZZ TODO - handle huge pages */
168 pageshift = PAGE_SHIFT;
169 pagesize = (1UL << pageshift);
170 grupagesize = GRU_PAGESIZE(pageshift);
171 num = min(((len + pagesize - 1) >> pageshift), GRUMAXINVAL);
172
173 STAT(flush_tlb);
174 gru_dbg(grudev, "gms %p, start 0x%lx, len 0x%lx, asidmap 0x%lx\n", gms,
175 start, len, gms->ms_asidmap[0]);
176
177 spin_lock(&gms->ms_asid_lock);
178 for_each_gru_in_bitmap(gid, gms->ms_asidmap) {
179 STAT(flush_tlb_gru);
180 gru = GID_TO_GRU(gid);
181 asids = gms->ms_asids + gid;
182 asid = asids->mt_asid;
183 if (asids->mt_ctxbitmap && asid) {
184 STAT(flush_tlb_gru_tgh);
185 asid = GRUASID(asid, start);
186 gru_dbg(grudev,
187 " FLUSH gruid %d, asid 0x%x, num %ld, cbmap 0x%x\n",
188 gid, asid, num, asids->mt_ctxbitmap);
189 tgh = get_lock_tgh_handle(gru);
190 tgh_invalidate(tgh, start, 0, asid, grupagesize, 0,
191 num - 1, asids->mt_ctxbitmap);
192 get_unlock_tgh_handle(tgh);
193 } else {
194 STAT(flush_tlb_gru_zero_asid);
195 asids->mt_asid = 0;
196 __clear_bit(gru->gs_gid, gms->ms_asidmap);
197 gru_dbg(grudev,
198 " CLEARASID gruid %d, asid 0x%x, cbtmap 0x%x, asidmap 0x%lx\n",
199 gid, asid, asids->mt_ctxbitmap,
200 gms->ms_asidmap[0]);
201 }
202 }
203 spin_unlock(&gms->ms_asid_lock);
204}
205
206/*
207 * Flush the entire TLB on a chiplet.
208 */
209void gru_flush_all_tlb(struct gru_state *gru)
210{
211 struct gru_tlb_global_handle *tgh;
212
213 gru_dbg(grudev, "gru %p, gid %d\n", gru, gru->gs_gid);
214 tgh = get_lock_tgh_handle(gru);
215 tgh_invalidate(tgh, 0, ~0, 0, 1, 1, GRUMAXINVAL - 1, 0);
216 get_unlock_tgh_handle(tgh);
218}
219
220/*
221 * MMUOPS notifier callout functions
222 */
223static void gru_invalidate_range_start(struct mmu_notifier *mn,
224 struct mm_struct *mm,
225 unsigned long start, unsigned long end)
226{
227 struct gru_mm_struct *gms = container_of(mn, struct gru_mm_struct,
228 ms_notifier);
229
230 STAT(mmu_invalidate_range);
231 atomic_inc(&gms->ms_range_active);
232 gru_dbg(grudev, "gms %p, start 0x%lx, end 0x%lx, act %d\n", gms,
233 start, end, atomic_read(&gms->ms_range_active));
234 gru_flush_tlb_range(gms, start, end - start);
235}
236
237static void gru_invalidate_range_end(struct mmu_notifier *mn,
238 struct mm_struct *mm, unsigned long start,
239 unsigned long end)
240{
241 struct gru_mm_struct *gms = container_of(mn, struct gru_mm_struct,
242 ms_notifier);
243
244 /* ..._and_test() provides needed barrier */
245 (void)atomic_dec_and_test(&gms->ms_range_active);
246
247 wake_up_all(&gms->ms_wait_queue);
248 gru_dbg(grudev, "gms %p, start 0x%lx, end 0x%lx\n", gms, start, end);
249}
250
251static void gru_invalidate_page(struct mmu_notifier *mn, struct mm_struct *mm,
252 unsigned long address)
253{
254 struct gru_mm_struct *gms = container_of(mn, struct gru_mm_struct,
255 ms_notifier);
256
257 STAT(mmu_invalidate_page);
258 gru_flush_tlb_range(gms, address, PAGE_SIZE);
259 gru_dbg(grudev, "gms %p, address 0x%lx\n", gms, address);
260}
261
262static void gru_release(struct mmu_notifier *mn, struct mm_struct *mm)
263{
264 struct gru_mm_struct *gms = container_of(mn, struct gru_mm_struct,
265 ms_notifier);
266
267 gms->ms_released = 1;
268 gru_dbg(grudev, "gms %p\n", gms);
269}
270
271
272static const struct mmu_notifier_ops gru_mmuops = {
273 .invalidate_page = gru_invalidate_page,
274 .invalidate_range_start = gru_invalidate_range_start,
275 .invalidate_range_end = gru_invalidate_range_end,
276 .release = gru_release,
277};
278
279/* Move this to the basic mmu_notifier file. But for now... */
280static struct mmu_notifier *mmu_find_ops(struct mm_struct *mm,
281 const struct mmu_notifier_ops *ops)
282{
283 struct mmu_notifier *mn, *gru_mn = NULL;
284 struct hlist_node *n;
285
286 if (mm->mmu_notifier_mm) {
287 rcu_read_lock();
288 hlist_for_each_entry_rcu(mn, n, &mm->mmu_notifier_mm->list,
289 hlist)
290 if (mn->ops == ops) {
291 gru_mn = mn;
292 break;
293 }
294 rcu_read_unlock();
295 }
296 return gru_mn;
297}
298
299struct gru_mm_struct *gru_register_mmu_notifier(void)
300{
301 struct gru_mm_struct *gms;
302 struct mmu_notifier *mn;
303
304 mn = mmu_find_ops(current->mm, &gru_mmuops);
305 if (mn) {
306 gms = container_of(mn, struct gru_mm_struct, ms_notifier);
307 atomic_inc(&gms->ms_refcnt);
308 } else {
309 gms = kzalloc(sizeof(*gms), GFP_KERNEL);
310 if (gms) {
311 spin_lock_init(&gms->ms_asid_lock);
312 gms->ms_notifier.ops = &gru_mmuops;
313 atomic_set(&gms->ms_refcnt, 1);
314 init_waitqueue_head(&gms->ms_wait_queue);
315 __mmu_notifier_register(&gms->ms_notifier, current->mm);
316 }
317 }
318 gru_dbg(grudev, "gms %p, refcnt %d\n", gms,
319 atomic_read(&gms->ms_refcnt));
320 return gms;
321}
322
323void gru_drop_mmu_notifier(struct gru_mm_struct *gms)
324{
325 gru_dbg(grudev, "gms %p, refcnt %d, released %d\n", gms,
326 atomic_read(&gms->ms_refcnt), gms->ms_released);
327 if (atomic_dec_return(&gms->ms_refcnt) == 0) {
328 if (!gms->ms_released)
329 mmu_notifier_unregister(&gms->ms_notifier, current->mm);
330 kfree(gms);
331 }
332}
333
334/*
335 * Setup TGH parameters. There are:
336 * - 24 TGH handles per GRU chiplet
337 * - a portion (MAX_LOCAL_TGH) of the handles are reserved for
338 * use by blade-local cpus
339 * - the rest are used by off-blade cpus. This usage is
340 * less frequent than blade-local usage.
341 *
342 * For now, use 16 handles for local flushes, 8 for remote flushes. If the blade
343 * has 16 or fewer cpus, each cpu has a unique handle that it can
344 * use.
345 */
346#define MAX_LOCAL_TGH 16
347
348void gru_tgh_flush_init(struct gru_state *gru)
349{
350 int cpus, shift = 0, n;
351
352 cpus = uv_blade_nr_possible_cpus(gru->gs_blade_id);
353
354 /* n = cpus rounded up to next power of 2 */
355 if (cpus) {
356 n = 1 << fls(cpus - 1);
357
358 /*
359 * shift count for converting local cpu# to TGH index
360 * 0 if cpus <= MAX_LOCAL_TGH,
361 * 1 if cpus <= 2*MAX_LOCAL_TGH,
362 * etc
363 */
364 shift = max(0, fls(n - 1) - fls(MAX_LOCAL_TGH - 1));
365 }
366 gru->gs_tgh_local_shift = shift;
367
368 /* first starting TGH index to use for remote purges */
369 gru->gs_tgh_first_remote = (cpus + (1 << shift) - 1) >> shift;
370
371}
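
A worked example of the computation above, using the MAX_LOCAL_TGH value of 16 from this file: a blade with 20 possible cpus rounds up to 32, giving a local shift of 1 (two cpus share each local TGH) and remote purges starting at TGH index (20 + 1) >> 1 = 10. The sketch below reruns the same arithmetic for a few blade sizes, with fls() reimplemented locally:

#include <stdio.h>

#define MAX_LOCAL_TGH 16

static int fls(int x)
{
        int r = 0;

        while (x) {
                x >>= 1;
                r++;
        }
        return r;
}

static void tgh_params(int cpus)
{
        int n, shift = 0, first_remote;

        if (cpus) {
                n = 1 << fls(cpus - 1);         /* round up to power of 2 */
                shift = fls(n - 1) - fls(MAX_LOCAL_TGH - 1);
                if (shift < 0)
                        shift = 0;
        }
        first_remote = (cpus + (1 << shift) - 1) >> shift;
        printf("cpus %2d: local shift %d, first remote TGH %d\n",
               cpus, shift, first_remote);
}

int main(void)
{
        tgh_params(8);
        tgh_params(16);
        tgh_params(20);
        tgh_params(64);
        return 0;
}
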
diff --git a/drivers/misc/sgi-xp/Makefile b/drivers/misc/sgi-xp/Makefile
index b6e40a7958ce..35ce28578075 100644
--- a/drivers/misc/sgi-xp/Makefile
+++ b/drivers/misc/sgi-xp/Makefile
@@ -3,9 +3,17 @@
3# 3#
4 4
5obj-$(CONFIG_SGI_XP) += xp.o 5obj-$(CONFIG_SGI_XP) += xp.o
6xp-y := xp_main.o xp_nofault.o 6xp-y := xp_main.o
7xp-$(CONFIG_IA64_SGI_SN2) += xp_sn2.o xp_nofault.o
8xp-$(CONFIG_IA64_GENERIC) += xp_sn2.o xp_nofault.o xp_uv.o
9xp-$(CONFIG_IA64_SGI_UV) += xp_uv.o
10xp-$(CONFIG_X86_64) += xp_uv.o
7 11
8obj-$(CONFIG_SGI_XP) += xpc.o 12obj-$(CONFIG_SGI_XP) += xpc.o
9xpc-y := xpc_main.o xpc_channel.o xpc_partition.o 13xpc-y := xpc_main.o xpc_channel.o xpc_partition.o
14xpc-$(CONFIG_IA64_SGI_SN2) += xpc_sn2.o
15xpc-$(CONFIG_IA64_GENERIC) += xpc_sn2.o xpc_uv.o
16xpc-$(CONFIG_IA64_SGI_UV) += xpc_uv.o
17xpc-$(CONFIG_X86_64) += xpc_uv.o
10 18
11obj-$(CONFIG_SGI_XP) += xpnet.o 19obj-$(CONFIG_SGI_XP) += xpnet.o
diff --git a/drivers/misc/sgi-xp/xp.h b/drivers/misc/sgi-xp/xp.h
index 03a87a307e32..859a5281c61b 100644
--- a/drivers/misc/sgi-xp/xp.h
+++ b/drivers/misc/sgi-xp/xp.h
@@ -13,11 +13,34 @@
13#ifndef _DRIVERS_MISC_SGIXP_XP_H 13#ifndef _DRIVERS_MISC_SGIXP_XP_H
14#define _DRIVERS_MISC_SGIXP_XP_H 14#define _DRIVERS_MISC_SGIXP_XP_H
15 15
16#include <linux/cache.h>
17#include <linux/hardirq.h>
18#include <linux/mutex.h> 16#include <linux/mutex.h>
19#include <asm/sn/types.h> 17
20#include <asm/sn/bte.h> 18#ifdef CONFIG_IA64
19#include <asm/system.h>
20#include <asm/sn/arch.h> /* defines is_shub1() and is_shub2() */
21#define is_shub() ia64_platform_is("sn2")
22#define is_uv() ia64_platform_is("uv")
23#endif
24#ifdef CONFIG_X86_64
25#include <asm/genapic.h>
26#define is_uv() is_uv_system()
27#endif
28
29#ifndef is_shub1
30#define is_shub1() 0
31#endif
32
33#ifndef is_shub2
34#define is_shub2() 0
35#endif
36
37#ifndef is_shub
38#define is_shub() 0
39#endif
40
41#ifndef is_uv
42#define is_uv() 0
43#endif
21 44
22#ifdef USE_DBUG_ON 45#ifdef USE_DBUG_ON
23#define DBUG_ON(condition) BUG_ON(condition) 46#define DBUG_ON(condition) BUG_ON(condition)
@@ -26,133 +49,56 @@
26#endif 49#endif
27 50
28/* 51/*
29 * Define the maximum number of logically defined partitions the system 52 * Define the maximum number of partitions the system can possibly support.
30 * can support. It is constrained by the maximum number of hardware 53 * It is based on the maximum number of hardware partitionable regions. The
31 * partitionable regions. The term 'region' in this context refers to the 54 * term 'region' in this context refers to the minimum number of nodes that
32 * minimum number of nodes that can comprise an access protection grouping. 55 * can comprise an access protection grouping. The access protection is in
33 * The access protection is in regards to memory, IPI and IOI. 56 * regards to memory, IPI and IOI.
34 * 57 *
35 * The maximum number of hardware partitionable regions is equal to the 58 * The maximum number of hardware partitionable regions is equal to the
36 * maximum number of nodes in the entire system divided by the minimum number 59 * maximum number of nodes in the entire system divided by the minimum number
37 * of nodes that comprise an access protection grouping. 60 * of nodes that comprise an access protection grouping.
38 */ 61 */
39#define XP_MAX_PARTITIONS 64 62#define XP_MAX_NPARTITIONS_SN2 64
40 63#define XP_MAX_NPARTITIONS_UV 256
41/*
42 * Define the number of u64s required to represent all the C-brick nasids
43 * as a bitmap. The cross-partition kernel modules deal only with
44 * C-brick nasids, thus the need for bitmaps which don't account for
45 * odd-numbered (non C-brick) nasids.
46 */
47#define XP_MAX_PHYSNODE_ID (MAX_NUMALINK_NODES / 2)
48#define XP_NASID_MASK_BYTES ((XP_MAX_PHYSNODE_ID + 7) / 8)
49#define XP_NASID_MASK_WORDS ((XP_MAX_PHYSNODE_ID + 63) / 64)
50
51/*
52 * Wrapper for bte_copy() that should it return a failure status will retry
53 * the bte_copy() once in the hope that the failure was due to a temporary
54 * aberration (i.e., the link going down temporarily).
55 *
56 * src - physical address of the source of the transfer.
57 * vdst - virtual address of the destination of the transfer.
58 * len - number of bytes to transfer from source to destination.
59 * mode - see bte_copy() for definition.
60 * notification - see bte_copy() for definition.
61 *
62 * Note: xp_bte_copy() should never be called while holding a spinlock.
63 */
64static inline bte_result_t
65xp_bte_copy(u64 src, u64 vdst, u64 len, u64 mode, void *notification)
66{
67 bte_result_t ret;
68 u64 pdst = ia64_tpa(vdst);
69
70 /*
71 * Ensure that the physically mapped memory is contiguous.
72 *
73 * We do this by ensuring that the memory is from region 7 only.
74 * If the need should arise to use memory from one of the other
75 * regions, then modify the BUG_ON() statement to ensure that the
76 * memory from that region is always physically contiguous.
77 */
78 BUG_ON(REGION_NUMBER(vdst) != RGN_KERNEL);
79
80 ret = bte_copy(src, pdst, len, mode, notification);
81 if ((ret != BTE_SUCCESS) && BTE_ERROR_RETRY(ret)) {
82 if (!in_interrupt())
83 cond_resched();
84
85 ret = bte_copy(src, pdst, len, mode, notification);
86 }
87
88 return ret;
89}
90 64
91/* 65/*
92 * XPC establishes channel connections between the local partition and any 66 * XPC establishes channel connections between the local partition and any
93 * other partition that is currently up. Over these channels, kernel-level 67 * other partition that is currently up. Over these channels, kernel-level
94 * `users' can communicate with their counterparts on the other partitions. 68 * `users' can communicate with their counterparts on the other partitions.
95 * 69 *
96 * The maxinum number of channels is limited to eight. For performance reasons,
97 * the internal cross partition structures require sixteen bytes per channel,
98 * and eight allows all of this interface-shared info to fit in one cache line.
99 *
100 * XPC_NCHANNELS reflects the total number of channels currently defined.
101 * If the need for additional channels arises, one can simply increase 70 * If the need for additional channels arises, one can simply increase
102 * XPC_NCHANNELS accordingly. If the day should come where that number 71 * XPC_MAX_NCHANNELS accordingly. If the day should come where that number
103 * exceeds the MAXIMUM number of channels allowed (eight), then one will need 72 * exceeds the absolute MAXIMUM number of channels possible (eight), then one
104 * to make changes to the XPC code to allow for this. 73 * will need to make changes to the XPC code to accommodate this.
74 *
75 * The absolute maximum number of channels possible is limited to eight for
76 * performance reasons on sn2 hardware. The internal cross partition structures
77 * require sixteen bytes per channel, and eight allows all of this
78 * interface-shared info to fit in one 128-byte cacheline.
105 */ 79 */
106#define XPC_MEM_CHANNEL 0 /* memory channel number */ 80#define XPC_MEM_CHANNEL 0 /* memory channel number */
107#define XPC_NET_CHANNEL 1 /* network channel number */ 81#define XPC_NET_CHANNEL 1 /* network channel number */
108 82
109#define XPC_NCHANNELS 2 /* #of defined channels */ 83#define XPC_MAX_NCHANNELS 2 /* max #of channels allowed */
110#define XPC_MAX_NCHANNELS 8 /* max #of channels allowed */
111 84
112#if XPC_NCHANNELS > XPC_MAX_NCHANNELS 85#if XPC_MAX_NCHANNELS > 8
113#error XPC_NCHANNELS exceeds MAXIMUM allowed. 86#error XPC_MAX_NCHANNELS exceeds absolute MAXIMUM possible.
114#endif 87#endif
115 88
116/* 89/*
117 * The format of an XPC message is as follows: 90 * A macro, XPC_MSG_SIZE(), is provided for the user
118 *
119 * +-------+--------------------------------+
120 * | flags |////////////////////////////////|
121 * +-------+--------------------------------+
122 * | message # |
123 * +----------------------------------------+
124 * | payload (user-defined message) |
125 * | |
126 * :
127 * | |
128 * +----------------------------------------+
129 *
130 * The size of the payload is defined by the user via xpc_connect(). A user-
131 * defined message resides in the payload area.
132 *
133 * The user should have no dealings with the message header, but only the
134 * message's payload. When a message entry is allocated (via xpc_allocate())
135 * a pointer to the payload area is returned and not the actual beginning of
136 * the XPC message. The user then constructs a message in the payload area
137 * and passes that pointer as an argument on xpc_send() or xpc_send_notify().
138 *
139 * The size of a message entry (within a message queue) must be a cacheline
140 * sized multiple in order to facilitate the BTE transfer of messages from one
141 * message queue to another. A macro, XPC_MSG_SIZE(), is provided for the user
142 * that wants to fit as many msg entries as possible in a given memory size 91 * that wants to fit as many msg entries as possible in a given memory size
143 * (e.g. a memory page). 92 * (e.g. a memory page).
144 */ 93 */
145struct xpc_msg { 94#define XPC_MSG_MAX_SIZE 128
146 u8 flags; /* FOR XPC INTERNAL USE ONLY */ 95#define XPC_MSG_HDR_MAX_SIZE 16
147 u8 reserved[7]; /* FOR XPC INTERNAL USE ONLY */ 96#define XPC_MSG_PAYLOAD_MAX_SIZE (XPC_MSG_MAX_SIZE - XPC_MSG_HDR_MAX_SIZE)
148 s64 number; /* FOR XPC INTERNAL USE ONLY */
149
150 u64 payload; /* user defined portion of message */
151};
152 97
153#define XPC_MSG_PAYLOAD_OFFSET (u64) (&((struct xpc_msg *)0)->payload)
154#define XPC_MSG_SIZE(_payload_size) \ 98#define XPC_MSG_SIZE(_payload_size) \
155 L1_CACHE_ALIGN(XPC_MSG_PAYLOAD_OFFSET + (_payload_size)) 99 ALIGN(XPC_MSG_HDR_MAX_SIZE + (_payload_size), \
100 is_uv() ? 64 : 128)
101
156 102
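For illustration only (not part of the patch): a user that wants to pack as many message entries as possible into one page could derive its sizes from XPC_MSG_SIZE(). The 40-byte payload below is an arbitrary example and PAGE_SIZE is the usual kernel constant.

/* hypothetical sizing sketch, not from the patch */
#define EXAMPLE_PAYLOAD_SIZE	40					/* user-chosen payload bytes */
#define EXAMPLE_ENTRY_SIZE	XPC_MSG_SIZE(EXAMPLE_PAYLOAD_SIZE)	/* header + payload, aligned */
#define EXAMPLE_NENTRIES	(PAGE_SIZE / EXAMPLE_ENTRY_SIZE)	/* entries that fit in one page */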
157/* 103/*
158 * Define the return values and values passed to user's callout functions. 104 * Define the return values and values passed to user's callout functions.
@@ -233,8 +179,20 @@ enum xp_retval {
233 xpDisconnected, /* 51: channel disconnected (closed) */ 179 xpDisconnected, /* 51: channel disconnected (closed) */
234 180
235 xpBteCopyError, /* 52: bte_copy() returned error */ 181 xpBteCopyError, /* 52: bte_copy() returned error */
182 xpSalError, /* 53: sn SAL error */
183 xpRsvdPageNotSet, /* 54: the reserved page is not set up */
184 xpPayloadTooBig, /* 55: payload too large for message slot */
185
186 xpUnsupported, /* 56: unsupported functionality or resource */
187 xpNeedMoreInfo, /* 57: more info is needed by SAL */
236 188
237 xpUnknownReason /* 53: unknown reason - must be last in enum */ 189 xpGruCopyError, /* 58: gru_copy_gpa() returned error */
190 xpGruSendMqError, /* 59: gru send message queue related error */
191
192 xpBadChannelNumber, /* 60: invalid channel number */
193 xpBadMsgType, /* 61: invalid message type */
194
195 xpUnknownReason /* 62: unknown reason - must be last in enum */
238}; 196};
239 197
240/* 198/*
@@ -285,6 +243,9 @@ typedef void (*xpc_channel_func) (enum xp_retval reason, short partid,
285 * calling xpc_received(). 243 * calling xpc_received().
286 * 244 *
287 * All other reason codes indicate failure. 245 * All other reason codes indicate failure.
246 *
247 * NOTE: The user defined function must be callable by an interrupt handler
248 * and thus cannot block.
288 */ 249 */
289typedef void (*xpc_notify_func) (enum xp_retval reason, short partid, 250typedef void (*xpc_notify_func) (enum xp_retval reason, short partid,
290 int ch_number, void *key); 251 int ch_number, void *key);
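Purely illustrative (not part of the patch): because the callout may run from an interrupt handler, a typical notify function only records the outcome and wakes the sender rather than blocking. The example_ names are assumptions.

struct example_send_ctx {
	enum xp_retval reason;		/* how the send completed */
	struct completion done;
};

/* hypothetical notify callout -- must not sleep */
static void example_notify(enum xp_retval reason, short partid, int ch_number,
			   void *key)
{
	struct example_send_ctx *ctx = key;	/* key supplied to xpc_send_notify() */

	ctx->reason = reason;
	complete(&ctx->done);			/* safe from interrupt context */
}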
@@ -308,23 +269,22 @@ struct xpc_registration {
308 xpc_channel_func func; /* function to call */ 269 xpc_channel_func func; /* function to call */
309 void *key; /* pointer to user's key */ 270 void *key; /* pointer to user's key */
310 u16 nentries; /* #of msg entries in local msg queue */ 271 u16 nentries; /* #of msg entries in local msg queue */
311 u16 msg_size; /* message queue's message size */ 272 u16 entry_size; /* message queue's message entry size */
312 u32 assigned_limit; /* limit on #of assigned kthreads */ 273 u32 assigned_limit; /* limit on #of assigned kthreads */
313 u32 idle_limit; /* limit on #of idle kthreads */ 274 u32 idle_limit; /* limit on #of idle kthreads */
314} ____cacheline_aligned; 275} ____cacheline_aligned;
315 276
316#define XPC_CHANNEL_REGISTERED(_c) (xpc_registrations[_c].func != NULL) 277#define XPC_CHANNEL_REGISTERED(_c) (xpc_registrations[_c].func != NULL)
317 278
318/* the following are valid xpc_allocate() flags */ 279/* the following are valid xpc_send() or xpc_send_notify() flags */
319#define XPC_WAIT 0 /* wait flag */ 280#define XPC_WAIT 0 /* wait flag */
320#define XPC_NOWAIT 1 /* no wait flag */ 281#define XPC_NOWAIT 1 /* no wait flag */
321 282
322struct xpc_interface { 283struct xpc_interface {
323 void (*connect) (int); 284 void (*connect) (int);
324 void (*disconnect) (int); 285 void (*disconnect) (int);
325 enum xp_retval (*allocate) (short, int, u32, void **); 286 enum xp_retval (*send) (short, int, u32, void *, u16);
326 enum xp_retval (*send) (short, int, void *); 287 enum xp_retval (*send_notify) (short, int, u32, void *, u16,
327 enum xp_retval (*send_notify) (short, int, void *,
328 xpc_notify_func, void *); 288 xpc_notify_func, void *);
329 void (*received) (short, int, void *); 289 void (*received) (short, int, void *);
330 enum xp_retval (*partid_to_nasids) (short, void *); 290 enum xp_retval (*partid_to_nasids) (short, void *);
@@ -334,10 +294,9 @@ extern struct xpc_interface xpc_interface;
334 294
335extern void xpc_set_interface(void (*)(int), 295extern void xpc_set_interface(void (*)(int),
336 void (*)(int), 296 void (*)(int),
337 enum xp_retval (*)(short, int, u32, void **), 297 enum xp_retval (*)(short, int, u32, void *, u16),
338 enum xp_retval (*)(short, int, void *), 298 enum xp_retval (*)(short, int, u32, void *, u16,
339 enum xp_retval (*)(short, int, void *, 299 xpc_notify_func, void *),
340 xpc_notify_func, void *),
341 void (*)(short, int, void *), 300 void (*)(short, int, void *),
342 enum xp_retval (*)(short, void *)); 301 enum xp_retval (*)(short, void *));
343extern void xpc_clear_interface(void); 302extern void xpc_clear_interface(void);
@@ -347,22 +306,19 @@ extern enum xp_retval xpc_connect(int, xpc_channel_func, void *, u16,
347extern void xpc_disconnect(int); 306extern void xpc_disconnect(int);
348 307
349static inline enum xp_retval 308static inline enum xp_retval
350xpc_allocate(short partid, int ch_number, u32 flags, void **payload) 309xpc_send(short partid, int ch_number, u32 flags, void *payload,
351{ 310 u16 payload_size)
352 return xpc_interface.allocate(partid, ch_number, flags, payload);
353}
354
355static inline enum xp_retval
356xpc_send(short partid, int ch_number, void *payload)
357{ 311{
358 return xpc_interface.send(partid, ch_number, payload); 312 return xpc_interface.send(partid, ch_number, flags, payload,
313 payload_size);
359} 314}
360 315
361static inline enum xp_retval 316static inline enum xp_retval
362xpc_send_notify(short partid, int ch_number, void *payload, 317xpc_send_notify(short partid, int ch_number, u32 flags, void *payload,
363 xpc_notify_func func, void *key) 318 u16 payload_size, xpc_notify_func func, void *key)
364{ 319{
365 return xpc_interface.send_notify(partid, ch_number, payload, func, key); 320 return xpc_interface.send_notify(partid, ch_number, flags, payload,
321 payload_size, func, key);
366} 322}
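A hypothetical caller under the new interface (not from the patch): the payload is now built in the caller's own buffer and copied by XPC at send time, so the separate xpc_allocate() step goes away. The buffer contents and the use of XPC_NET_CHANNEL here are assumptions.

/* illustrative sender, sketch only */
static enum xp_retval example_send(short partid)
{
	u64 buf[4] = { 0 };	/* payload built in caller memory */
	enum xp_retval ret;

	ret = xpc_send(partid, XPC_NET_CHANNEL, XPC_WAIT, buf, sizeof(buf));
	if (ret != xpSuccess)
		dev_err(xp, "xpc_send() failed, ret=%d\n", ret);
	return ret;
}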
367 323
368static inline void 324static inline void
@@ -377,8 +333,23 @@ xpc_partid_to_nasids(short partid, void *nasids)
377 return xpc_interface.partid_to_nasids(partid, nasids); 333 return xpc_interface.partid_to_nasids(partid, nasids);
378} 334}
379 335
336extern short xp_max_npartitions;
337extern short xp_partition_id;
338extern u8 xp_region_size;
339
340extern unsigned long (*xp_pa) (void *);
341extern enum xp_retval (*xp_remote_memcpy) (unsigned long, const unsigned long,
342 size_t);
343extern int (*xp_cpu_to_nasid) (int);
344
380extern u64 xp_nofault_PIOR_target; 345extern u64 xp_nofault_PIOR_target;
381extern int xp_nofault_PIOR(void *); 346extern int xp_nofault_PIOR(void *);
382extern int xp_error_PIOR(void); 347extern int xp_error_PIOR(void);
383 348
349extern struct device *xp;
350extern enum xp_retval xp_init_sn2(void);
351extern enum xp_retval xp_init_uv(void);
352extern void xp_exit_sn2(void);
353extern void xp_exit_uv(void);
354
384#endif /* _DRIVERS_MISC_SGIXP_XP_H */ 355#endif /* _DRIVERS_MISC_SGIXP_XP_H */
diff --git a/drivers/misc/sgi-xp/xp_main.c b/drivers/misc/sgi-xp/xp_main.c
index 196480b691a1..66a1d19e08ad 100644
--- a/drivers/misc/sgi-xp/xp_main.c
+++ b/drivers/misc/sgi-xp/xp_main.c
@@ -14,29 +14,48 @@
14 * 14 *
15 */ 15 */
16 16
17#include <linux/kernel.h>
18#include <linux/interrupt.h>
19#include <linux/module.h> 17#include <linux/module.h>
20#include <linux/mutex.h> 18#include <linux/device.h>
21#include <asm/sn/intr.h>
22#include <asm/sn/sn_sal.h>
23#include "xp.h" 19#include "xp.h"
24 20
25/* 21/* define the XP debug device structures to be used with dev_dbg() et al */
26 * The export of xp_nofault_PIOR needs to happen here since it is defined 22
27 * in drivers/misc/sgi-xp/xp_nofault.S. The target of the nofault read is 23struct device_driver xp_dbg_name = {
28 * defined here. 24 .name = "xp"
29 */ 25};
30EXPORT_SYMBOL_GPL(xp_nofault_PIOR); 26
27struct device xp_dbg_subname = {
28 .bus_id = {0}, /* set to "" */
29 .driver = &xp_dbg_name
30};
31
32struct device *xp = &xp_dbg_subname;
33
34/* max #of partitions possible */
35short xp_max_npartitions;
36EXPORT_SYMBOL_GPL(xp_max_npartitions);
37
38short xp_partition_id;
39EXPORT_SYMBOL_GPL(xp_partition_id);
40
41u8 xp_region_size;
42EXPORT_SYMBOL_GPL(xp_region_size);
43
44unsigned long (*xp_pa) (void *addr);
45EXPORT_SYMBOL_GPL(xp_pa);
46
47enum xp_retval (*xp_remote_memcpy) (unsigned long dst_gpa,
48 const unsigned long src_gpa, size_t len);
49EXPORT_SYMBOL_GPL(xp_remote_memcpy);
31 50
32u64 xp_nofault_PIOR_target; 51int (*xp_cpu_to_nasid) (int cpuid);
33EXPORT_SYMBOL_GPL(xp_nofault_PIOR_target); 52EXPORT_SYMBOL_GPL(xp_cpu_to_nasid);
34 53
35/* 54/*
36 * xpc_registrations[] keeps track of xpc_connect()'s done by the kernel-level 55 * xpc_registrations[] keeps track of xpc_connect()'s done by the kernel-level
37 * users of XPC. 56 * users of XPC.
38 */ 57 */
39struct xpc_registration xpc_registrations[XPC_NCHANNELS]; 58struct xpc_registration xpc_registrations[XPC_MAX_NCHANNELS];
40EXPORT_SYMBOL_GPL(xpc_registrations); 59EXPORT_SYMBOL_GPL(xpc_registrations);
41 60
42/* 61/*
@@ -51,10 +70,9 @@ xpc_notloaded(void)
51struct xpc_interface xpc_interface = { 70struct xpc_interface xpc_interface = {
52 (void (*)(int))xpc_notloaded, 71 (void (*)(int))xpc_notloaded,
53 (void (*)(int))xpc_notloaded, 72 (void (*)(int))xpc_notloaded,
54 (enum xp_retval(*)(short, int, u32, void **))xpc_notloaded, 73 (enum xp_retval(*)(short, int, u32, void *, u16))xpc_notloaded,
55 (enum xp_retval(*)(short, int, void *))xpc_notloaded, 74 (enum xp_retval(*)(short, int, u32, void *, u16, xpc_notify_func,
56 (enum xp_retval(*)(short, int, void *, xpc_notify_func, void *)) 75 void *))xpc_notloaded,
57 xpc_notloaded,
58 (void (*)(short, int, void *))xpc_notloaded, 76 (void (*)(short, int, void *))xpc_notloaded,
59 (enum xp_retval(*)(short, void *))xpc_notloaded 77 (enum xp_retval(*)(short, void *))xpc_notloaded
60}; 78};
@@ -66,16 +84,14 @@ EXPORT_SYMBOL_GPL(xpc_interface);
66void 84void
67xpc_set_interface(void (*connect) (int), 85xpc_set_interface(void (*connect) (int),
68 void (*disconnect) (int), 86 void (*disconnect) (int),
69 enum xp_retval (*allocate) (short, int, u32, void **), 87 enum xp_retval (*send) (short, int, u32, void *, u16),
70 enum xp_retval (*send) (short, int, void *), 88 enum xp_retval (*send_notify) (short, int, u32, void *, u16,
71 enum xp_retval (*send_notify) (short, int, void *,
72 xpc_notify_func, void *), 89 xpc_notify_func, void *),
73 void (*received) (short, int, void *), 90 void (*received) (short, int, void *),
74 enum xp_retval (*partid_to_nasids) (short, void *)) 91 enum xp_retval (*partid_to_nasids) (short, void *))
75{ 92{
76 xpc_interface.connect = connect; 93 xpc_interface.connect = connect;
77 xpc_interface.disconnect = disconnect; 94 xpc_interface.disconnect = disconnect;
78 xpc_interface.allocate = allocate;
79 xpc_interface.send = send; 95 xpc_interface.send = send;
80 xpc_interface.send_notify = send_notify; 96 xpc_interface.send_notify = send_notify;
81 xpc_interface.received = received; 97 xpc_interface.received = received;
@@ -91,13 +107,11 @@ xpc_clear_interface(void)
91{ 107{
92 xpc_interface.connect = (void (*)(int))xpc_notloaded; 108 xpc_interface.connect = (void (*)(int))xpc_notloaded;
93 xpc_interface.disconnect = (void (*)(int))xpc_notloaded; 109 xpc_interface.disconnect = (void (*)(int))xpc_notloaded;
94 xpc_interface.allocate = (enum xp_retval(*)(short, int, u32, 110 xpc_interface.send = (enum xp_retval(*)(short, int, u32, void *, u16))
95 void **))xpc_notloaded;
96 xpc_interface.send = (enum xp_retval(*)(short, int, void *))
97 xpc_notloaded; 111 xpc_notloaded;
98 xpc_interface.send_notify = (enum xp_retval(*)(short, int, void *, 112 xpc_interface.send_notify = (enum xp_retval(*)(short, int, u32, void *,
99 xpc_notify_func, 113 u16, xpc_notify_func,
100 void *))xpc_notloaded; 114 void *))xpc_notloaded;
101 xpc_interface.received = (void (*)(short, int, void *)) 115 xpc_interface.received = (void (*)(short, int, void *))
102 xpc_notloaded; 116 xpc_notloaded;
103 xpc_interface.partid_to_nasids = (enum xp_retval(*)(short, void *)) 117 xpc_interface.partid_to_nasids = (enum xp_retval(*)(short, void *))
@@ -135,11 +149,14 @@ xpc_connect(int ch_number, xpc_channel_func func, void *key, u16 payload_size,
135{ 149{
136 struct xpc_registration *registration; 150 struct xpc_registration *registration;
137 151
138 DBUG_ON(ch_number < 0 || ch_number >= XPC_NCHANNELS); 152 DBUG_ON(ch_number < 0 || ch_number >= XPC_MAX_NCHANNELS);
139 DBUG_ON(payload_size == 0 || nentries == 0); 153 DBUG_ON(payload_size == 0 || nentries == 0);
140 DBUG_ON(func == NULL); 154 DBUG_ON(func == NULL);
141 DBUG_ON(assigned_limit == 0 || idle_limit > assigned_limit); 155 DBUG_ON(assigned_limit == 0 || idle_limit > assigned_limit);
142 156
157 if (XPC_MSG_SIZE(payload_size) > XPC_MSG_MAX_SIZE)
158 return xpPayloadTooBig;
159
143 registration = &xpc_registrations[ch_number]; 160 registration = &xpc_registrations[ch_number];
144 161
145 if (mutex_lock_interruptible(&registration->mutex) != 0) 162 if (mutex_lock_interruptible(&registration->mutex) != 0)
@@ -152,7 +169,7 @@ xpc_connect(int ch_number, xpc_channel_func func, void *key, u16 payload_size,
152 } 169 }
153 170
154 /* register the channel for connection */ 171 /* register the channel for connection */
155 registration->msg_size = XPC_MSG_SIZE(payload_size); 172 registration->entry_size = XPC_MSG_SIZE(payload_size);
156 registration->nentries = nentries; 173 registration->nentries = nentries;
157 registration->assigned_limit = assigned_limit; 174 registration->assigned_limit = assigned_limit;
158 registration->idle_limit = idle_limit; 175 registration->idle_limit = idle_limit;
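Illustrative only: with the new check, a registration whose messages would not fit within XPC_MSG_MAX_SIZE is refused up front. A hypothetical caller might guard for it like this (example_ch_func is an assumed xpc_channel_func, and the numeric limits are made up):

	enum xp_retval ret;

	ret = xpc_connect(XPC_NET_CHANNEL, example_ch_func, NULL,
			  100,	/* payload_size; XPC_MSG_SIZE(100) <= XPC_MSG_MAX_SIZE */
			  128,	/* nentries */
			  4,	/* assigned_limit */
			  2);	/* idle_limit */
	if (ret == xpPayloadTooBig)
		dev_err(xp, "payload too large for a message slot\n");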
@@ -185,7 +202,7 @@ xpc_disconnect(int ch_number)
185{ 202{
186 struct xpc_registration *registration; 203 struct xpc_registration *registration;
187 204
188 DBUG_ON(ch_number < 0 || ch_number >= XPC_NCHANNELS); 205 DBUG_ON(ch_number < 0 || ch_number >= XPC_MAX_NCHANNELS);
189 206
190 registration = &xpc_registrations[ch_number]; 207 registration = &xpc_registrations[ch_number];
191 208
@@ -206,7 +223,7 @@ xpc_disconnect(int ch_number)
206 registration->func = NULL; 223 registration->func = NULL;
207 registration->key = NULL; 224 registration->key = NULL;
208 registration->nentries = 0; 225 registration->nentries = 0;
209 registration->msg_size = 0; 226 registration->entry_size = 0;
210 registration->assigned_limit = 0; 227 registration->assigned_limit = 0;
211 registration->idle_limit = 0; 228 registration->idle_limit = 0;
212 229
@@ -221,39 +238,21 @@ EXPORT_SYMBOL_GPL(xpc_disconnect);
221int __init 238int __init
222xp_init(void) 239xp_init(void)
223{ 240{
224 int ret, ch_number; 241 enum xp_retval ret;
225 u64 func_addr = *(u64 *)xp_nofault_PIOR; 242 int ch_number;
226 u64 err_func_addr = *(u64 *)xp_error_PIOR;
227
228 if (!ia64_platform_is("sn2"))
229 return -ENODEV;
230 243
231 /* 244 if (is_shub())
232 * Register a nofault code region which performs a cross-partition 245 ret = xp_init_sn2();
233 * PIO read. If the PIO read times out, the MCA handler will consume 246 else if (is_uv())
234 * the error and return to a kernel-provided instruction to indicate 247 ret = xp_init_uv();
235 * an error. This PIO read exists because it is guaranteed to timeout
236 * if the destination is down (AMO operations do not timeout on at
237 * least some CPUs on Shubs <= v1.2, which unfortunately we have to
238 * work around).
239 */
240 ret = sn_register_nofault_code(func_addr, err_func_addr, err_func_addr,
241 1, 1);
242 if (ret != 0) {
243 printk(KERN_ERR "XP: can't register nofault code, error=%d\n",
244 ret);
245 }
246 /*
247 * Setup the nofault PIO read target. (There is no special reason why
248 * SH_IPI_ACCESS was selected.)
249 */
250 if (is_shub2())
251 xp_nofault_PIOR_target = SH2_IPI_ACCESS0;
252 else 248 else
253 xp_nofault_PIOR_target = SH1_IPI_ACCESS; 249 ret = xpUnsupported;
250
251 if (ret != xpSuccess)
252 return -ENODEV;
254 253
255 /* initialize the connection registration mutex */ 254 /* initialize the connection registration mutex */
256 for (ch_number = 0; ch_number < XPC_NCHANNELS; ch_number++) 255 for (ch_number = 0; ch_number < XPC_MAX_NCHANNELS; ch_number++)
257 mutex_init(&xpc_registrations[ch_number].mutex); 256 mutex_init(&xpc_registrations[ch_number].mutex);
258 257
259 return 0; 258 return 0;
@@ -264,12 +263,10 @@ module_init(xp_init);
264void __exit 263void __exit
265xp_exit(void) 264xp_exit(void)
266{ 265{
267 u64 func_addr = *(u64 *)xp_nofault_PIOR; 266 if (is_shub())
268 u64 err_func_addr = *(u64 *)xp_error_PIOR; 267 xp_exit_sn2();
269 268 else if (is_uv())
270 /* unregister the PIO read nofault code region */ 269 xp_exit_uv();
271 (void)sn_register_nofault_code(func_addr, err_func_addr,
272 err_func_addr, 1, 0);
273} 270}
274 271
275module_exit(xp_exit); 272module_exit(xp_exit);
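A rough sketch (not part of the patch) of what this refactor buys the common code: once xp_init_sn2() or xp_init_uv() has filled in the hooks, callers stay architecture neutral by going through the function pointers. The example_pull() helper below is hypothetical.

/* illustrative common-code path, sketch only */
static enum xp_retval
example_pull(void *local_buf, unsigned long remote_src_pa, size_t nbytes)
{
	unsigned long dst_pa = xp_pa(local_buf);	/* virtual -> physical/global address */

	return xp_remote_memcpy(dst_pa, remote_src_pa, nbytes);
}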
diff --git a/drivers/misc/sgi-xp/xp_sn2.c b/drivers/misc/sgi-xp/xp_sn2.c
new file mode 100644
index 000000000000..1440134caf31
--- /dev/null
+++ b/drivers/misc/sgi-xp/xp_sn2.c
@@ -0,0 +1,146 @@
1/*
2 * This file is subject to the terms and conditions of the GNU General Public
3 * License. See the file "COPYING" in the main directory of this archive
4 * for more details.
5 *
6 * Copyright (c) 2008 Silicon Graphics, Inc. All Rights Reserved.
7 */
8
9/*
10 * Cross Partition (XP) sn2-based functions.
11 *
12 * Architecture specific implementation of common functions.
13 */
14
15#include <linux/module.h>
16#include <linux/device.h>
17#include <asm/sn/bte.h>
18#include <asm/sn/sn_sal.h>
19#include "xp.h"
20
21/*
22 * The export of xp_nofault_PIOR needs to happen here since it is defined
23 * in drivers/misc/sgi-xp/xp_nofault.S. The target of the nofault read is
24 * defined here.
25 */
26EXPORT_SYMBOL_GPL(xp_nofault_PIOR);
27
28u64 xp_nofault_PIOR_target;
29EXPORT_SYMBOL_GPL(xp_nofault_PIOR_target);
30
31/*
32 * Register a nofault code region which performs a cross-partition PIO read.
33 * If the PIO read times out, the MCA handler will consume the error and
34 * return to a kernel-provided instruction to indicate an error. This PIO read
35 * exists because it is guaranteed to timeout if the destination is down
36 * (amo operations do not timeout on at least some CPUs on Shubs <= v1.2,
37 * which unfortunately we have to work around).
38 */
39static enum xp_retval
40xp_register_nofault_code_sn2(void)
41{
42 int ret;
43 u64 func_addr;
44 u64 err_func_addr;
45
46 func_addr = *(u64 *)xp_nofault_PIOR;
47 err_func_addr = *(u64 *)xp_error_PIOR;
48 ret = sn_register_nofault_code(func_addr, err_func_addr, err_func_addr,
49 1, 1);
50 if (ret != 0) {
51 dev_err(xp, "can't register nofault code, error=%d\n", ret);
52 return xpSalError;
53 }
54 /*
55 * Setup the nofault PIO read target. (There is no special reason why
56 * SH_IPI_ACCESS was selected.)
57 */
58 if (is_shub1())
59 xp_nofault_PIOR_target = SH1_IPI_ACCESS;
60 else if (is_shub2())
61 xp_nofault_PIOR_target = SH2_IPI_ACCESS0;
62
63 return xpSuccess;
64}
65
66static void
67xp_unregister_nofault_code_sn2(void)
68{
69 u64 func_addr = *(u64 *)xp_nofault_PIOR;
70 u64 err_func_addr = *(u64 *)xp_error_PIOR;
71
72 /* unregister the PIO read nofault code region */
73 (void)sn_register_nofault_code(func_addr, err_func_addr,
74 err_func_addr, 1, 0);
75}
76
77/*
78 * Convert a virtual memory address to a physical memory address.
79 */
80static unsigned long
81xp_pa_sn2(void *addr)
82{
83 return __pa(addr);
84}
85
86/*
87 * Wrapper for bte_copy().
88 *
89 * dst_pa - physical address of the destination of the transfer.
90 * src_pa - physical address of the source of the transfer.
91 * len - number of bytes to transfer from source to destination.
92 *
93 * Note: xp_remote_memcpy_sn2() should never be called while holding a spinlock.
94 */
95static enum xp_retval
96xp_remote_memcpy_sn2(unsigned long dst_pa, const unsigned long src_pa,
97 size_t len)
98{
99 bte_result_t ret;
100
101 ret = bte_copy(src_pa, dst_pa, len, (BTE_NOTIFY | BTE_WACQUIRE), NULL);
102 if (ret == BTE_SUCCESS)
103 return xpSuccess;
104
105 if (is_shub2()) {
106 dev_err(xp, "bte_copy() on shub2 failed, error=0x%x dst_pa="
107 "0x%016lx src_pa=0x%016lx len=%ld\\n", ret, dst_pa,
108 src_pa, len);
109 } else {
110 dev_err(xp, "bte_copy() failed, error=%d dst_pa=0x%016lx "
111 "src_pa=0x%016lx len=%ld\\n", ret, dst_pa, src_pa, len);
112 }
113
114 return xpBteCopyError;
115}
116
117static int
118xp_cpu_to_nasid_sn2(int cpuid)
119{
120 return cpuid_to_nasid(cpuid);
121}
122
123enum xp_retval
124xp_init_sn2(void)
125{
126 BUG_ON(!is_shub());
127
128 xp_max_npartitions = XP_MAX_NPARTITIONS_SN2;
129 xp_partition_id = sn_partition_id;
130 xp_region_size = sn_region_size;
131
132 xp_pa = xp_pa_sn2;
133 xp_remote_memcpy = xp_remote_memcpy_sn2;
134 xp_cpu_to_nasid = xp_cpu_to_nasid_sn2;
135
136 return xp_register_nofault_code_sn2();
137}
138
139void
140xp_exit_sn2(void)
141{
142 BUG_ON(!is_shub());
143
144 xp_unregister_nofault_code_sn2();
145}
146
diff --git a/drivers/misc/sgi-xp/xp_uv.c b/drivers/misc/sgi-xp/xp_uv.c
new file mode 100644
index 000000000000..d9f7ce2510bc
--- /dev/null
+++ b/drivers/misc/sgi-xp/xp_uv.c
@@ -0,0 +1,72 @@
1/*
2 * This file is subject to the terms and conditions of the GNU General Public
3 * License. See the file "COPYING" in the main directory of this archive
4 * for more details.
5 *
6 * Copyright (c) 2008 Silicon Graphics, Inc. All Rights Reserved.
7 */
8
9/*
10 * Cross Partition (XP) uv-based functions.
11 *
12 * Architecture specific implementation of common functions.
13 *
14 */
15
16#include <linux/device.h>
17#include <asm/uv/uv_hub.h>
18#include "../sgi-gru/grukservices.h"
19#include "xp.h"
20
21/*
22 * Convert a virtual memory address to a physical memory address.
23 */
24static unsigned long
25xp_pa_uv(void *addr)
26{
27 return uv_gpa(addr);
28}
29
30static enum xp_retval
31xp_remote_memcpy_uv(unsigned long dst_gpa, const unsigned long src_gpa,
32 size_t len)
33{
34 int ret;
35
36 ret = gru_copy_gpa(dst_gpa, src_gpa, len);
37 if (ret == 0)
38 return xpSuccess;
39
40 dev_err(xp, "gru_copy_gpa() failed, dst_gpa=0x%016lx src_gpa=0x%016lx "
41 "len=%ld\n", dst_gpa, src_gpa, len);
42 return xpGruCopyError;
43}
44
45static int
46xp_cpu_to_nasid_uv(int cpuid)
47{
48 /* ??? Is this same as sn2 nasid in mach/part bitmaps set up by SAL? */
49 return UV_PNODE_TO_NASID(uv_cpu_to_pnode(cpuid));
50}
51
52enum xp_retval
53xp_init_uv(void)
54{
55 BUG_ON(!is_uv());
56
57 xp_max_npartitions = XP_MAX_NPARTITIONS_UV;
58 xp_partition_id = 0; /* !!! not correct value */
59 xp_region_size = 0; /* !!! not correct value */
60
61 xp_pa = xp_pa_uv;
62 xp_remote_memcpy = xp_remote_memcpy_uv;
63 xp_cpu_to_nasid = xp_cpu_to_nasid_uv;
64
65 return xpSuccess;
66}
67
68void
69xp_exit_uv(void)
70{
71 BUG_ON(!is_uv());
72}
diff --git a/drivers/misc/sgi-xp/xpc.h b/drivers/misc/sgi-xp/xpc.h
index 11ac267ed68f..619208d61862 100644
--- a/drivers/misc/sgi-xp/xpc.h
+++ b/drivers/misc/sgi-xp/xpc.h
@@ -13,18 +13,10 @@
13#ifndef _DRIVERS_MISC_SGIXP_XPC_H 13#ifndef _DRIVERS_MISC_SGIXP_XPC_H
14#define _DRIVERS_MISC_SGIXP_XPC_H 14#define _DRIVERS_MISC_SGIXP_XPC_H
15 15
16#include <linux/interrupt.h> 16#include <linux/wait.h>
17#include <linux/sysctl.h>
18#include <linux/device.h>
19#include <linux/mutex.h>
20#include <linux/completion.h> 17#include <linux/completion.h>
21#include <asm/pgtable.h> 18#include <linux/timer.h>
22#include <asm/processor.h> 19#include <linux/sched.h>
23#include <asm/sn/bte.h>
24#include <asm/sn/clksupport.h>
25#include <asm/sn/addrs.h>
26#include <asm/sn/mspec.h>
27#include <asm/sn/shub_mmr.h>
28#include "xp.h" 20#include "xp.h"
29 21
30/* 22/*
@@ -36,23 +28,7 @@
36#define XPC_VERSION_MAJOR(_v) ((_v) >> 4) 28#define XPC_VERSION_MAJOR(_v) ((_v) >> 4)
37#define XPC_VERSION_MINOR(_v) ((_v) & 0xf) 29#define XPC_VERSION_MINOR(_v) ((_v) & 0xf)
38 30
39/* 31/* define frequency of the heartbeat and frequency how often it's checked */
40 * The next macros define word or bit representations for given
41 * C-brick nasid in either the SAL provided bit array representing
42 * nasids in the partition/machine or the AMO_t array used for
43 * inter-partition initiation communications.
44 *
45 * For SN2 machines, C-Bricks are alway even numbered NASIDs. As
46 * such, some space will be saved by insisting that nasid information
47 * passed from SAL always be packed for C-Bricks and the
48 * cross-partition interrupts use the same packing scheme.
49 */
50#define XPC_NASID_W_INDEX(_n) (((_n) / 64) / 2)
51#define XPC_NASID_B_INDEX(_n) (((_n) / 2) & (64 - 1))
52#define XPC_NASID_IN_ARRAY(_n, _p) ((_p)[XPC_NASID_W_INDEX(_n)] & \
53 (1UL << XPC_NASID_B_INDEX(_n)))
54#define XPC_NASID_FROM_W_B(_w, _b) (((_w) * 64 + (_b)) * 2)
55
56#define XPC_HB_DEFAULT_INTERVAL 5 /* incr HB every x secs */ 32#define XPC_HB_DEFAULT_INTERVAL 5 /* incr HB every x secs */
57#define XPC_HB_CHECK_DEFAULT_INTERVAL 20 /* check HB every x secs */ 33#define XPC_HB_CHECK_DEFAULT_INTERVAL 20 /* check HB every x secs */
58 34
@@ -72,11 +48,11 @@
72 * 48 *
73 * reserved page header 49 * reserved page header
74 * 50 *
75 * The first cacheline of the reserved page contains the header 51 * The first two 64-byte cachelines of the reserved page contain the
76 * (struct xpc_rsvd_page). Before SAL initialization has completed, 52 * header (struct xpc_rsvd_page). Before SAL initialization has completed,
77 * SAL has set up the following fields of the reserved page header: 53 * SAL has set up the following fields of the reserved page header:
78 * SAL_signature, SAL_version, partid, and nasids_size. The other 54 * SAL_signature, SAL_version, SAL_partid, and SAL_nasids_size. The
79 * fields are set up by XPC. (xpc_rsvd_page points to the local 55 * other fields are set up by XPC. (xpc_rsvd_page points to the local
80 * partition's reserved page.) 56 * partition's reserved page.)
81 * 57 *
82 * part_nasids mask 58 * part_nasids mask
@@ -87,14 +63,16 @@
87 * the actual nasids in the entire machine (mach_nasids). We're only 63 * the actual nasids in the entire machine (mach_nasids). We're only
88 * interested in the even numbered nasids (which contain the processors 64 * interested in the even numbered nasids (which contain the processors
89 * and/or memory), so we only need half as many bits to represent the 65 * and/or memory), so we only need half as many bits to represent the
90 * nasids. The part_nasids mask is located starting at the first cacheline 66 * nasids. When mapping nasid to bit in a mask (or bit to nasid) be sure
91 * following the reserved page header. The mach_nasids mask follows right 67 * to either divide or multiply by 2. The part_nasids mask is located
92 * after the part_nasids mask. The size in bytes of each mask is reflected 68 * starting at the first cacheline following the reserved page header. The
93 * by the reserved page header field 'nasids_size'. (Local partition's 69 * mach_nasids mask follows right after the part_nasids mask. The size in
94 * mask pointers are xpc_part_nasids and xpc_mach_nasids.) 70 * bytes of each mask is reflected by the reserved page header field
71 * 'SAL_nasids_size'. (Local partition's mask pointers are xpc_part_nasids
72 * and xpc_mach_nasids.)
95 * 73 *
96 * vars 74 * vars (ia64-sn2 only)
97 * vars part 75 * vars part (ia64-sn2 only)
98 * 76 *
99 * Immediately following the mach_nasids mask are the XPC variables 77 * Immediately following the mach_nasids mask are the XPC variables
100 * required by other partitions. First are those that are generic to all 78 * required by other partitions. First are those that are generic to all
@@ -102,43 +80,26 @@
102 * which are partition specific (vars part). These are setup by XPC. 80 * which are partition specific (vars part). These are setup by XPC.
103 * (Local partition's vars pointers are xpc_vars and xpc_vars_part.) 81 * (Local partition's vars pointers are xpc_vars and xpc_vars_part.)
104 * 82 *
105 * Note: Until vars_pa is set, the partition XPC code has not been initialized. 83 * Note: Until 'ts_jiffies' is set non-zero, the partition XPC code has not been
84 * initialized.
106 */ 85 */
107struct xpc_rsvd_page { 86struct xpc_rsvd_page {
108 u64 SAL_signature; /* SAL: unique signature */ 87 u64 SAL_signature; /* SAL: unique signature */
109 u64 SAL_version; /* SAL: version */ 88 u64 SAL_version; /* SAL: version */
110 u8 partid; /* SAL: partition ID */ 89 short SAL_partid; /* SAL: partition ID */
90 short max_npartitions; /* value of XPC_MAX_PARTITIONS */
111 u8 version; 91 u8 version;
112 u8 pad1[6]; /* align to next u64 in cacheline */ 92 u8 pad1[3]; /* align to next u64 in 1st 64-byte cacheline */
113 u64 vars_pa; /* physical address of struct xpc_vars */ 93 union {
114 struct timespec stamp; /* time when reserved page was setup by XPC */ 94 unsigned long vars_pa; /* phys address of struct xpc_vars */
115 u64 pad2[9]; /* align to last u64 in cacheline */ 95 unsigned long activate_mq_gpa; /* gru phy addr of activate_mq */
116 u64 nasids_size; /* SAL: size of each nasid mask in bytes */ 96 } sn;
97 unsigned long ts_jiffies; /* timestamp when rsvd pg was setup by XPC */
98 u64 pad2[10]; /* align to last u64 in 2nd 64-byte cacheline */
99 u64 SAL_nasids_size; /* SAL: size of each nasid mask in bytes */
117}; 100};
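Illustrative only: because ts_jiffies doubles as the "XPC is initialized" indicator, code inspecting a pulled copy of a remote partition's reserved page (remote_rp below is a hypothetical pointer to such a copy) can bail out while it is still zero:

	if (remote_rp->ts_jiffies == 0)		/* remote XPC has not finished setup */
		return xpRsvdPageNotSet;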
118 101
119#define XPC_RP_VERSION _XPC_VERSION(1, 1) /* version 1.1 of the reserved page */ 102#define XPC_RP_VERSION _XPC_VERSION(2, 0) /* version 2.0 of the reserved page */
120
121#define XPC_SUPPORTS_RP_STAMP(_version) \
122 (_version >= _XPC_VERSION(1, 1))
123
124/*
125 * compare stamps - the return value is:
126 *
127 * < 0, if stamp1 < stamp2
128 * = 0, if stamp1 == stamp2
129 * > 0, if stamp1 > stamp2
130 */
131static inline int
132xpc_compare_stamps(struct timespec *stamp1, struct timespec *stamp2)
133{
134 int ret;
135
136 ret = stamp1->tv_sec - stamp2->tv_sec;
137 if (ret == 0)
138 ret = stamp1->tv_nsec - stamp2->tv_nsec;
139
140 return ret;
141}
142 103
143/* 104/*
144 * Define the structures by which XPC variables can be exported to other 105 * Define the structures by which XPC variables can be exported to other
@@ -154,85 +115,40 @@ xpc_compare_stamps(struct timespec *stamp1, struct timespec *stamp2)
154 * reflected by incrementing either the major or minor version numbers 115 * reflected by incrementing either the major or minor version numbers
155 * of struct xpc_vars. 116 * of struct xpc_vars.
156 */ 117 */
157struct xpc_vars { 118struct xpc_vars_sn2 {
158 u8 version; 119 u8 version;
159 u64 heartbeat; 120 u64 heartbeat;
160 u64 heartbeating_to_mask; 121 DECLARE_BITMAP(heartbeating_to_mask, XP_MAX_NPARTITIONS_SN2);
161 u64 heartbeat_offline; /* if 0, heartbeat should be changing */ 122 u64 heartbeat_offline; /* if 0, heartbeat should be changing */
162 int act_nasid; 123 int activate_IRQ_nasid;
163 int act_phys_cpuid; 124 int activate_IRQ_phys_cpuid;
164 u64 vars_part_pa; 125 unsigned long vars_part_pa;
165 u64 amos_page_pa; /* paddr of page of AMOs from MSPEC driver */ 126 unsigned long amos_page_pa;/* paddr of page of amos from MSPEC driver */
166 AMO_t *amos_page; /* vaddr of page of AMOs from MSPEC driver */ 127 struct amo *amos_page; /* vaddr of page of amos from MSPEC driver */
167}; 128};
168 129
169#define XPC_V_VERSION _XPC_VERSION(3, 1) /* version 3.1 of the cross vars */ 130#define XPC_V_VERSION _XPC_VERSION(3, 1) /* version 3.1 of the cross vars */
170 131
171#define XPC_SUPPORTS_DISENGAGE_REQUEST(_version) \
172 (_version >= _XPC_VERSION(3, 1))
173
174static inline int
175xpc_hb_allowed(short partid, struct xpc_vars *vars)
176{
177 return ((vars->heartbeating_to_mask & (1UL << partid)) != 0);
178}
179
180static inline void
181xpc_allow_hb(short partid, struct xpc_vars *vars)
182{
183 u64 old_mask, new_mask;
184
185 do {
186 old_mask = vars->heartbeating_to_mask;
187 new_mask = (old_mask | (1UL << partid));
188 } while (cmpxchg(&vars->heartbeating_to_mask, old_mask, new_mask) !=
189 old_mask);
190}
191
192static inline void
193xpc_disallow_hb(short partid, struct xpc_vars *vars)
194{
195 u64 old_mask, new_mask;
196
197 do {
198 old_mask = vars->heartbeating_to_mask;
199 new_mask = (old_mask & ~(1UL << partid));
200 } while (cmpxchg(&vars->heartbeating_to_mask, old_mask, new_mask) !=
201 old_mask);
202}
203
204/*
205 * The AMOs page consists of a number of AMO variables which are divided into
206 * four groups, The first two groups are used to identify an IRQ's sender.
207 * These two groups consist of 64 and 128 AMO variables respectively. The last
208 * two groups, consisting of just one AMO variable each, are used to identify
209 * the remote partitions that are currently engaged (from the viewpoint of
210 * the XPC running on the remote partition).
211 */
212#define XPC_NOTIFY_IRQ_AMOS 0
213#define XPC_ACTIVATE_IRQ_AMOS (XPC_NOTIFY_IRQ_AMOS + XP_MAX_PARTITIONS)
214#define XPC_ENGAGED_PARTITIONS_AMO (XPC_ACTIVATE_IRQ_AMOS + XP_NASID_MASK_WORDS)
215#define XPC_DISENGAGE_REQUEST_AMO (XPC_ENGAGED_PARTITIONS_AMO + 1)
216
217/* 132/*
218 * The following structure describes the per partition specific variables. 133 * The following structure describes the per partition specific variables.
219 * 134 *
220 * An array of these structures, one per partition, will be defined. As a 135 * An array of these structures, one per partition, will be defined. As a
221 * partition becomes active XPC will copy the array entry corresponding to 136 * partition becomes active XPC will copy the array entry corresponding to
222 * itself from that partition. It is desirable that the size of this 137 * itself from that partition. It is desirable that the size of this structure
223 * structure evenly divide into a cacheline, such that none of the entries 138 * evenly divides into a 128-byte cacheline, such that none of the entries in
224 * in this array crosses a cacheline boundary. As it is now, each entry 139 * this array crosses a 128-byte cacheline boundary. As it is now, each entry
225 * occupies half a cacheline. 140 * occupies 64-bytes.
226 */ 141 */
227struct xpc_vars_part { 142struct xpc_vars_part_sn2 {
228 u64 magic; 143 u64 magic;
229 144
230 u64 openclose_args_pa; /* physical address of open and close args */ 145 unsigned long openclose_args_pa; /* phys addr of open and close args */
231 u64 GPs_pa; /* physical address of Get/Put values */ 146 unsigned long GPs_pa; /* physical address of Get/Put values */
147
148 unsigned long chctl_amo_pa; /* physical address of chctl flags' amo */
232 149
233 u64 IPI_amo_pa; /* physical address of IPI AMO_t structure */ 150 int notify_IRQ_nasid; /* nasid of where to send notify IRQs */
234 int IPI_nasid; /* nasid of where to send IPIs */ 151 int notify_IRQ_phys_cpuid; /* CPUID of where to send notify IRQs */
235 int IPI_phys_cpuid; /* physical CPU ID of where to send IPIs */
236 152
237 u8 nchannels; /* #of defined channels supported */ 153 u8 nchannels; /* #of defined channels supported */
238 154
@@ -248,20 +164,95 @@ struct xpc_vars_part {
248 * MAGIC2 indicates that this partition has pulled the remote partititions 164 * MAGIC2 indicates that this partition has pulled the remote partititions
249 * per partition variables that pertain to this partition. 165 * per partition variables that pertain to this partition.
250 */ 166 */
251#define XPC_VP_MAGIC1 0x0053524156435058L /* 'XPCVARS\0'L (little endian) */ 167#define XPC_VP_MAGIC1_SN2 0x0053524156435058L /* 'XPCVARS\0'L (little endian) */
252#define XPC_VP_MAGIC2 0x0073726176435058L /* 'XPCvars\0'L (little endian) */ 168#define XPC_VP_MAGIC2_SN2 0x0073726176435058L /* 'XPCvars\0'L (little endian) */
253 169
254/* the reserved page sizes and offsets */ 170/* the reserved page sizes and offsets */
255 171
256#define XPC_RP_HEADER_SIZE L1_CACHE_ALIGN(sizeof(struct xpc_rsvd_page)) 172#define XPC_RP_HEADER_SIZE L1_CACHE_ALIGN(sizeof(struct xpc_rsvd_page))
257#define XPC_RP_VARS_SIZE L1_CACHE_ALIGN(sizeof(struct xpc_vars)) 173#define XPC_RP_VARS_SIZE L1_CACHE_ALIGN(sizeof(struct xpc_vars_sn2))
258 174
259#define XPC_RP_PART_NASIDS(_rp) ((u64 *)((u8 *)(_rp) + XPC_RP_HEADER_SIZE)) 175#define XPC_RP_PART_NASIDS(_rp) ((unsigned long *)((u8 *)(_rp) + \
260#define XPC_RP_MACH_NASIDS(_rp) (XPC_RP_PART_NASIDS(_rp) + xp_nasid_mask_words) 176 XPC_RP_HEADER_SIZE))
261#define XPC_RP_VARS(_rp) ((struct xpc_vars *)(XPC_RP_MACH_NASIDS(_rp) + \ 177#define XPC_RP_MACH_NASIDS(_rp) (XPC_RP_PART_NASIDS(_rp) + \
262 xp_nasid_mask_words)) 178 xpc_nasid_mask_nlongs)
263#define XPC_RP_VARS_PART(_rp) ((struct xpc_vars_part *) \ 179#define XPC_RP_VARS(_rp) ((struct xpc_vars_sn2 *) \
264 ((u8 *)XPC_RP_VARS(_rp) + XPC_RP_VARS_SIZE)) 180 (XPC_RP_MACH_NASIDS(_rp) + \
181 xpc_nasid_mask_nlongs))
182
183/*
184 * The activate_mq is used to send/receive GRU messages that affect XPC's
185 * heartbeat, partition active state, and channel state. This is UV only.
186 */
187struct xpc_activate_mq_msghdr_uv {
188 short partid; /* sender's partid */
189 u8 act_state; /* sender's act_state at time msg sent */
190 u8 type; /* message's type */
191 unsigned long rp_ts_jiffies; /* timestamp of sender's rp setup by XPC */
192};
193
194/* activate_mq defined message types */
195#define XPC_ACTIVATE_MQ_MSG_SYNC_ACT_STATE_UV 0
196#define XPC_ACTIVATE_MQ_MSG_INC_HEARTBEAT_UV 1
197#define XPC_ACTIVATE_MQ_MSG_OFFLINE_HEARTBEAT_UV 2
198#define XPC_ACTIVATE_MQ_MSG_ONLINE_HEARTBEAT_UV 3
199
200#define XPC_ACTIVATE_MQ_MSG_ACTIVATE_REQ_UV 4
201#define XPC_ACTIVATE_MQ_MSG_DEACTIVATE_REQ_UV 5
202
203#define XPC_ACTIVATE_MQ_MSG_CHCTL_CLOSEREQUEST_UV 6
204#define XPC_ACTIVATE_MQ_MSG_CHCTL_CLOSEREPLY_UV 7
205#define XPC_ACTIVATE_MQ_MSG_CHCTL_OPENREQUEST_UV 8
206#define XPC_ACTIVATE_MQ_MSG_CHCTL_OPENREPLY_UV 9
207
208#define XPC_ACTIVATE_MQ_MSG_MARK_ENGAGED_UV 10
209#define XPC_ACTIVATE_MQ_MSG_MARK_DISENGAGED_UV 11
210
211struct xpc_activate_mq_msg_uv {
212 struct xpc_activate_mq_msghdr_uv hdr;
213};
214
215struct xpc_activate_mq_msg_heartbeat_req_uv {
216 struct xpc_activate_mq_msghdr_uv hdr;
217 u64 heartbeat;
218};
219
220struct xpc_activate_mq_msg_activate_req_uv {
221 struct xpc_activate_mq_msghdr_uv hdr;
222 unsigned long rp_gpa;
223 unsigned long activate_mq_gpa;
224};
225
226struct xpc_activate_mq_msg_deactivate_req_uv {
227 struct xpc_activate_mq_msghdr_uv hdr;
228 enum xp_retval reason;
229};
230
231struct xpc_activate_mq_msg_chctl_closerequest_uv {
232 struct xpc_activate_mq_msghdr_uv hdr;
233 short ch_number;
234 enum xp_retval reason;
235};
236
237struct xpc_activate_mq_msg_chctl_closereply_uv {
238 struct xpc_activate_mq_msghdr_uv hdr;
239 short ch_number;
240};
241
242struct xpc_activate_mq_msg_chctl_openrequest_uv {
243 struct xpc_activate_mq_msghdr_uv hdr;
244 short ch_number;
245 short entry_size; /* size of notify_mq's GRU messages */
246 short local_nentries; /* ??? Is this needed? What is? */
247};
248
249struct xpc_activate_mq_msg_chctl_openreply_uv {
250 struct xpc_activate_mq_msghdr_uv hdr;
251 short ch_number;
252 short remote_nentries; /* ??? Is this needed? What is? */
253 short local_nentries; /* ??? Is this needed? What is? */
254 unsigned long local_notify_mq_gpa;
255};
265 256
266/* 257/*
267 * Functions registered by add_timer() or called by kernel_thread() only 258 * Functions registered by add_timer() or called by kernel_thread() only
@@ -270,22 +261,22 @@ struct xpc_vars_part {
270 * the passed argument. 261 * the passed argument.
271 */ 262 */
272#define XPC_PACK_ARGS(_arg1, _arg2) \ 263#define XPC_PACK_ARGS(_arg1, _arg2) \
273 ((((u64) _arg1) & 0xffffffff) | \ 264 ((((u64)_arg1) & 0xffffffff) | \
274 ((((u64) _arg2) & 0xffffffff) << 32)) 265 ((((u64)_arg2) & 0xffffffff) << 32))
275 266
276#define XPC_UNPACK_ARG1(_args) (((u64) _args) & 0xffffffff) 267#define XPC_UNPACK_ARG1(_args) (((u64)_args) & 0xffffffff)
277#define XPC_UNPACK_ARG2(_args) ((((u64) _args) >> 32) & 0xffffffff) 268#define XPC_UNPACK_ARG2(_args) ((((u64)_args) >> 32) & 0xffffffff)
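For illustration (not from the patch): the intended pattern is to pack both identifiers into the single argument at the registration site and unpack them again inside the callback. The old-style timer callback signature and the example_ names are assumptions.

/* hypothetical handler for a timer armed with
 *	example_timer.data = XPC_PACK_ARGS(partid, ch_number);
 */
static void example_timeout(unsigned long data)
{
	short partid = (short)XPC_UNPACK_ARG1(data);
	int ch_number = (int)XPC_UNPACK_ARG2(data);

	/* look up the partition/channel from partid and ch_number ... */
}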
278 269
279/* 270/*
280 * Define a Get/Put value pair (pointers) used with a message queue. 271 * Define a Get/Put value pair (pointers) used with a message queue.
281 */ 272 */
282struct xpc_gp { 273struct xpc_gp_sn2 {
283 s64 get; /* Get value */ 274 s64 get; /* Get value */
284 s64 put; /* Put value */ 275 s64 put; /* Put value */
285}; 276};
286 277
287#define XPC_GP_SIZE \ 278#define XPC_GP_SIZE \
288 L1_CACHE_ALIGN(sizeof(struct xpc_gp) * XPC_NCHANNELS) 279 L1_CACHE_ALIGN(sizeof(struct xpc_gp_sn2) * XPC_MAX_NCHANNELS)
289 280
290/* 281/*
291 * Define a structure that contains arguments associated with opening and 282 * Define a structure that contains arguments associated with opening and
@@ -293,31 +284,89 @@ struct xpc_gp {
293 */ 284 */
294struct xpc_openclose_args { 285struct xpc_openclose_args {
295 u16 reason; /* reason why channel is closing */ 286 u16 reason; /* reason why channel is closing */
296 u16 msg_size; /* sizeof each message entry */ 287 u16 entry_size; /* sizeof each message entry */
297 u16 remote_nentries; /* #of message entries in remote msg queue */ 288 u16 remote_nentries; /* #of message entries in remote msg queue */
298 u16 local_nentries; /* #of message entries in local msg queue */ 289 u16 local_nentries; /* #of message entries in local msg queue */
299 u64 local_msgqueue_pa; /* physical address of local message queue */ 290 unsigned long local_msgqueue_pa; /* phys addr of local message queue */
300}; 291};
301 292
302#define XPC_OPENCLOSE_ARGS_SIZE \ 293#define XPC_OPENCLOSE_ARGS_SIZE \
303 L1_CACHE_ALIGN(sizeof(struct xpc_openclose_args) * XPC_NCHANNELS) 294 L1_CACHE_ALIGN(sizeof(struct xpc_openclose_args) * \
295 XPC_MAX_NCHANNELS)
304 296
305/* struct xpc_msg flags */
306 297
307#define XPC_M_DONE 0x01 /* msg has been received/consumed */ 298/*
308#define XPC_M_READY 0x02 /* msg is ready to be sent */ 299 * Structures to define a fifo singly-linked list.
309#define XPC_M_INTERRUPT 0x04 /* send interrupt when msg consumed */ 300 */
310 301
311#define XPC_MSG_ADDRESS(_payload) \ 302struct xpc_fifo_entry_uv {
312 ((struct xpc_msg *)((u8 *)(_payload) - XPC_MSG_PAYLOAD_OFFSET)) 303 struct xpc_fifo_entry_uv *next;
304};
305
306struct xpc_fifo_head_uv {
307 struct xpc_fifo_entry_uv *first;
308 struct xpc_fifo_entry_uv *last;
309 spinlock_t lock;
310 int n_entries;
311};
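The list helpers themselves live elsewhere in XPC and are not part of this hunk; purely as an illustration of how such a head/entry pair is meant to be manipulated under its lock, a tail insert might look like this (the example_ name is an assumption):

/* hypothetical push onto the tail of an xpc_fifo_head_uv */
static void example_fifo_put(struct xpc_fifo_head_uv *head,
			     struct xpc_fifo_entry_uv *entry)
{
	unsigned long irq_flags;

	entry->next = NULL;
	spin_lock_irqsave(&head->lock, irq_flags);
	if (head->last != NULL)
		head->last->next = entry;
	else
		head->first = entry;
	head->last = entry;
	head->n_entries++;
	spin_unlock_irqrestore(&head->lock, irq_flags);
}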
313 312
314/* 313/*
315 * Defines notify entry. 314 * Define a sn2 styled message.
315 *
316 * A user-defined message resides in the payload area. The max size of the
317 * payload is defined by the user via xpc_connect().
318 *
319 * The size of a message entry (within a message queue) must be a 128-byte
320 * cacheline sized multiple in order to facilitate the BTE transfer of messages
321 * from one message queue to another.
322 */
323struct xpc_msg_sn2 {
324 u8 flags; /* FOR XPC INTERNAL USE ONLY */
325 u8 reserved[7]; /* FOR XPC INTERNAL USE ONLY */
326 s64 number; /* FOR XPC INTERNAL USE ONLY */
327
328 u64 payload; /* user defined portion of message */
329};
330
331/* struct xpc_msg_sn2 flags */
332
333#define XPC_M_SN2_DONE 0x01 /* msg has been received/consumed */
334#define XPC_M_SN2_READY 0x02 /* msg is ready to be sent */
335#define XPC_M_SN2_INTERRUPT 0x04 /* send interrupt when msg consumed */
336
337/*
338 * The format of a uv XPC notify_mq GRU message is as follows:
339 *
340 * A user-defined message resides in the payload area. The max size of the
341 * payload is defined by the user via xpc_connect().
342 *
343 * The size of a message (payload and header) sent via the GRU must be either 1
344 * or 2 GRU_CACHE_LINE_BYTES in length.
345 */
346
347struct xpc_notify_mq_msghdr_uv {
348 union {
349 unsigned int gru_msg_hdr; /* FOR GRU INTERNAL USE ONLY */
350 struct xpc_fifo_entry_uv next; /* FOR XPC INTERNAL USE ONLY */
351 } u;
352 short partid; /* FOR XPC INTERNAL USE ONLY */
353 u8 ch_number; /* FOR XPC INTERNAL USE ONLY */
354 u8 size; /* FOR XPC INTERNAL USE ONLY */
355 unsigned int msg_slot_number; /* FOR XPC INTERNAL USE ONLY */
356};
357
358struct xpc_notify_mq_msg_uv {
359 struct xpc_notify_mq_msghdr_uv hdr;
360 unsigned long payload;
361};
362
363/*
364 * Define sn2's notify entry.
316 * 365 *
317 * This is used to notify a message's sender that their message was received 366 * This is used to notify a message's sender that their message was received
318 * and consumed by the intended recipient. 367 * and consumed by the intended recipient.
319 */ 368 */
320struct xpc_notify { 369struct xpc_notify_sn2 {
321 u8 type; /* type of notification */ 370 u8 type; /* type of notification */
322 371
323 /* the following two fields are only used if type == XPC_N_CALL */ 372 /* the following two fields are only used if type == XPC_N_CALL */
@@ -325,9 +374,20 @@ struct xpc_notify {
325 void *key; /* pointer to user's key */ 374 void *key; /* pointer to user's key */
326}; 375};
327 376
328/* struct xpc_notify type of notification */ 377/* struct xpc_notify_sn2 type of notification */
378
379#define XPC_N_CALL 0x01 /* notify function provided by user */
329 380
330#define XPC_N_CALL 0x01 /* notify function provided by user */ 381/*
382 * Define uv's version of the notify entry. It additionally is used to allocate
383 * a msg slot on the remote partition into which is copied a sent message.
384 */
385struct xpc_send_msg_slot_uv {
386 struct xpc_fifo_entry_uv next;
387 unsigned int msg_slot_number;
388 xpc_notify_func func; /* user's notify function */
389 void *key; /* pointer to user's key */
390};
331 391
332/* 392/*
333 * Define the structure that manages all the stuff required by a channel. In 393 * Define the structure that manages all the stuff required by a channel. In
@@ -339,8 +399,12 @@ struct xpc_notify {
339 * There is an array of these structures for each remote partition. It is 399 * There is an array of these structures for each remote partition. It is
340 * allocated at the time a partition becomes active. The array contains one 400 * allocated at the time a partition becomes active. The array contains one
341 * of these structures for each potential channel connection to that partition. 401 * of these structures for each potential channel connection to that partition.
402 */
403
404/*
405 * The following is sn2 only.
342 * 406 *
343 * Each of these structures manages two message queues (circular buffers). 407 * Each channel structure manages two message queues (circular buffers).
344 * They are allocated at the time a channel connection is made. One of 408 * They are allocated at the time a channel connection is made. One of
345 * these message queues (local_msgqueue) holds the locally created messages 409 * these message queues (local_msgqueue) holds the locally created messages
346 * that are destined for the remote partition. The other of these message 410 * that are destined for the remote partition. The other of these message
@@ -407,58 +471,72 @@ struct xpc_notify {
407 * new messages, by the clearing of the message flags of the acknowledged 471 * new messages, by the clearing of the message flags of the acknowledged
408 * messages. 472 * messages.
409 */ 473 */
474
475struct xpc_channel_sn2 {
476 struct xpc_openclose_args *local_openclose_args; /* args passed on */
477 /* opening or closing of channel */
478
479 void *local_msgqueue_base; /* base address of kmalloc'd space */
480 struct xpc_msg_sn2 *local_msgqueue; /* local message queue */
481 void *remote_msgqueue_base; /* base address of kmalloc'd space */
482 struct xpc_msg_sn2 *remote_msgqueue; /* cached copy of remote */
483 /* partition's local message queue */
484 unsigned long remote_msgqueue_pa; /* phys addr of remote partition's */
485 /* local message queue */
486
487 struct xpc_notify_sn2 *notify_queue;/* notify queue for messages sent */
488
489 /* various flavors of local and remote Get/Put values */
490
491 struct xpc_gp_sn2 *local_GP; /* local Get/Put values */
492 struct xpc_gp_sn2 remote_GP; /* remote Get/Put values */
493 struct xpc_gp_sn2 w_local_GP; /* working local Get/Put values */
494 struct xpc_gp_sn2 w_remote_GP; /* working remote Get/Put values */
495 s64 next_msg_to_pull; /* Put value of next msg to pull */
496
497 struct mutex msg_to_pull_mutex; /* next msg to pull serialization */
498};
499
500struct xpc_channel_uv {
501 unsigned long remote_notify_mq_gpa; /* gru phys address of remote */
502 /* partition's notify mq */
503
504 struct xpc_send_msg_slot_uv *send_msg_slots;
505 struct xpc_notify_mq_msg_uv *recv_msg_slots;
506
507 struct xpc_fifo_head_uv msg_slot_free_list;
508 struct xpc_fifo_head_uv recv_msg_list; /* deliverable payloads */
509};
510
410struct xpc_channel { 511struct xpc_channel {
411 short partid; /* ID of remote partition connected */ 512 short partid; /* ID of remote partition connected */
412 spinlock_t lock; /* lock for updating this structure */ 513 spinlock_t lock; /* lock for updating this structure */
413 u32 flags; /* general flags */ 514 unsigned int flags; /* general flags */
414 515
415 enum xp_retval reason; /* reason why channel is disconnect'g */ 516 enum xp_retval reason; /* reason why channel is disconnect'g */
416 int reason_line; /* line# disconnect initiated from */ 517 int reason_line; /* line# disconnect initiated from */
417 518
418 u16 number; /* channel # */ 519 u16 number; /* channel # */
419 520
420 u16 msg_size; /* sizeof each msg entry */ 521 u16 entry_size; /* sizeof each msg entry */
421 u16 local_nentries; /* #of msg entries in local msg queue */ 522 u16 local_nentries; /* #of msg entries in local msg queue */
422 u16 remote_nentries; /* #of msg entries in remote msg queue */ 523 u16 remote_nentries; /* #of msg entries in remote msg queue */
423 524
424 void *local_msgqueue_base; /* base address of kmalloc'd space */
425 struct xpc_msg *local_msgqueue; /* local message queue */
426 void *remote_msgqueue_base; /* base address of kmalloc'd space */
427 struct xpc_msg *remote_msgqueue; /* cached copy of remote partition's */
428 /* local message queue */
429 u64 remote_msgqueue_pa; /* phys addr of remote partition's */
430 /* local message queue */
431
432 atomic_t references; /* #of external references to queues */ 525 atomic_t references; /* #of external references to queues */
433 526
434 atomic_t n_on_msg_allocate_wq; /* #on msg allocation wait queue */ 527 atomic_t n_on_msg_allocate_wq; /* #on msg allocation wait queue */
435 wait_queue_head_t msg_allocate_wq; /* msg allocation wait queue */ 528 wait_queue_head_t msg_allocate_wq; /* msg allocation wait queue */
436 529
437 u8 delayed_IPI_flags; /* IPI flags received, but delayed */ 530 u8 delayed_chctl_flags; /* chctl flags received, but delayed */
438 /* action until channel disconnected */ 531 /* action until channel disconnected */
439 532
440 /* queue of msg senders who want to be notified when msg received */
441
442 atomic_t n_to_notify; /* #of msg senders to notify */ 533 atomic_t n_to_notify; /* #of msg senders to notify */
443 struct xpc_notify *notify_queue; /* notify queue for messages sent */
444 534
445 xpc_channel_func func; /* user's channel function */ 535 xpc_channel_func func; /* user's channel function */
446 void *key; /* pointer to user's key */ 536 void *key; /* pointer to user's key */
447 537
448 struct mutex msg_to_pull_mutex; /* next msg to pull serialization */
449 struct completion wdisconnect_wait; /* wait for channel disconnect */ 538 struct completion wdisconnect_wait; /* wait for channel disconnect */
450 539
451 struct xpc_openclose_args *local_openclose_args; /* args passed on */
452 /* opening or closing of channel */
453
454 /* various flavors of local and remote Get/Put values */
455
456 struct xpc_gp *local_GP; /* local Get/Put values */
457 struct xpc_gp remote_GP; /* remote Get/Put values */
458 struct xpc_gp w_local_GP; /* working local Get/Put values */
459 struct xpc_gp w_remote_GP; /* working remote Get/Put values */
460 s64 next_msg_to_pull; /* Put value of next msg to pull */
461
462 /* kthread management related fields */ 540 /* kthread management related fields */
463 541
464 atomic_t kthreads_assigned; /* #of kthreads assigned to channel */ 542 atomic_t kthreads_assigned; /* #of kthreads assigned to channel */
@@ -469,6 +547,11 @@ struct xpc_channel {
469 547
470 wait_queue_head_t idle_wq; /* idle kthread wait queue */ 548 wait_queue_head_t idle_wq; /* idle kthread wait queue */
471 549
550 union {
551 struct xpc_channel_sn2 sn2;
552 struct xpc_channel_uv uv;
553 } sn;
554
472} ____cacheline_aligned; 555} ____cacheline_aligned;
473 556
474/* struct xpc_channel flags */ 557/* struct xpc_channel flags */
@@ -501,33 +584,128 @@ struct xpc_channel {
501#define XPC_C_WDISCONNECT 0x00040000 /* waiting for channel disconnect */ 584#define XPC_C_WDISCONNECT 0x00040000 /* waiting for channel disconnect */
502 585
503/* 586/*
504 * Manages channels on a partition basis. There is one of these structures 587 * The channel control flags (chctl) union consists of a 64-bit variable which
588 * is divided up into eight bytes, ordered from right to left. Byte zero
589 * pertains to channel 0, byte one to channel 1, and so on. Each channel's byte
590 * can have one or more of the chctl flags set in it.
591 */
592
593union xpc_channel_ctl_flags {
594 u64 all_flags;
595 u8 flags[XPC_MAX_NCHANNELS];
596};
597
598/* chctl flags */
599#define XPC_CHCTL_CLOSEREQUEST 0x01
600#define XPC_CHCTL_CLOSEREPLY 0x02
601#define XPC_CHCTL_OPENREQUEST 0x04
602#define XPC_CHCTL_OPENREPLY 0x08
603#define XPC_CHCTL_MSGREQUEST 0x10
604
605#define XPC_OPENCLOSE_CHCTL_FLAGS \
606 (XPC_CHCTL_CLOSEREQUEST | XPC_CHCTL_CLOSEREPLY | \
607 XPC_CHCTL_OPENREQUEST | XPC_CHCTL_OPENREPLY)
608#define XPC_MSG_CHCTL_FLAGS XPC_CHCTL_MSGREQUEST
609
610static inline int
611xpc_any_openclose_chctl_flags_set(union xpc_channel_ctl_flags *chctl)
612{
613 int ch_number;
614
615 for (ch_number = 0; ch_number < XPC_MAX_NCHANNELS; ch_number++) {
616 if (chctl->flags[ch_number] & XPC_OPENCLOSE_CHCTL_FLAGS)
617 return 1;
618 }
619 return 0;
620}
621
622static inline int
623xpc_any_msg_chctl_flags_set(union xpc_channel_ctl_flags *chctl)
624{
625 int ch_number;
626
627 for (ch_number = 0; ch_number < XPC_MAX_NCHANNELS; ch_number++) {
628 if (chctl->flags[ch_number] & XPC_MSG_CHCTL_FLAGS)
629 return 1;
630 }
631 return 0;
632}
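As a brief illustration (not part of the patch), the sketch below shows how a single channel's byte maps into the new union; the channel number is hypothetical and the little-endian byte order used on this hardware is assumed:

static inline void xpc_chctl_flags_example(void)
{
	union xpc_channel_ctl_flags chctl = { .all_flags = 0 };
	u8 ch3_flags;

	/* mark (hypothetical) channel 3 as having an open request pending */
	chctl.flags[3] |= XPC_CHCTL_OPENREQUEST;

	/* same byte viewed through the 64-bit member: byte 3 <-> channel 3 */
	ch3_flags = (u8)((chctl.all_flags >> (3 * 8)) & 0xff);

	/* the two helpers above would now report open/close activity */
	(void)ch3_flags;
}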
633
634/*
635 * Manage channels on a partition basis. There is one of these structures
505 * for each partition (a partition will never utilize the structure that 636 * for each partition (a partition will never utilize the structure that
506 * represents itself). 637 * represents itself).
507 */ 638 */
639
640struct xpc_partition_sn2 {
641 unsigned long remote_amos_page_pa; /* paddr of partition's amos page */
642 int activate_IRQ_nasid; /* active partition's act/deact nasid */
643 int activate_IRQ_phys_cpuid; /* active part's act/deact phys cpuid */
644
645 unsigned long remote_vars_pa; /* phys addr of partition's vars */
646 unsigned long remote_vars_part_pa; /* paddr of partition's vars part */
647 u8 remote_vars_version; /* version# of partition's vars */
648
649 void *local_GPs_base; /* base address of kmalloc'd space */
650 struct xpc_gp_sn2 *local_GPs; /* local Get/Put values */
651 void *remote_GPs_base; /* base address of kmalloc'd space */
652 struct xpc_gp_sn2 *remote_GPs; /* copy of remote partition's local */
653 /* Get/Put values */
654 unsigned long remote_GPs_pa; /* phys addr of remote partition's local */
655 /* Get/Put values */
656
657 void *local_openclose_args_base; /* base address of kmalloc'd space */
658 struct xpc_openclose_args *local_openclose_args; /* local's args */
659 unsigned long remote_openclose_args_pa; /* phys addr of remote's args */
660
661 int notify_IRQ_nasid; /* nasid of where to send notify IRQs */
662 int notify_IRQ_phys_cpuid; /* CPUID of where to send notify IRQs */
663 char notify_IRQ_owner[8]; /* notify IRQ's owner's name */
664
665 struct amo *remote_chctl_amo_va; /* addr of remote chctl flags' amo */
666 struct amo *local_chctl_amo_va; /* address of chctl flags' amo */
667
668 struct timer_list dropped_notify_IRQ_timer; /* dropped IRQ timer */
669};
670
671struct xpc_partition_uv {
672 unsigned long remote_activate_mq_gpa; /* gru phys address of remote */
673 /* partition's activate mq */
674 spinlock_t flags_lock; /* protect updating of flags */
675 unsigned int flags; /* general flags */
676 u8 remote_act_state; /* remote partition's act_state */
677 u8 act_state_req; /* act_state request from remote partition */
678 enum xp_retval reason; /* reason for deactivate act_state request */
679 u64 heartbeat; /* incremented by remote partition */
680};
681
682/* struct xpc_partition_uv flags */
683
684#define XPC_P_HEARTBEAT_OFFLINE_UV 0x00000001
685#define XPC_P_ENGAGED_UV 0x00000002
686
687/* struct xpc_partition_uv act_state change requests */
688
689#define XPC_P_ASR_ACTIVATE_UV 0x01
690#define XPC_P_ASR_REACTIVATE_UV 0x02
691#define XPC_P_ASR_DEACTIVATE_UV 0x03
692
508struct xpc_partition { 693struct xpc_partition {
509 694
510 /* XPC HB infrastructure */ 695 /* XPC HB infrastructure */
511 696
512 u8 remote_rp_version; /* version# of partition's rsvd pg */ 697 u8 remote_rp_version; /* version# of partition's rsvd pg */
513 struct timespec remote_rp_stamp; /* time when rsvd pg was initialized */ 698 unsigned long remote_rp_ts_jiffies; /* timestamp when rsvd pg setup */
514 u64 remote_rp_pa; /* phys addr of partition's rsvd pg */ 699 unsigned long remote_rp_pa; /* phys addr of partition's rsvd pg */
515 u64 remote_vars_pa; /* phys addr of partition's vars */
516 u64 remote_vars_part_pa; /* phys addr of partition's vars part */
517 u64 last_heartbeat; /* HB at last read */ 700 u64 last_heartbeat; /* HB at last read */
518 u64 remote_amos_page_pa; /* phys addr of partition's amos page */ 701 u32 activate_IRQ_rcvd; /* IRQs since activation */
519 int remote_act_nasid; /* active part's act/deact nasid */
520 int remote_act_phys_cpuid; /* active part's act/deact phys cpuid */
521 u32 act_IRQ_rcvd; /* IRQs since activation */
522 spinlock_t act_lock; /* protect updating of act_state */ 702 spinlock_t act_lock; /* protect updating of act_state */
523 u8 act_state; /* from XPC HB viewpoint */ 703 u8 act_state; /* from XPC HB viewpoint */
524 u8 remote_vars_version; /* version# of partition's vars */
525 enum xp_retval reason; /* reason partition is deactivating */ 704 enum xp_retval reason; /* reason partition is deactivating */
526 int reason_line; /* line# deactivation initiated from */ 705 int reason_line; /* line# deactivation initiated from */
527 int reactivate_nasid; /* nasid in partition to reactivate */
528 706
529 unsigned long disengage_request_timeout; /* timeout in jiffies */ 707 unsigned long disengage_timeout; /* timeout in jiffies */
530 struct timer_list disengage_request_timer; 708 struct timer_list disengage_timer;
531 709
532 /* XPC infrastructure referencing and teardown control */ 710 /* XPC infrastructure referencing and teardown control */
533 711
@@ -535,85 +713,63 @@ struct xpc_partition {
535 wait_queue_head_t teardown_wq; /* kthread waiting to teardown infra */ 713 wait_queue_head_t teardown_wq; /* kthread waiting to teardown infra */
536 atomic_t references; /* #of references to infrastructure */ 714 atomic_t references; /* #of references to infrastructure */
537 715
538 /*
539 * NONE OF THE PRECEDING FIELDS OF THIS STRUCTURE WILL BE CLEARED WHEN
540 * XPC SETS UP THE NECESSARY INFRASTRUCTURE TO SUPPORT CROSS PARTITION
541 * COMMUNICATION. ALL OF THE FOLLOWING FIELDS WILL BE CLEARED. (THE
542 * 'nchannels' FIELD MUST BE THE FIRST OF THE FIELDS TO BE CLEARED.)
543 */
544
545 u8 nchannels; /* #of defined channels supported */ 716 u8 nchannels; /* #of defined channels supported */
546 atomic_t nchannels_active; /* #of channels that are not DISCONNECTED */ 717 atomic_t nchannels_active; /* #of channels that are not DISCONNECTED */
547 atomic_t nchannels_engaged; /* #of channels engaged with remote part */ 718 atomic_t nchannels_engaged; /* #of channels engaged with remote part */
548 struct xpc_channel *channels; /* array of channel structures */ 719 struct xpc_channel *channels; /* array of channel structures */
549 720
550 void *local_GPs_base; /* base address of kmalloc'd space */ 721 /* fields used for managing channel availability and activity */
551 struct xpc_gp *local_GPs; /* local Get/Put values */
552 void *remote_GPs_base; /* base address of kmalloc'd space */
553 struct xpc_gp *remote_GPs; /* copy of remote partition's local */
554 /* Get/Put values */
555 u64 remote_GPs_pa; /* phys address of remote partition's local */
556 /* Get/Put values */
557 722
558 /* fields used to pass args when opening or closing a channel */ 723 union xpc_channel_ctl_flags chctl; /* chctl flags yet to be processed */
724 spinlock_t chctl_lock; /* chctl flags lock */
559 725
560 void *local_openclose_args_base; /* base address of kmalloc'd space */
561 struct xpc_openclose_args *local_openclose_args; /* local's args */
562 void *remote_openclose_args_base; /* base address of kmalloc'd space */ 726 void *remote_openclose_args_base; /* base address of kmalloc'd space */
563 struct xpc_openclose_args *remote_openclose_args; /* copy of remote's */ 727 struct xpc_openclose_args *remote_openclose_args; /* copy of remote's */
564 /* args */ 728 /* args */
565 u64 remote_openclose_args_pa; /* phys addr of remote's args */
566
567 /* IPI sending, receiving and handling related fields */
568
569 int remote_IPI_nasid; /* nasid of where to send IPIs */
570 int remote_IPI_phys_cpuid; /* phys CPU ID of where to send IPIs */
571 AMO_t *remote_IPI_amo_va; /* address of remote IPI AMO_t structure */
572
573 AMO_t *local_IPI_amo_va; /* address of IPI AMO_t structure */
574 u64 local_IPI_amo; /* IPI amo flags yet to be handled */
575 char IPI_owner[8]; /* IPI owner's name */
576 struct timer_list dropped_IPI_timer; /* dropped IPI timer */
577
578 spinlock_t IPI_lock; /* IPI handler lock */
579 729
580 /* channel manager related fields */ 730 /* channel manager related fields */
581 731
582 atomic_t channel_mgr_requests; /* #of requests to activate chan mgr */ 732 atomic_t channel_mgr_requests; /* #of requests to activate chan mgr */
583 wait_queue_head_t channel_mgr_wq; /* channel mgr's wait queue */ 733 wait_queue_head_t channel_mgr_wq; /* channel mgr's wait queue */
584 734
735 union {
736 struct xpc_partition_sn2 sn2;
737 struct xpc_partition_uv uv;
738 } sn;
739
585} ____cacheline_aligned; 740} ____cacheline_aligned;
586 741
587/* struct xpc_partition act_state values (for XPC HB) */ 742/* struct xpc_partition act_state values (for XPC HB) */
588 743
589#define XPC_P_INACTIVE 0x00 /* partition is not active */ 744#define XPC_P_AS_INACTIVE 0x00 /* partition is not active */
590#define XPC_P_ACTIVATION_REQ 0x01 /* created thread to activate */ 745#define XPC_P_AS_ACTIVATION_REQ 0x01 /* created thread to activate */
591#define XPC_P_ACTIVATING 0x02 /* activation thread started */ 746#define XPC_P_AS_ACTIVATING 0x02 /* activation thread started */
592#define XPC_P_ACTIVE 0x03 /* xpc_partition_up() was called */ 747#define XPC_P_AS_ACTIVE 0x03 /* xpc_partition_up() was called */
593#define XPC_P_DEACTIVATING 0x04 /* partition deactivation initiated */ 748#define XPC_P_AS_DEACTIVATING 0x04 /* partition deactivation initiated */
594 749
595#define XPC_DEACTIVATE_PARTITION(_p, _reason) \ 750#define XPC_DEACTIVATE_PARTITION(_p, _reason) \
596 xpc_deactivate_partition(__LINE__, (_p), (_reason)) 751 xpc_deactivate_partition(__LINE__, (_p), (_reason))
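As used elsewhere in this patch, the macro simply stamps the caller's line number into the partition structure; a typical call site looks like the following (the reason value is illustrative):

	/* deactivate 'part', recording this call site's __LINE__ and the reason */
	XPC_DEACTIVATE_PARTITION(part, xpPioReadError);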
597 752
598/* struct xpc_partition setup_state values */ 753/* struct xpc_partition setup_state values */
599 754
600#define XPC_P_UNSET 0x00 /* infrastructure was never setup */ 755#define XPC_P_SS_UNSET 0x00 /* infrastructure was never setup */
601#define XPC_P_SETUP 0x01 /* infrastructure is setup */ 756#define XPC_P_SS_SETUP 0x01 /* infrastructure is setup */
602#define XPC_P_WTEARDOWN 0x02 /* waiting to teardown infrastructure */ 757#define XPC_P_SS_WTEARDOWN 0x02 /* waiting to teardown infrastructure */
603#define XPC_P_TORNDOWN 0x03 /* infrastructure is torndown */ 758#define XPC_P_SS_TORNDOWN 0x03 /* infrastructure is torndown */
604 759
605/* 760/*
606 * struct xpc_partition IPI_timer #of seconds to wait before checking for 761 * struct xpc_partition_sn2's dropped notify IRQ timer is set to wait the
607 * dropped IPIs. These occur whenever an IPI amo write doesn't complete until 762 * following interval #of seconds before checking for dropped notify IRQs.
608 * after the IPI was received. 763 * These can occur whenever an IRQ's associated amo write doesn't complete
764 * until after the IRQ was received.
609 */ 765 */
610#define XPC_P_DROPPED_IPI_WAIT (0.25 * HZ) 766#define XPC_DROPPED_NOTIFY_IRQ_WAIT_INTERVAL (0.25 * HZ)
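A rough sketch (assuming the sn2-specific code arms its timer the same way the old common code armed the dropped-IPI timer; the helper name is made up, field names follow the structures above) of how this interval is typically consumed:

static void xpc_arm_dropped_notify_timer(struct xpc_partition *part)
{
	struct timer_list *timer = &part->sn.sn2.dropped_notify_IRQ_timer;

	/* timer->function and timer->data are assumed already initialized */
	timer->expires = jiffies + XPC_DROPPED_NOTIFY_IRQ_WAIT_INTERVAL;
	add_timer(timer);
}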
611 767
612/* number of seconds to wait for other partitions to disengage */ 768/* number of seconds to wait for other partitions to disengage */
613#define XPC_DISENGAGE_REQUEST_DEFAULT_TIMELIMIT 90 769#define XPC_DISENGAGE_DEFAULT_TIMELIMIT 90
614 770
615/* interval in seconds to print 'waiting disengagement' messages */ 771/* interval in seconds to print 'waiting deactivation' messages */
616#define XPC_DISENGAGE_PRINTMSG_INTERVAL 10 772#define XPC_DEACTIVATE_PRINTMSG_INTERVAL 10
617 773
618#define XPC_PARTID(_p) ((short)((_p) - &xpc_partitions[0])) 774#define XPC_PARTID(_p) ((short)((_p) - &xpc_partitions[0]))
619 775
@@ -623,33 +779,92 @@ extern struct xpc_registration xpc_registrations[];
623/* found in xpc_main.c */ 779/* found in xpc_main.c */
624extern struct device *xpc_part; 780extern struct device *xpc_part;
625extern struct device *xpc_chan; 781extern struct device *xpc_chan;
626extern int xpc_disengage_request_timelimit; 782extern int xpc_disengage_timelimit;
627extern int xpc_disengage_request_timedout; 783extern int xpc_disengage_timedout;
628extern irqreturn_t xpc_notify_IRQ_handler(int, void *); 784extern int xpc_activate_IRQ_rcvd;
629extern void xpc_dropped_IPI_check(struct xpc_partition *); 785extern spinlock_t xpc_activate_IRQ_rcvd_lock;
786extern wait_queue_head_t xpc_activate_IRQ_wq;
787extern void *xpc_heartbeating_to_mask;
788extern void *xpc_kzalloc_cacheline_aligned(size_t, gfp_t, void **);
630extern void xpc_activate_partition(struct xpc_partition *); 789extern void xpc_activate_partition(struct xpc_partition *);
631extern void xpc_activate_kthreads(struct xpc_channel *, int); 790extern void xpc_activate_kthreads(struct xpc_channel *, int);
632extern void xpc_create_kthreads(struct xpc_channel *, int, int); 791extern void xpc_create_kthreads(struct xpc_channel *, int, int);
633extern void xpc_disconnect_wait(int); 792extern void xpc_disconnect_wait(int);
793extern int (*xpc_setup_partitions_sn) (void);
794extern enum xp_retval (*xpc_get_partition_rsvd_page_pa) (void *, u64 *,
795 unsigned long *,
796 size_t *);
797extern int (*xpc_setup_rsvd_page_sn) (struct xpc_rsvd_page *);
798extern void (*xpc_heartbeat_init) (void);
799extern void (*xpc_heartbeat_exit) (void);
800extern void (*xpc_increment_heartbeat) (void);
801extern void (*xpc_offline_heartbeat) (void);
802extern void (*xpc_online_heartbeat) (void);
803extern enum xp_retval (*xpc_get_remote_heartbeat) (struct xpc_partition *);
804extern enum xp_retval (*xpc_make_first_contact) (struct xpc_partition *);
805extern u64 (*xpc_get_chctl_all_flags) (struct xpc_partition *);
806extern enum xp_retval (*xpc_setup_msg_structures) (struct xpc_channel *);
807extern void (*xpc_teardown_msg_structures) (struct xpc_channel *);
808extern void (*xpc_notify_senders_of_disconnect) (struct xpc_channel *);
809extern void (*xpc_process_msg_chctl_flags) (struct xpc_partition *, int);
810extern int (*xpc_n_of_deliverable_payloads) (struct xpc_channel *);
811extern void *(*xpc_get_deliverable_payload) (struct xpc_channel *);
812extern void (*xpc_request_partition_activation) (struct xpc_rsvd_page *,
813 unsigned long, int);
814extern void (*xpc_request_partition_reactivation) (struct xpc_partition *);
815extern void (*xpc_request_partition_deactivation) (struct xpc_partition *);
816extern void (*xpc_cancel_partition_deactivation_request) (
817 struct xpc_partition *);
818extern void (*xpc_process_activate_IRQ_rcvd) (void);
819extern enum xp_retval (*xpc_setup_ch_structures_sn) (struct xpc_partition *);
820extern void (*xpc_teardown_ch_structures_sn) (struct xpc_partition *);
821
822extern void (*xpc_indicate_partition_engaged) (struct xpc_partition *);
823extern int (*xpc_partition_engaged) (short);
824extern int (*xpc_any_partition_engaged) (void);
825extern void (*xpc_indicate_partition_disengaged) (struct xpc_partition *);
826extern void (*xpc_assume_partition_disengaged) (short);
827
828extern void (*xpc_send_chctl_closerequest) (struct xpc_channel *,
829 unsigned long *);
830extern void (*xpc_send_chctl_closereply) (struct xpc_channel *,
831 unsigned long *);
832extern void (*xpc_send_chctl_openrequest) (struct xpc_channel *,
833 unsigned long *);
834extern void (*xpc_send_chctl_openreply) (struct xpc_channel *, unsigned long *);
835
836extern void (*xpc_save_remote_msgqueue_pa) (struct xpc_channel *,
837 unsigned long);
838
839extern enum xp_retval (*xpc_send_payload) (struct xpc_channel *, u32, void *,
840 u16, u8, xpc_notify_func, void *);
841extern void (*xpc_received_payload) (struct xpc_channel *, void *);
842
843/* found in xpc_sn2.c */
844extern int xpc_init_sn2(void);
845extern void xpc_exit_sn2(void);
846
847/* found in xpc_uv.c */
848extern int xpc_init_uv(void);
849extern void xpc_exit_uv(void);
634 850
635/* found in xpc_partition.c */ 851/* found in xpc_partition.c */
636extern int xpc_exiting; 852extern int xpc_exiting;
637extern struct xpc_vars *xpc_vars; 853extern int xpc_nasid_mask_nlongs;
638extern struct xpc_rsvd_page *xpc_rsvd_page; 854extern struct xpc_rsvd_page *xpc_rsvd_page;
639extern struct xpc_vars_part *xpc_vars_part; 855extern unsigned long *xpc_mach_nasids;
640extern struct xpc_partition xpc_partitions[XP_MAX_PARTITIONS + 1]; 856extern struct xpc_partition *xpc_partitions;
641extern char *xpc_remote_copy_buffer;
642extern void *xpc_remote_copy_buffer_base;
643extern void *xpc_kmalloc_cacheline_aligned(size_t, gfp_t, void **); 857extern void *xpc_kmalloc_cacheline_aligned(size_t, gfp_t, void **);
644extern struct xpc_rsvd_page *xpc_rsvd_page_init(void); 858extern int xpc_setup_rsvd_page(void);
645extern void xpc_allow_IPI_ops(void); 859extern void xpc_teardown_rsvd_page(void);
646extern void xpc_restrict_IPI_ops(void); 860extern int xpc_identify_activate_IRQ_sender(void);
647extern int xpc_identify_act_IRQ_sender(void);
648extern int xpc_partition_disengaged(struct xpc_partition *); 861extern int xpc_partition_disengaged(struct xpc_partition *);
649extern enum xp_retval xpc_mark_partition_active(struct xpc_partition *); 862extern enum xp_retval xpc_mark_partition_active(struct xpc_partition *);
650extern void xpc_mark_partition_inactive(struct xpc_partition *); 863extern void xpc_mark_partition_inactive(struct xpc_partition *);
651extern void xpc_discovery(void); 864extern void xpc_discovery(void);
652extern void xpc_check_remote_hb(void); 865extern enum xp_retval xpc_get_remote_rp(int, unsigned long *,
866 struct xpc_rsvd_page *,
867 unsigned long *);
653extern void xpc_deactivate_partition(const int, struct xpc_partition *, 868extern void xpc_deactivate_partition(const int, struct xpc_partition *,
654 enum xp_retval); 869 enum xp_retval);
655extern enum xp_retval xpc_initiate_partid_to_nasids(short, void *); 870extern enum xp_retval xpc_initiate_partid_to_nasids(short, void *);
@@ -657,21 +872,52 @@ extern enum xp_retval xpc_initiate_partid_to_nasids(short, void *);
657/* found in xpc_channel.c */ 872/* found in xpc_channel.c */
658extern void xpc_initiate_connect(int); 873extern void xpc_initiate_connect(int);
659extern void xpc_initiate_disconnect(int); 874extern void xpc_initiate_disconnect(int);
660extern enum xp_retval xpc_initiate_allocate(short, int, u32, void **); 875extern enum xp_retval xpc_allocate_msg_wait(struct xpc_channel *);
661extern enum xp_retval xpc_initiate_send(short, int, void *); 876extern enum xp_retval xpc_initiate_send(short, int, u32, void *, u16);
662extern enum xp_retval xpc_initiate_send_notify(short, int, void *, 877extern enum xp_retval xpc_initiate_send_notify(short, int, u32, void *, u16,
663 xpc_notify_func, void *); 878 xpc_notify_func, void *);
664extern void xpc_initiate_received(short, int, void *); 879extern void xpc_initiate_received(short, int, void *);
665extern enum xp_retval xpc_setup_infrastructure(struct xpc_partition *); 880extern void xpc_process_sent_chctl_flags(struct xpc_partition *);
666extern enum xp_retval xpc_pull_remote_vars_part(struct xpc_partition *);
667extern void xpc_process_channel_activity(struct xpc_partition *);
668extern void xpc_connected_callout(struct xpc_channel *); 881extern void xpc_connected_callout(struct xpc_channel *);
669extern void xpc_deliver_msg(struct xpc_channel *); 882extern void xpc_deliver_payload(struct xpc_channel *);
670extern void xpc_disconnect_channel(const int, struct xpc_channel *, 883extern void xpc_disconnect_channel(const int, struct xpc_channel *,
671 enum xp_retval, unsigned long *); 884 enum xp_retval, unsigned long *);
672extern void xpc_disconnect_callout(struct xpc_channel *, enum xp_retval); 885extern void xpc_disconnect_callout(struct xpc_channel *, enum xp_retval);
673extern void xpc_partition_going_down(struct xpc_partition *, enum xp_retval); 886extern void xpc_partition_going_down(struct xpc_partition *, enum xp_retval);
674extern void xpc_teardown_infrastructure(struct xpc_partition *); 887
888static inline int
889xpc_hb_allowed(short partid, void *heartbeating_to_mask)
890{
891 return test_bit(partid, heartbeating_to_mask);
892}
893
894static inline int
895xpc_any_hbs_allowed(void)
896{
897 DBUG_ON(xpc_heartbeating_to_mask == NULL);
898 return !bitmap_empty(xpc_heartbeating_to_mask, xp_max_npartitions);
899}
900
901static inline void
902xpc_allow_hb(short partid)
903{
904 DBUG_ON(xpc_heartbeating_to_mask == NULL);
905 set_bit(partid, xpc_heartbeating_to_mask);
906}
907
908static inline void
909xpc_disallow_hb(short partid)
910{
911 DBUG_ON(xpc_heartbeating_to_mask == NULL);
912 clear_bit(partid, xpc_heartbeating_to_mask);
913}
914
915static inline void
916xpc_disallow_all_hbs(void)
917{
918 DBUG_ON(xpc_heartbeating_to_mask == NULL);
919 bitmap_zero(xpc_heartbeating_to_mask, xp_max_npartitions);
920}
675 921
676static inline void 922static inline void
677xpc_wakeup_channel_mgr(struct xpc_partition *part) 923xpc_wakeup_channel_mgr(struct xpc_partition *part)
@@ -713,7 +959,7 @@ xpc_part_deref(struct xpc_partition *part)
713 s32 refs = atomic_dec_return(&part->references); 959 s32 refs = atomic_dec_return(&part->references);
714 960
715 DBUG_ON(refs < 0); 961 DBUG_ON(refs < 0);
716 if (refs == 0 && part->setup_state == XPC_P_WTEARDOWN) 962 if (refs == 0 && part->setup_state == XPC_P_SS_WTEARDOWN)
717 wake_up(&part->teardown_wq); 963 wake_up(&part->teardown_wq);
718} 964}
719 965
@@ -723,7 +969,7 @@ xpc_part_ref(struct xpc_partition *part)
723 int setup; 969 int setup;
724 970
725 atomic_inc(&part->references); 971 atomic_inc(&part->references);
726 setup = (part->setup_state == XPC_P_SETUP); 972 setup = (part->setup_state == XPC_P_SS_SETUP);
727 if (!setup) 973 if (!setup)
728 xpc_part_deref(part); 974 xpc_part_deref(part);
729 975
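A minimal usage sketch (assuming xpc_part_ref() returns nonzero only while the partition's infrastructure is set up, as the fragment above suggests):

	/* sketch: guard access to a partition's channel structures */
	if (xpc_part_ref(part)) {
		/* part->channels and related fields are safe to use here */
		xpc_part_deref(part);
	}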
@@ -741,416 +987,4 @@ xpc_part_ref(struct xpc_partition *part)
741 (_p)->reason_line = _line; \ 987 (_p)->reason_line = _line; \
742 } 988 }
743 989
744/*
745 * This next set of inlines are used to keep track of when a partition is
746 * potentially engaged in accessing memory belonging to another partition.
747 */
748
749static inline void
750xpc_mark_partition_engaged(struct xpc_partition *part)
751{
752 unsigned long irq_flags;
753 AMO_t *amo = (AMO_t *)__va(part->remote_amos_page_pa +
754 (XPC_ENGAGED_PARTITIONS_AMO *
755 sizeof(AMO_t)));
756
757 local_irq_save(irq_flags);
758
759 /* set bit corresponding to our partid in remote partition's AMO */
760 FETCHOP_STORE_OP(TO_AMO((u64)&amo->variable), FETCHOP_OR,
761 (1UL << sn_partition_id));
762 /*
763 * We must always use the nofault function regardless of whether we
764 * are on a Shub 1.1 system or a Shub 1.2 slice 0xc processor. If we
765 * didn't, we'd never know that the other partition is down and would
766 * keep sending IPIs and AMOs to it until the heartbeat times out.
767 */
768 (void)xp_nofault_PIOR((u64 *)GLOBAL_MMR_ADDR(NASID_GET(&amo->
769 variable),
770 xp_nofault_PIOR_target));
771
772 local_irq_restore(irq_flags);
773}
774
775static inline void
776xpc_mark_partition_disengaged(struct xpc_partition *part)
777{
778 unsigned long irq_flags;
779 AMO_t *amo = (AMO_t *)__va(part->remote_amos_page_pa +
780 (XPC_ENGAGED_PARTITIONS_AMO *
781 sizeof(AMO_t)));
782
783 local_irq_save(irq_flags);
784
785 /* clear bit corresponding to our partid in remote partition's AMO */
786 FETCHOP_STORE_OP(TO_AMO((u64)&amo->variable), FETCHOP_AND,
787 ~(1UL << sn_partition_id));
788 /*
789 * We must always use the nofault function regardless of whether we
790 * are on a Shub 1.1 system or a Shub 1.2 slice 0xc processor. If we
791 * didn't, we'd never know that the other partition is down and would
792 * keep sending IPIs and AMOs to it until the heartbeat times out.
793 */
794 (void)xp_nofault_PIOR((u64 *)GLOBAL_MMR_ADDR(NASID_GET(&amo->
795 variable),
796 xp_nofault_PIOR_target));
797
798 local_irq_restore(irq_flags);
799}
800
801static inline void
802xpc_request_partition_disengage(struct xpc_partition *part)
803{
804 unsigned long irq_flags;
805 AMO_t *amo = (AMO_t *)__va(part->remote_amos_page_pa +
806 (XPC_DISENGAGE_REQUEST_AMO * sizeof(AMO_t)));
807
808 local_irq_save(irq_flags);
809
810 /* set bit corresponding to our partid in remote partition's AMO */
811 FETCHOP_STORE_OP(TO_AMO((u64)&amo->variable), FETCHOP_OR,
812 (1UL << sn_partition_id));
813 /*
814 * We must always use the nofault function regardless of whether we
815 * are on a Shub 1.1 system or a Shub 1.2 slice 0xc processor. If we
816 * didn't, we'd never know that the other partition is down and would
817 * keep sending IPIs and AMOs to it until the heartbeat times out.
818 */
819 (void)xp_nofault_PIOR((u64 *)GLOBAL_MMR_ADDR(NASID_GET(&amo->
820 variable),
821 xp_nofault_PIOR_target));
822
823 local_irq_restore(irq_flags);
824}
825
826static inline void
827xpc_cancel_partition_disengage_request(struct xpc_partition *part)
828{
829 unsigned long irq_flags;
830 AMO_t *amo = (AMO_t *)__va(part->remote_amos_page_pa +
831 (XPC_DISENGAGE_REQUEST_AMO * sizeof(AMO_t)));
832
833 local_irq_save(irq_flags);
834
835 /* clear bit corresponding to our partid in remote partition's AMO */
836 FETCHOP_STORE_OP(TO_AMO((u64)&amo->variable), FETCHOP_AND,
837 ~(1UL << sn_partition_id));
838 /*
839 * We must always use the nofault function regardless of whether we
840 * are on a Shub 1.1 system or a Shub 1.2 slice 0xc processor. If we
841 * didn't, we'd never know that the other partition is down and would
842 * keep sending IPIs and AMOs to it until the heartbeat times out.
843 */
844 (void)xp_nofault_PIOR((u64 *)GLOBAL_MMR_ADDR(NASID_GET(&amo->
845 variable),
846 xp_nofault_PIOR_target));
847
848 local_irq_restore(irq_flags);
849}
850
851static inline u64
852xpc_partition_engaged(u64 partid_mask)
853{
854 AMO_t *amo = xpc_vars->amos_page + XPC_ENGAGED_PARTITIONS_AMO;
855
856 /* return our partition's AMO variable ANDed with partid_mask */
857 return (FETCHOP_LOAD_OP(TO_AMO((u64)&amo->variable), FETCHOP_LOAD) &
858 partid_mask);
859}
860
861static inline u64
862xpc_partition_disengage_requested(u64 partid_mask)
863{
864 AMO_t *amo = xpc_vars->amos_page + XPC_DISENGAGE_REQUEST_AMO;
865
866 /* return our partition's AMO variable ANDed with partid_mask */
867 return (FETCHOP_LOAD_OP(TO_AMO((u64)&amo->variable), FETCHOP_LOAD) &
868 partid_mask);
869}
870
871static inline void
872xpc_clear_partition_engaged(u64 partid_mask)
873{
874 AMO_t *amo = xpc_vars->amos_page + XPC_ENGAGED_PARTITIONS_AMO;
875
876 /* clear bit(s) based on partid_mask in our partition's AMO */
877 FETCHOP_STORE_OP(TO_AMO((u64)&amo->variable), FETCHOP_AND,
878 ~partid_mask);
879}
880
881static inline void
882xpc_clear_partition_disengage_request(u64 partid_mask)
883{
884 AMO_t *amo = xpc_vars->amos_page + XPC_DISENGAGE_REQUEST_AMO;
885
886 /* clear bit(s) based on partid_mask in our partition's AMO */
887 FETCHOP_STORE_OP(TO_AMO((u64)&amo->variable), FETCHOP_AND,
888 ~partid_mask);
889}
890
891/*
892 * The following set of macros and inlines are used for the sending and
893 * receiving of IPIs (also known as IRQs). There are two flavors of IPIs,
894 * one that is associated with partition activity (SGI_XPC_ACTIVATE) and
895 * the other that is associated with channel activity (SGI_XPC_NOTIFY).
896 */
897
898static inline u64
899xpc_IPI_receive(AMO_t *amo)
900{
901 return FETCHOP_LOAD_OP(TO_AMO((u64)&amo->variable), FETCHOP_CLEAR);
902}
903
904static inline enum xp_retval
905xpc_IPI_send(AMO_t *amo, u64 flag, int nasid, int phys_cpuid, int vector)
906{
907 int ret = 0;
908 unsigned long irq_flags;
909
910 local_irq_save(irq_flags);
911
912 FETCHOP_STORE_OP(TO_AMO((u64)&amo->variable), FETCHOP_OR, flag);
913 sn_send_IPI_phys(nasid, phys_cpuid, vector, 0);
914
915 /*
916 * We must always use the nofault function regardless of whether we
917 * are on a Shub 1.1 system or a Shub 1.2 slice 0xc processor. If we
918 * didn't, we'd never know that the other partition is down and would
919 * keep sending IPIs and AMOs to it until the heartbeat times out.
920 */
921 ret = xp_nofault_PIOR((u64 *)GLOBAL_MMR_ADDR(NASID_GET(&amo->variable),
922 xp_nofault_PIOR_target));
923
924 local_irq_restore(irq_flags);
925
926 return ((ret == 0) ? xpSuccess : xpPioReadError);
927}
928
929/*
930 * IPIs associated with SGI_XPC_ACTIVATE IRQ.
931 */
932
933/*
934 * Flag the appropriate AMO variable and send an IPI to the specified node.
935 */
936static inline void
937xpc_activate_IRQ_send(u64 amos_page_pa, int from_nasid, int to_nasid,
938 int to_phys_cpuid)
939{
940 int w_index = XPC_NASID_W_INDEX(from_nasid);
941 int b_index = XPC_NASID_B_INDEX(from_nasid);
942 AMO_t *amos = (AMO_t *)__va(amos_page_pa +
943 (XPC_ACTIVATE_IRQ_AMOS * sizeof(AMO_t)));
944
945 (void)xpc_IPI_send(&amos[w_index], (1UL << b_index), to_nasid,
946 to_phys_cpuid, SGI_XPC_ACTIVATE);
947}
948
949static inline void
950xpc_IPI_send_activate(struct xpc_vars *vars)
951{
952 xpc_activate_IRQ_send(vars->amos_page_pa, cnodeid_to_nasid(0),
953 vars->act_nasid, vars->act_phys_cpuid);
954}
955
956static inline void
957xpc_IPI_send_activated(struct xpc_partition *part)
958{
959 xpc_activate_IRQ_send(part->remote_amos_page_pa, cnodeid_to_nasid(0),
960 part->remote_act_nasid,
961 part->remote_act_phys_cpuid);
962}
963
964static inline void
965xpc_IPI_send_reactivate(struct xpc_partition *part)
966{
967 xpc_activate_IRQ_send(xpc_vars->amos_page_pa, part->reactivate_nasid,
968 xpc_vars->act_nasid, xpc_vars->act_phys_cpuid);
969}
970
971static inline void
972xpc_IPI_send_disengage(struct xpc_partition *part)
973{
974 xpc_activate_IRQ_send(part->remote_amos_page_pa, cnodeid_to_nasid(0),
975 part->remote_act_nasid,
976 part->remote_act_phys_cpuid);
977}
978
979/*
980 * IPIs associated with SGI_XPC_NOTIFY IRQ.
981 */
982
983/*
984 * Send an IPI to the remote partition that is associated with the
985 * specified channel.
986 */
987#define XPC_NOTIFY_IRQ_SEND(_ch, _ipi_f, _irq_f) \
988 xpc_notify_IRQ_send(_ch, _ipi_f, #_ipi_f, _irq_f)
989
990static inline void
991xpc_notify_IRQ_send(struct xpc_channel *ch, u8 ipi_flag, char *ipi_flag_string,
992 unsigned long *irq_flags)
993{
994 struct xpc_partition *part = &xpc_partitions[ch->partid];
995 enum xp_retval ret;
996
997 if (likely(part->act_state != XPC_P_DEACTIVATING)) {
998 ret = xpc_IPI_send(part->remote_IPI_amo_va,
999 (u64)ipi_flag << (ch->number * 8),
1000 part->remote_IPI_nasid,
1001 part->remote_IPI_phys_cpuid, SGI_XPC_NOTIFY);
1002 dev_dbg(xpc_chan, "%s sent to partid=%d, channel=%d, ret=%d\n",
1003 ipi_flag_string, ch->partid, ch->number, ret);
1004 if (unlikely(ret != xpSuccess)) {
1005 if (irq_flags != NULL)
1006 spin_unlock_irqrestore(&ch->lock, *irq_flags);
1007 XPC_DEACTIVATE_PARTITION(part, ret);
1008 if (irq_flags != NULL)
1009 spin_lock_irqsave(&ch->lock, *irq_flags);
1010 }
1011 }
1012}
1013
1014/*
1015 * Make it look like the remote partition, which is associated with the
1016 * specified channel, sent us an IPI. This faked IPI will be handled
1017 * by xpc_dropped_IPI_check().
1018 */
1019#define XPC_NOTIFY_IRQ_SEND_LOCAL(_ch, _ipi_f) \
1020 xpc_notify_IRQ_send_local(_ch, _ipi_f, #_ipi_f)
1021
1022static inline void
1023xpc_notify_IRQ_send_local(struct xpc_channel *ch, u8 ipi_flag,
1024 char *ipi_flag_string)
1025{
1026 struct xpc_partition *part = &xpc_partitions[ch->partid];
1027
1028 FETCHOP_STORE_OP(TO_AMO((u64)&part->local_IPI_amo_va->variable),
1029 FETCHOP_OR, ((u64)ipi_flag << (ch->number * 8)));
1030 dev_dbg(xpc_chan, "%s sent local from partid=%d, channel=%d\n",
1031 ipi_flag_string, ch->partid, ch->number);
1032}
1033
1034/*
1035 * The sending and receiving of IPIs includes the setting of an AMO variable
1036 * to indicate the reason the IPI was sent. The 64-bit variable is divided
1037 * up into eight bytes, ordered from right to left. Byte zero pertains to
1038 * channel 0, byte one to channel 1, and so on. Each byte is described by
1039 * the following IPI flags.
1040 */
1041
1042#define XPC_IPI_CLOSEREQUEST 0x01
1043#define XPC_IPI_CLOSEREPLY 0x02
1044#define XPC_IPI_OPENREQUEST 0x04
1045#define XPC_IPI_OPENREPLY 0x08
1046#define XPC_IPI_MSGREQUEST 0x10
1047
1048/* given an AMO variable and a channel#, get its associated IPI flags */
1049#define XPC_GET_IPI_FLAGS(_amo, _c) ((u8) (((_amo) >> ((_c) * 8)) & 0xff))
1050#define XPC_SET_IPI_FLAGS(_amo, _c, _f) (_amo) |= ((u64) (_f) << ((_c) * 8))
1051
1052#define XPC_ANY_OPENCLOSE_IPI_FLAGS_SET(_amo) ((_amo) & 0x0f0f0f0f0f0f0f0fUL)
1053#define XPC_ANY_MSG_IPI_FLAGS_SET(_amo) ((_amo) & 0x1010101010101010UL)
1054
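To make the per-channel byte layout of the old IPI scheme concrete, here is an illustrative sketch of the removed macros above; the channel number is hypothetical:

static inline void xpc_old_ipi_flags_example(void)
{
	u64 ipi_amo = 0;
	u8 ch_flags;

	/* record an open request for (hypothetical) channel 2 */
	XPC_SET_IPI_FLAGS(ipi_amo, 2, XPC_IPI_OPENREQUEST);

	if (XPC_ANY_OPENCLOSE_IPI_FLAGS_SET(ipi_amo)) {
		ch_flags = XPC_GET_IPI_FLAGS(ipi_amo, 2);	/* == 0x04 */
		(void)ch_flags;
	}
}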
1055static inline void
1056xpc_IPI_send_closerequest(struct xpc_channel *ch, unsigned long *irq_flags)
1057{
1058 struct xpc_openclose_args *args = ch->local_openclose_args;
1059
1060 args->reason = ch->reason;
1061
1062 XPC_NOTIFY_IRQ_SEND(ch, XPC_IPI_CLOSEREQUEST, irq_flags);
1063}
1064
1065static inline void
1066xpc_IPI_send_closereply(struct xpc_channel *ch, unsigned long *irq_flags)
1067{
1068 XPC_NOTIFY_IRQ_SEND(ch, XPC_IPI_CLOSEREPLY, irq_flags);
1069}
1070
1071static inline void
1072xpc_IPI_send_openrequest(struct xpc_channel *ch, unsigned long *irq_flags)
1073{
1074 struct xpc_openclose_args *args = ch->local_openclose_args;
1075
1076 args->msg_size = ch->msg_size;
1077 args->local_nentries = ch->local_nentries;
1078
1079 XPC_NOTIFY_IRQ_SEND(ch, XPC_IPI_OPENREQUEST, irq_flags);
1080}
1081
1082static inline void
1083xpc_IPI_send_openreply(struct xpc_channel *ch, unsigned long *irq_flags)
1084{
1085 struct xpc_openclose_args *args = ch->local_openclose_args;
1086
1087 args->remote_nentries = ch->remote_nentries;
1088 args->local_nentries = ch->local_nentries;
1089 args->local_msgqueue_pa = __pa(ch->local_msgqueue);
1090
1091 XPC_NOTIFY_IRQ_SEND(ch, XPC_IPI_OPENREPLY, irq_flags);
1092}
1093
1094static inline void
1095xpc_IPI_send_msgrequest(struct xpc_channel *ch)
1096{
1097 XPC_NOTIFY_IRQ_SEND(ch, XPC_IPI_MSGREQUEST, NULL);
1098}
1099
1100static inline void
1101xpc_IPI_send_local_msgrequest(struct xpc_channel *ch)
1102{
1103 XPC_NOTIFY_IRQ_SEND_LOCAL(ch, XPC_IPI_MSGREQUEST);
1104}
1105
1106/*
1107 * Memory for XPC's AMO variables is allocated by the MSPEC driver. These
1108 * pages are located in the lowest granule. The lowest granule uses 4k pages
1109 * for cached references and an alternate TLB handler to never provide a
1110 * cacheable mapping for the entire region. This will prevent speculative
1111 * reading of cached copies of our lines from being issued which will cause
1112 * a PI FSB Protocol error to be generated by the SHUB. For XPC, we need 64
1113 * AMO variables (based on XP_MAX_PARTITIONS) for message notification and an
1114 * additional 128 AMO variables (based on XP_NASID_MASK_WORDS) for partition
1115 * activation and 2 AMO variables for partition deactivation.
1116 */
1117static inline AMO_t *
1118xpc_IPI_init(int index)
1119{
1120 AMO_t *amo = xpc_vars->amos_page + index;
1121
1122 (void)xpc_IPI_receive(amo); /* clear AMO variable */
1123 return amo;
1124}
1125
1126static inline enum xp_retval
1127xpc_map_bte_errors(bte_result_t error)
1128{
1129 return ((error == BTE_SUCCESS) ? xpSuccess : xpBteCopyError);
1130}
1131
1132/*
1133 * Check to see if there is any channel activity to/from the specified
1134 * partition.
1135 */
1136static inline void
1137xpc_check_for_channel_activity(struct xpc_partition *part)
1138{
1139 u64 IPI_amo;
1140 unsigned long irq_flags;
1141
1142 IPI_amo = xpc_IPI_receive(part->local_IPI_amo_va);
1143 if (IPI_amo == 0)
1144 return;
1145
1146 spin_lock_irqsave(&part->IPI_lock, irq_flags);
1147 part->local_IPI_amo |= IPI_amo;
1148 spin_unlock_irqrestore(&part->IPI_lock, irq_flags);
1149
1150 dev_dbg(xpc_chan, "received IPI from partid=%d, IPI_amo=0x%lx\n",
1151 XPC_PARTID(part), IPI_amo);
1152
1153 xpc_wakeup_channel_mgr(part);
1154}
1155
1156#endif /* _DRIVERS_MISC_SGIXP_XPC_H */ 990#endif /* _DRIVERS_MISC_SGIXP_XPC_H */
diff --git a/drivers/misc/sgi-xp/xpc_channel.c b/drivers/misc/sgi-xp/xpc_channel.c
index 9c90c2d55c08..9cd2ebe2a3b6 100644
--- a/drivers/misc/sgi-xp/xpc_channel.c
+++ b/drivers/misc/sgi-xp/xpc_channel.c
@@ -14,536 +14,10 @@
14 * 14 *
15 */ 15 */
16 16
17#include <linux/kernel.h> 17#include <linux/device.h>
18#include <linux/init.h>
19#include <linux/sched.h>
20#include <linux/cache.h>
21#include <linux/interrupt.h>
22#include <linux/mutex.h>
23#include <linux/completion.h>
24#include <asm/sn/bte.h>
25#include <asm/sn/sn_sal.h>
26#include "xpc.h" 18#include "xpc.h"
27 19
28/* 20/*
29 * Guarantee that the kzalloc'd memory is cacheline aligned.
30 */
31static void *
32xpc_kzalloc_cacheline_aligned(size_t size, gfp_t flags, void **base)
33{
34 /* see if kzalloc will give us cacheline aligned memory by default */
35 *base = kzalloc(size, flags);
36 if (*base == NULL)
37 return NULL;
38
39 if ((u64)*base == L1_CACHE_ALIGN((u64)*base))
40 return *base;
41
42 kfree(*base);
43
44 /* nope, we'll have to do it ourselves */
45 *base = kzalloc(size + L1_CACHE_BYTES, flags);
46 if (*base == NULL)
47 return NULL;
48
49 return (void *)L1_CACHE_ALIGN((u64)*base);
50}
51
52/*
53 * Set up the initial values for the XPartition Communication channels.
54 */
55static void
56xpc_initialize_channels(struct xpc_partition *part, short partid)
57{
58 int ch_number;
59 struct xpc_channel *ch;
60
61 for (ch_number = 0; ch_number < part->nchannels; ch_number++) {
62 ch = &part->channels[ch_number];
63
64 ch->partid = partid;
65 ch->number = ch_number;
66 ch->flags = XPC_C_DISCONNECTED;
67
68 ch->local_GP = &part->local_GPs[ch_number];
69 ch->local_openclose_args =
70 &part->local_openclose_args[ch_number];
71
72 atomic_set(&ch->kthreads_assigned, 0);
73 atomic_set(&ch->kthreads_idle, 0);
74 atomic_set(&ch->kthreads_active, 0);
75
76 atomic_set(&ch->references, 0);
77 atomic_set(&ch->n_to_notify, 0);
78
79 spin_lock_init(&ch->lock);
80 mutex_init(&ch->msg_to_pull_mutex);
81 init_completion(&ch->wdisconnect_wait);
82
83 atomic_set(&ch->n_on_msg_allocate_wq, 0);
84 init_waitqueue_head(&ch->msg_allocate_wq);
85 init_waitqueue_head(&ch->idle_wq);
86 }
87}
88
89/*
90 * Setup the infrastructure necessary to support XPartition Communication
91 * between the specified remote partition and the local one.
92 */
93enum xp_retval
94xpc_setup_infrastructure(struct xpc_partition *part)
95{
96 int ret, cpuid;
97 struct timer_list *timer;
98 short partid = XPC_PARTID(part);
99
100 /*
101 * Zero out MOST of the entry for this partition. Only the fields
102 * starting with `nchannels' will be zeroed. The preceding fields must
103 * remain `viable' across partition ups and downs, since they may be
104 * referenced during this memset() operation.
105 */
106 memset(&part->nchannels, 0, sizeof(struct xpc_partition) -
107 offsetof(struct xpc_partition, nchannels));
108
109 /*
110 * Allocate all of the channel structures as a contiguous chunk of
111 * memory.
112 */
113 part->channels = kzalloc(sizeof(struct xpc_channel) * XPC_NCHANNELS,
114 GFP_KERNEL);
115 if (part->channels == NULL) {
116 dev_err(xpc_chan, "can't get memory for channels\n");
117 return xpNoMemory;
118 }
119
120 part->nchannels = XPC_NCHANNELS;
121
122 /* allocate all the required GET/PUT values */
123
124 part->local_GPs = xpc_kzalloc_cacheline_aligned(XPC_GP_SIZE,
125 GFP_KERNEL,
126 &part->local_GPs_base);
127 if (part->local_GPs == NULL) {
128 kfree(part->channels);
129 part->channels = NULL;
130 dev_err(xpc_chan, "can't get memory for local get/put "
131 "values\n");
132 return xpNoMemory;
133 }
134
135 part->remote_GPs = xpc_kzalloc_cacheline_aligned(XPC_GP_SIZE,
136 GFP_KERNEL,
137 &part->
138 remote_GPs_base);
139 if (part->remote_GPs == NULL) {
140 dev_err(xpc_chan, "can't get memory for remote get/put "
141 "values\n");
142 kfree(part->local_GPs_base);
143 part->local_GPs = NULL;
144 kfree(part->channels);
145 part->channels = NULL;
146 return xpNoMemory;
147 }
148
149 /* allocate all the required open and close args */
150
151 part->local_openclose_args =
152 xpc_kzalloc_cacheline_aligned(XPC_OPENCLOSE_ARGS_SIZE, GFP_KERNEL,
153 &part->local_openclose_args_base);
154 if (part->local_openclose_args == NULL) {
155 dev_err(xpc_chan, "can't get memory for local connect args\n");
156 kfree(part->remote_GPs_base);
157 part->remote_GPs = NULL;
158 kfree(part->local_GPs_base);
159 part->local_GPs = NULL;
160 kfree(part->channels);
161 part->channels = NULL;
162 return xpNoMemory;
163 }
164
165 part->remote_openclose_args =
166 xpc_kzalloc_cacheline_aligned(XPC_OPENCLOSE_ARGS_SIZE, GFP_KERNEL,
167 &part->remote_openclose_args_base);
168 if (part->remote_openclose_args == NULL) {
169 dev_err(xpc_chan, "can't get memory for remote connect args\n");
170 kfree(part->local_openclose_args_base);
171 part->local_openclose_args = NULL;
172 kfree(part->remote_GPs_base);
173 part->remote_GPs = NULL;
174 kfree(part->local_GPs_base);
175 part->local_GPs = NULL;
176 kfree(part->channels);
177 part->channels = NULL;
178 return xpNoMemory;
179 }
180
181 xpc_initialize_channels(part, partid);
182
183 atomic_set(&part->nchannels_active, 0);
184 atomic_set(&part->nchannels_engaged, 0);
185
186 /* local_IPI_amo were set to 0 by an earlier memset() */
187
188 /* Initialize this partition's AMO_t structure */
189 part->local_IPI_amo_va = xpc_IPI_init(partid);
190
191 spin_lock_init(&part->IPI_lock);
192
193 atomic_set(&part->channel_mgr_requests, 1);
194 init_waitqueue_head(&part->channel_mgr_wq);
195
196 sprintf(part->IPI_owner, "xpc%02d", partid);
197 ret = request_irq(SGI_XPC_NOTIFY, xpc_notify_IRQ_handler, IRQF_SHARED,
198 part->IPI_owner, (void *)(u64)partid);
199 if (ret != 0) {
200 dev_err(xpc_chan, "can't register NOTIFY IRQ handler, "
201 "errno=%d\n", -ret);
202 kfree(part->remote_openclose_args_base);
203 part->remote_openclose_args = NULL;
204 kfree(part->local_openclose_args_base);
205 part->local_openclose_args = NULL;
206 kfree(part->remote_GPs_base);
207 part->remote_GPs = NULL;
208 kfree(part->local_GPs_base);
209 part->local_GPs = NULL;
210 kfree(part->channels);
211 part->channels = NULL;
212 return xpLackOfResources;
213 }
214
215 /* Set up a timer to check for dropped IPIs */
216 timer = &part->dropped_IPI_timer;
217 init_timer(timer);
218 timer->function = (void (*)(unsigned long))xpc_dropped_IPI_check;
219 timer->data = (unsigned long)part;
220 timer->expires = jiffies + XPC_P_DROPPED_IPI_WAIT;
221 add_timer(timer);
222
223 /*
224 * With the setting of the partition setup_state to XPC_P_SETUP, we're
225 * declaring that this partition is ready to go.
226 */
227 part->setup_state = XPC_P_SETUP;
228
229 /*
230 * Setup the per partition specific variables required by the
231 * remote partition to establish channel connections with us.
232 *
233 * The setting of the magic # indicates that these per partition
234 * specific variables are ready to be used.
235 */
236 xpc_vars_part[partid].GPs_pa = __pa(part->local_GPs);
237 xpc_vars_part[partid].openclose_args_pa =
238 __pa(part->local_openclose_args);
239 xpc_vars_part[partid].IPI_amo_pa = __pa(part->local_IPI_amo_va);
240 cpuid = raw_smp_processor_id(); /* any CPU in this partition will do */
241 xpc_vars_part[partid].IPI_nasid = cpuid_to_nasid(cpuid);
242 xpc_vars_part[partid].IPI_phys_cpuid = cpu_physical_id(cpuid);
243 xpc_vars_part[partid].nchannels = part->nchannels;
244 xpc_vars_part[partid].magic = XPC_VP_MAGIC1;
245
246 return xpSuccess;
247}
248
249/*
250 * Create a wrapper that hides the underlying mechanism for pulling a cacheline
251 * (or multiple cachelines) from a remote partition.
252 *
253 * src must be a cacheline aligned physical address on the remote partition.
254 * dst must be a cacheline aligned virtual address on this partition.
255 * cnt must be a multiple of the cacheline size
256 */
257static enum xp_retval
258xpc_pull_remote_cachelines(struct xpc_partition *part, void *dst,
259 const void *src, size_t cnt)
260{
261 bte_result_t bte_ret;
262
263 DBUG_ON((u64)src != L1_CACHE_ALIGN((u64)src));
264 DBUG_ON((u64)dst != L1_CACHE_ALIGN((u64)dst));
265 DBUG_ON(cnt != L1_CACHE_ALIGN(cnt));
266
267 if (part->act_state == XPC_P_DEACTIVATING)
268 return part->reason;
269
270 bte_ret = xp_bte_copy((u64)src, (u64)dst, (u64)cnt,
271 (BTE_NORMAL | BTE_WACQUIRE), NULL);
272 if (bte_ret == BTE_SUCCESS)
273 return xpSuccess;
274
275 dev_dbg(xpc_chan, "xp_bte_copy() from partition %d failed, ret=%d\n",
276 XPC_PARTID(part), bte_ret);
277
278 return xpc_map_bte_errors(bte_ret);
279}
280
281/*
282 * Pull the remote per partition specific variables from the specified
283 * partition.
284 */
285enum xp_retval
286xpc_pull_remote_vars_part(struct xpc_partition *part)
287{
288 u8 buffer[L1_CACHE_BYTES * 2];
289 struct xpc_vars_part *pulled_entry_cacheline =
290 (struct xpc_vars_part *)L1_CACHE_ALIGN((u64)buffer);
291 struct xpc_vars_part *pulled_entry;
292 u64 remote_entry_cacheline_pa, remote_entry_pa;
293 short partid = XPC_PARTID(part);
294 enum xp_retval ret;
295
296 /* pull the cacheline that contains the variables we're interested in */
297
298 DBUG_ON(part->remote_vars_part_pa !=
299 L1_CACHE_ALIGN(part->remote_vars_part_pa));
300 DBUG_ON(sizeof(struct xpc_vars_part) != L1_CACHE_BYTES / 2);
301
302 remote_entry_pa = part->remote_vars_part_pa +
303 sn_partition_id * sizeof(struct xpc_vars_part);
304
305 remote_entry_cacheline_pa = (remote_entry_pa & ~(L1_CACHE_BYTES - 1));
306
307 pulled_entry = (struct xpc_vars_part *)((u64)pulled_entry_cacheline +
308 (remote_entry_pa &
309 (L1_CACHE_BYTES - 1)));
310
311 ret = xpc_pull_remote_cachelines(part, pulled_entry_cacheline,
312 (void *)remote_entry_cacheline_pa,
313 L1_CACHE_BYTES);
314 if (ret != xpSuccess) {
315 dev_dbg(xpc_chan, "failed to pull XPC vars_part from "
316 "partition %d, ret=%d\n", partid, ret);
317 return ret;
318 }
319
320 /* see if they've been set up yet */
321
322 if (pulled_entry->magic != XPC_VP_MAGIC1 &&
323 pulled_entry->magic != XPC_VP_MAGIC2) {
324
325 if (pulled_entry->magic != 0) {
326 dev_dbg(xpc_chan, "partition %d's XPC vars_part for "
327 "partition %d has bad magic value (=0x%lx)\n",
328 partid, sn_partition_id, pulled_entry->magic);
329 return xpBadMagic;
330 }
331
332 /* they've not been initialized yet */
333 return xpRetry;
334 }
335
336 if (xpc_vars_part[partid].magic == XPC_VP_MAGIC1) {
337
338 /* validate the variables */
339
340 if (pulled_entry->GPs_pa == 0 ||
341 pulled_entry->openclose_args_pa == 0 ||
342 pulled_entry->IPI_amo_pa == 0) {
343
344 dev_err(xpc_chan, "partition %d's XPC vars_part for "
345 "partition %d are not valid\n", partid,
346 sn_partition_id);
347 return xpInvalidAddress;
348 }
349
350 /* the variables we imported look to be valid */
351
352 part->remote_GPs_pa = pulled_entry->GPs_pa;
353 part->remote_openclose_args_pa =
354 pulled_entry->openclose_args_pa;
355 part->remote_IPI_amo_va =
356 (AMO_t *)__va(pulled_entry->IPI_amo_pa);
357 part->remote_IPI_nasid = pulled_entry->IPI_nasid;
358 part->remote_IPI_phys_cpuid = pulled_entry->IPI_phys_cpuid;
359
360 if (part->nchannels > pulled_entry->nchannels)
361 part->nchannels = pulled_entry->nchannels;
362
363 /* let the other side know that we've pulled their variables */
364
365 xpc_vars_part[partid].magic = XPC_VP_MAGIC2;
366 }
367
368 if (pulled_entry->magic == XPC_VP_MAGIC1)
369 return xpRetry;
370
371 return xpSuccess;
372}
373
374/*
375 * Get the IPI flags and pull the openclose args and/or remote GPs as needed.
376 */
377static u64
378xpc_get_IPI_flags(struct xpc_partition *part)
379{
380 unsigned long irq_flags;
381 u64 IPI_amo;
382 enum xp_retval ret;
383
384 /*
385 * See if there are any IPI flags to be handled.
386 */
387
388 spin_lock_irqsave(&part->IPI_lock, irq_flags);
389 IPI_amo = part->local_IPI_amo;
390 if (IPI_amo != 0)
391 part->local_IPI_amo = 0;
392
393 spin_unlock_irqrestore(&part->IPI_lock, irq_flags);
394
395 if (XPC_ANY_OPENCLOSE_IPI_FLAGS_SET(IPI_amo)) {
396 ret = xpc_pull_remote_cachelines(part,
397 part->remote_openclose_args,
398 (void *)part->
399 remote_openclose_args_pa,
400 XPC_OPENCLOSE_ARGS_SIZE);
401 if (ret != xpSuccess) {
402 XPC_DEACTIVATE_PARTITION(part, ret);
403
404 dev_dbg(xpc_chan, "failed to pull openclose args from "
405 "partition %d, ret=%d\n", XPC_PARTID(part),
406 ret);
407
408 /* don't bother processing IPIs anymore */
409 IPI_amo = 0;
410 }
411 }
412
413 if (XPC_ANY_MSG_IPI_FLAGS_SET(IPI_amo)) {
414 ret = xpc_pull_remote_cachelines(part, part->remote_GPs,
415 (void *)part->remote_GPs_pa,
416 XPC_GP_SIZE);
417 if (ret != xpSuccess) {
418 XPC_DEACTIVATE_PARTITION(part, ret);
419
420 dev_dbg(xpc_chan, "failed to pull GPs from partition "
421 "%d, ret=%d\n", XPC_PARTID(part), ret);
422
423 /* don't bother processing IPIs anymore */
424 IPI_amo = 0;
425 }
426 }
427
428 return IPI_amo;
429}
430
431/*
432 * Allocate the local message queue and the notify queue.
433 */
434static enum xp_retval
435xpc_allocate_local_msgqueue(struct xpc_channel *ch)
436{
437 unsigned long irq_flags;
438 int nentries;
439 size_t nbytes;
440
441 for (nentries = ch->local_nentries; nentries > 0; nentries--) {
442
443 nbytes = nentries * ch->msg_size;
444 ch->local_msgqueue = xpc_kzalloc_cacheline_aligned(nbytes,
445 GFP_KERNEL,
446 &ch->local_msgqueue_base);
447 if (ch->local_msgqueue == NULL)
448 continue;
449
450 nbytes = nentries * sizeof(struct xpc_notify);
451 ch->notify_queue = kzalloc(nbytes, GFP_KERNEL);
452 if (ch->notify_queue == NULL) {
453 kfree(ch->local_msgqueue_base);
454 ch->local_msgqueue = NULL;
455 continue;
456 }
457
458 spin_lock_irqsave(&ch->lock, irq_flags);
459 if (nentries < ch->local_nentries) {
460 dev_dbg(xpc_chan, "nentries=%d local_nentries=%d, "
461 "partid=%d, channel=%d\n", nentries,
462 ch->local_nentries, ch->partid, ch->number);
463
464 ch->local_nentries = nentries;
465 }
466 spin_unlock_irqrestore(&ch->lock, irq_flags);
467 return xpSuccess;
468 }
469
470 dev_dbg(xpc_chan, "can't get memory for local message queue and notify "
471 "queue, partid=%d, channel=%d\n", ch->partid, ch->number);
472 return xpNoMemory;
473}
474
475/*
476 * Allocate the cached remote message queue.
477 */
478static enum xp_retval
479xpc_allocate_remote_msgqueue(struct xpc_channel *ch)
480{
481 unsigned long irq_flags;
482 int nentries;
483 size_t nbytes;
484
485 DBUG_ON(ch->remote_nentries <= 0);
486
487 for (nentries = ch->remote_nentries; nentries > 0; nentries--) {
488
489 nbytes = nentries * ch->msg_size;
490 ch->remote_msgqueue = xpc_kzalloc_cacheline_aligned(nbytes,
491 GFP_KERNEL,
492 &ch->remote_msgqueue_base);
493 if (ch->remote_msgqueue == NULL)
494 continue;
495
496 spin_lock_irqsave(&ch->lock, irq_flags);
497 if (nentries < ch->remote_nentries) {
498 dev_dbg(xpc_chan, "nentries=%d remote_nentries=%d, "
499 "partid=%d, channel=%d\n", nentries,
500 ch->remote_nentries, ch->partid, ch->number);
501
502 ch->remote_nentries = nentries;
503 }
504 spin_unlock_irqrestore(&ch->lock, irq_flags);
505 return xpSuccess;
506 }
507
508 dev_dbg(xpc_chan, "can't get memory for cached remote message queue, "
509 "partid=%d, channel=%d\n", ch->partid, ch->number);
510 return xpNoMemory;
511}
512
513/*
514 * Allocate message queues and other stuff associated with a channel.
515 *
516 * Note: Assumes all of the channel sizes are filled in.
517 */
518static enum xp_retval
519xpc_allocate_msgqueues(struct xpc_channel *ch)
520{
521 unsigned long irq_flags;
522 enum xp_retval ret;
523
524 DBUG_ON(ch->flags & XPC_C_SETUP);
525
526 ret = xpc_allocate_local_msgqueue(ch);
527 if (ret != xpSuccess)
528 return ret;
529
530 ret = xpc_allocate_remote_msgqueue(ch);
531 if (ret != xpSuccess) {
532 kfree(ch->local_msgqueue_base);
533 ch->local_msgqueue = NULL;
534 kfree(ch->notify_queue);
535 ch->notify_queue = NULL;
536 return ret;
537 }
538
539 spin_lock_irqsave(&ch->lock, irq_flags);
540 ch->flags |= XPC_C_SETUP;
541 spin_unlock_irqrestore(&ch->lock, irq_flags);
542
543 return xpSuccess;
544}
545
546/*
547 * Process a connect message from a remote partition. 21 * Process a connect message from a remote partition.
548 * 22 *
549 * Note: xpc_process_connect() is expecting to be called with the 23 * Note: xpc_process_connect() is expecting to be called with the
@@ -565,30 +39,29 @@ xpc_process_connect(struct xpc_channel *ch, unsigned long *irq_flags)
565 39
566 if (!(ch->flags & XPC_C_SETUP)) { 40 if (!(ch->flags & XPC_C_SETUP)) {
567 spin_unlock_irqrestore(&ch->lock, *irq_flags); 41 spin_unlock_irqrestore(&ch->lock, *irq_flags);
568 ret = xpc_allocate_msgqueues(ch); 42 ret = xpc_setup_msg_structures(ch);
569 spin_lock_irqsave(&ch->lock, *irq_flags); 43 spin_lock_irqsave(&ch->lock, *irq_flags);
570 44
571 if (ret != xpSuccess) 45 if (ret != xpSuccess)
572 XPC_DISCONNECT_CHANNEL(ch, ret, irq_flags); 46 XPC_DISCONNECT_CHANNEL(ch, ret, irq_flags);
573 47
48 ch->flags |= XPC_C_SETUP;
49
574 if (ch->flags & (XPC_C_CONNECTED | XPC_C_DISCONNECTING)) 50 if (ch->flags & (XPC_C_CONNECTED | XPC_C_DISCONNECTING))
575 return; 51 return;
576 52
577 DBUG_ON(!(ch->flags & XPC_C_SETUP));
578 DBUG_ON(ch->local_msgqueue == NULL); 53 DBUG_ON(ch->local_msgqueue == NULL);
579 DBUG_ON(ch->remote_msgqueue == NULL); 54 DBUG_ON(ch->remote_msgqueue == NULL);
580 } 55 }
581 56
582 if (!(ch->flags & XPC_C_OPENREPLY)) { 57 if (!(ch->flags & XPC_C_OPENREPLY)) {
583 ch->flags |= XPC_C_OPENREPLY; 58 ch->flags |= XPC_C_OPENREPLY;
584 xpc_IPI_send_openreply(ch, irq_flags); 59 xpc_send_chctl_openreply(ch, irq_flags);
585 } 60 }
586 61
587 if (!(ch->flags & XPC_C_ROPENREPLY)) 62 if (!(ch->flags & XPC_C_ROPENREPLY))
588 return; 63 return;
589 64
590 DBUG_ON(ch->remote_msgqueue_pa == 0);
591
592 ch->flags = (XPC_C_CONNECTED | XPC_C_SETUP); /* clear all else */ 65 ch->flags = (XPC_C_CONNECTED | XPC_C_SETUP); /* clear all else */
593 66
594 dev_info(xpc_chan, "channel %d to partition %d connected\n", 67 dev_info(xpc_chan, "channel %d to partition %d connected\n",
@@ -600,99 +73,6 @@ xpc_process_connect(struct xpc_channel *ch, unsigned long *irq_flags)
600} 73}
601 74
602/* 75/*
603 * Notify those who wanted to be notified upon delivery of their message.
604 */
605static void
606xpc_notify_senders(struct xpc_channel *ch, enum xp_retval reason, s64 put)
607{
608 struct xpc_notify *notify;
609 u8 notify_type;
610 s64 get = ch->w_remote_GP.get - 1;
611
612 while (++get < put && atomic_read(&ch->n_to_notify) > 0) {
613
614 notify = &ch->notify_queue[get % ch->local_nentries];
615
616 /*
617 * See if the notify entry indicates it was associated with
618 * a message whose sender wants to be notified. It is possible
619 * that it is, but someone else is doing or has done the
620 * notification.
621 */
622 notify_type = notify->type;
623 if (notify_type == 0 ||
624 cmpxchg(&notify->type, notify_type, 0) != notify_type) {
625 continue;
626 }
627
628 DBUG_ON(notify_type != XPC_N_CALL);
629
630 atomic_dec(&ch->n_to_notify);
631
632 if (notify->func != NULL) {
633 dev_dbg(xpc_chan, "notify->func() called, notify=0x%p, "
634 "msg_number=%ld, partid=%d, channel=%d\n",
635 (void *)notify, get, ch->partid, ch->number);
636
637 notify->func(reason, ch->partid, ch->number,
638 notify->key);
639
640 dev_dbg(xpc_chan, "notify->func() returned, "
641 "notify=0x%p, msg_number=%ld, partid=%d, "
642 "channel=%d\n", (void *)notify, get,
643 ch->partid, ch->number);
644 }
645 }
646}
647
648/*
649 * Free up message queues and other stuff that were allocated for the specified
650 * channel.
651 *
652 * Note: ch->reason and ch->reason_line are left set for debugging purposes,
653 * they're cleared when XPC_C_DISCONNECTED is cleared.
654 */
655static void
656xpc_free_msgqueues(struct xpc_channel *ch)
657{
658 DBUG_ON(!spin_is_locked(&ch->lock));
659 DBUG_ON(atomic_read(&ch->n_to_notify) != 0);
660
661 ch->remote_msgqueue_pa = 0;
662 ch->func = NULL;
663 ch->key = NULL;
664 ch->msg_size = 0;
665 ch->local_nentries = 0;
666 ch->remote_nentries = 0;
667 ch->kthreads_assigned_limit = 0;
668 ch->kthreads_idle_limit = 0;
669
670 ch->local_GP->get = 0;
671 ch->local_GP->put = 0;
672 ch->remote_GP.get = 0;
673 ch->remote_GP.put = 0;
674 ch->w_local_GP.get = 0;
675 ch->w_local_GP.put = 0;
676 ch->w_remote_GP.get = 0;
677 ch->w_remote_GP.put = 0;
678 ch->next_msg_to_pull = 0;
679
680 if (ch->flags & XPC_C_SETUP) {
681 ch->flags &= ~XPC_C_SETUP;
682
683 dev_dbg(xpc_chan, "ch->flags=0x%x, partid=%d, channel=%d\n",
684 ch->flags, ch->partid, ch->number);
685
686 kfree(ch->local_msgqueue_base);
687 ch->local_msgqueue = NULL;
688 kfree(ch->remote_msgqueue_base);
689 ch->remote_msgqueue = NULL;
690 kfree(ch->notify_queue);
691 ch->notify_queue = NULL;
692 }
693}
694
695/*
696 * spin_lock_irqsave() is expected to be held on entry. 76 * spin_lock_irqsave() is expected to be held on entry.
697 */ 77 */
698static void 78static void
@@ -717,9 +97,9 @@ xpc_process_disconnect(struct xpc_channel *ch, unsigned long *irq_flags)
717 DBUG_ON((ch->flags & XPC_C_CONNECTEDCALLOUT_MADE) && 97 DBUG_ON((ch->flags & XPC_C_CONNECTEDCALLOUT_MADE) &&
718 !(ch->flags & XPC_C_DISCONNECTINGCALLOUT_MADE)); 98 !(ch->flags & XPC_C_DISCONNECTINGCALLOUT_MADE));
719 99
720 if (part->act_state == XPC_P_DEACTIVATING) { 100 if (part->act_state == XPC_P_AS_DEACTIVATING) {
721 /* can't proceed until the other side disengages from us */ 101 /* can't proceed until the other side disengages from us */
722 if (xpc_partition_engaged(1UL << ch->partid)) 102 if (xpc_partition_engaged(ch->partid))
723 return; 103 return;
724 104
725 } else { 105 } else {
@@ -731,7 +111,7 @@ xpc_process_disconnect(struct xpc_channel *ch, unsigned long *irq_flags)
731 111
732 if (!(ch->flags & XPC_C_CLOSEREPLY)) { 112 if (!(ch->flags & XPC_C_CLOSEREPLY)) {
733 ch->flags |= XPC_C_CLOSEREPLY; 113 ch->flags |= XPC_C_CLOSEREPLY;
734 xpc_IPI_send_closereply(ch, irq_flags); 114 xpc_send_chctl_closereply(ch, irq_flags);
735 } 115 }
736 116
737 if (!(ch->flags & XPC_C_RCLOSEREPLY)) 117 if (!(ch->flags & XPC_C_RCLOSEREPLY))
@@ -740,8 +120,8 @@ xpc_process_disconnect(struct xpc_channel *ch, unsigned long *irq_flags)
740 120
741 /* wake those waiting for notify completion */ 121 /* wake those waiting for notify completion */
742 if (atomic_read(&ch->n_to_notify) > 0) { 122 if (atomic_read(&ch->n_to_notify) > 0) {
743 /* >>> we do callout while holding ch->lock */ 123 /* we do callout while holding ch->lock, callout can't block */
744 xpc_notify_senders(ch, ch->reason, ch->w_local_GP.put); 124 xpc_notify_senders_of_disconnect(ch);
745 } 125 }
746 126
747 /* both sides are disconnected now */ 127 /* both sides are disconnected now */
@@ -752,10 +132,24 @@ xpc_process_disconnect(struct xpc_channel *ch, unsigned long *irq_flags)
752 spin_lock_irqsave(&ch->lock, *irq_flags); 132 spin_lock_irqsave(&ch->lock, *irq_flags);
753 } 133 }
754 134
135 DBUG_ON(atomic_read(&ch->n_to_notify) != 0);
136
755 /* it's now safe to free the channel's message queues */ 137 /* it's now safe to free the channel's message queues */
756 xpc_free_msgqueues(ch); 138 xpc_teardown_msg_structures(ch);
757 139
758 /* mark disconnected, clear all other flags except XPC_C_WDISCONNECT */ 140 ch->func = NULL;
141 ch->key = NULL;
142 ch->entry_size = 0;
143 ch->local_nentries = 0;
144 ch->remote_nentries = 0;
145 ch->kthreads_assigned_limit = 0;
146 ch->kthreads_idle_limit = 0;
147
148 /*
149 * Mark the channel disconnected and clear all other flags, including
150 * XPC_C_SETUP (because of call to xpc_teardown_msg_structures()) but
151 * not including XPC_C_WDISCONNECT (if it was set).
152 */
759 ch->flags = (XPC_C_DISCONNECTED | (ch->flags & XPC_C_WDISCONNECT)); 153 ch->flags = (XPC_C_DISCONNECTED | (ch->flags & XPC_C_WDISCONNECT));
760 154
761 atomic_dec(&part->nchannels_active); 155 atomic_dec(&part->nchannels_active);
@@ -768,15 +162,15 @@ xpc_process_disconnect(struct xpc_channel *ch, unsigned long *irq_flags)
768 if (ch->flags & XPC_C_WDISCONNECT) { 162 if (ch->flags & XPC_C_WDISCONNECT) {
769 /* we won't lose the CPU since we're holding ch->lock */ 163 /* we won't lose the CPU since we're holding ch->lock */
770 complete(&ch->wdisconnect_wait); 164 complete(&ch->wdisconnect_wait);
771 } else if (ch->delayed_IPI_flags) { 165 } else if (ch->delayed_chctl_flags) {
772 if (part->act_state != XPC_P_DEACTIVATING) { 166 if (part->act_state != XPC_P_AS_DEACTIVATING) {
773 /* time to take action on any delayed IPI flags */ 167 /* time to take action on any delayed chctl flags */
774 spin_lock(&part->IPI_lock); 168 spin_lock(&part->chctl_lock);
775 XPC_SET_IPI_FLAGS(part->local_IPI_amo, ch->number, 169 part->chctl.flags[ch->number] |=
776 ch->delayed_IPI_flags); 170 ch->delayed_chctl_flags;
777 spin_unlock(&part->IPI_lock); 171 spin_unlock(&part->chctl_lock);
778 } 172 }
779 ch->delayed_IPI_flags = 0; 173 ch->delayed_chctl_flags = 0;
780 } 174 }
781} 175}
782 176
@@ -784,8 +178,8 @@ xpc_process_disconnect(struct xpc_channel *ch, unsigned long *irq_flags)
784 * Process a change in the channel's remote connection state. 178 * Process a change in the channel's remote connection state.
785 */ 179 */
786static void 180static void
787xpc_process_openclose_IPI(struct xpc_partition *part, int ch_number, 181xpc_process_openclose_chctl_flags(struct xpc_partition *part, int ch_number,
788 u8 IPI_flags) 182 u8 chctl_flags)
789{ 183{
790 unsigned long irq_flags; 184 unsigned long irq_flags;
791 struct xpc_openclose_args *args = 185 struct xpc_openclose_args *args =
@@ -800,24 +194,24 @@ again:
800 if ((ch->flags & XPC_C_DISCONNECTED) && 194 if ((ch->flags & XPC_C_DISCONNECTED) &&
801 (ch->flags & XPC_C_WDISCONNECT)) { 195 (ch->flags & XPC_C_WDISCONNECT)) {
802 /* 196 /*
803 * Delay processing IPI flags until thread waiting disconnect 197 * Delay processing chctl flags until thread waiting disconnect
804 * has had a chance to see that the channel is disconnected. 198 * has had a chance to see that the channel is disconnected.
805 */ 199 */
806 ch->delayed_IPI_flags |= IPI_flags; 200 ch->delayed_chctl_flags |= chctl_flags;
807 spin_unlock_irqrestore(&ch->lock, irq_flags); 201 spin_unlock_irqrestore(&ch->lock, irq_flags);
808 return; 202 return;
809 } 203 }
810 204
811 if (IPI_flags & XPC_IPI_CLOSEREQUEST) { 205 if (chctl_flags & XPC_CHCTL_CLOSEREQUEST) {
812 206
813 dev_dbg(xpc_chan, "XPC_IPI_CLOSEREQUEST (reason=%d) received " 207 dev_dbg(xpc_chan, "XPC_CHCTL_CLOSEREQUEST (reason=%d) received "
814 "from partid=%d, channel=%d\n", args->reason, 208 "from partid=%d, channel=%d\n", args->reason,
815 ch->partid, ch->number); 209 ch->partid, ch->number);
816 210
817 /* 211 /*
818 * If RCLOSEREQUEST is set, we're probably waiting for 212 * If RCLOSEREQUEST is set, we're probably waiting for
819 * RCLOSEREPLY. We should find it and a ROPENREQUEST packed 213 * RCLOSEREPLY. We should find it and a ROPENREQUEST packed
820 * with this RCLOSEREQUEST in the IPI_flags. 214 * with this RCLOSEREQUEST in the chctl_flags.
821 */ 215 */
822 216
823 if (ch->flags & XPC_C_RCLOSEREQUEST) { 217 if (ch->flags & XPC_C_RCLOSEREQUEST) {
@@ -826,8 +220,8 @@ again:
826 DBUG_ON(!(ch->flags & XPC_C_CLOSEREPLY)); 220 DBUG_ON(!(ch->flags & XPC_C_CLOSEREPLY));
827 DBUG_ON(ch->flags & XPC_C_RCLOSEREPLY); 221 DBUG_ON(ch->flags & XPC_C_RCLOSEREPLY);
828 222
829 DBUG_ON(!(IPI_flags & XPC_IPI_CLOSEREPLY)); 223 DBUG_ON(!(chctl_flags & XPC_CHCTL_CLOSEREPLY));
830 IPI_flags &= ~XPC_IPI_CLOSEREPLY; 224 chctl_flags &= ~XPC_CHCTL_CLOSEREPLY;
831 ch->flags |= XPC_C_RCLOSEREPLY; 225 ch->flags |= XPC_C_RCLOSEREPLY;
832 226
833 /* both sides have finished disconnecting */ 227 /* both sides have finished disconnecting */
@@ -837,17 +231,15 @@ again:
837 } 231 }
838 232
839 if (ch->flags & XPC_C_DISCONNECTED) { 233 if (ch->flags & XPC_C_DISCONNECTED) {
840 if (!(IPI_flags & XPC_IPI_OPENREQUEST)) { 234 if (!(chctl_flags & XPC_CHCTL_OPENREQUEST)) {
841 if ((XPC_GET_IPI_FLAGS(part->local_IPI_amo, 235 if (part->chctl.flags[ch_number] &
842 ch_number) & 236 XPC_CHCTL_OPENREQUEST) {
843 XPC_IPI_OPENREQUEST)) { 237
844 238 DBUG_ON(ch->delayed_chctl_flags != 0);
845 DBUG_ON(ch->delayed_IPI_flags != 0); 239 spin_lock(&part->chctl_lock);
846 spin_lock(&part->IPI_lock); 240 part->chctl.flags[ch_number] |=
847 XPC_SET_IPI_FLAGS(part->local_IPI_amo, 241 XPC_CHCTL_CLOSEREQUEST;
848 ch_number, 242 spin_unlock(&part->chctl_lock);
849 XPC_IPI_CLOSEREQUEST);
850 spin_unlock(&part->IPI_lock);
851 } 243 }
852 spin_unlock_irqrestore(&ch->lock, irq_flags); 244 spin_unlock_irqrestore(&ch->lock, irq_flags);
853 return; 245 return;
@@ -860,7 +252,7 @@ again:
860 ch->flags |= (XPC_C_CONNECTING | XPC_C_ROPENREQUEST); 252 ch->flags |= (XPC_C_CONNECTING | XPC_C_ROPENREQUEST);
861 } 253 }
862 254
863 IPI_flags &= ~(XPC_IPI_OPENREQUEST | XPC_IPI_OPENREPLY); 255 chctl_flags &= ~(XPC_CHCTL_OPENREQUEST | XPC_CHCTL_OPENREPLY);
864 256
865 /* 257 /*
866 * The meaningful CLOSEREQUEST connection state fields are: 258 * The meaningful CLOSEREQUEST connection state fields are:
@@ -878,7 +270,7 @@ again:
878 270
879 XPC_DISCONNECT_CHANNEL(ch, reason, &irq_flags); 271 XPC_DISCONNECT_CHANNEL(ch, reason, &irq_flags);
880 272
881 DBUG_ON(IPI_flags & XPC_IPI_CLOSEREPLY); 273 DBUG_ON(chctl_flags & XPC_CHCTL_CLOSEREPLY);
882 spin_unlock_irqrestore(&ch->lock, irq_flags); 274 spin_unlock_irqrestore(&ch->lock, irq_flags);
883 return; 275 return;
884 } 276 }
@@ -886,13 +278,13 @@ again:
886 xpc_process_disconnect(ch, &irq_flags); 278 xpc_process_disconnect(ch, &irq_flags);
887 } 279 }
888 280
889 if (IPI_flags & XPC_IPI_CLOSEREPLY) { 281 if (chctl_flags & XPC_CHCTL_CLOSEREPLY) {
890 282
891 dev_dbg(xpc_chan, "XPC_IPI_CLOSEREPLY received from partid=%d," 283 dev_dbg(xpc_chan, "XPC_CHCTL_CLOSEREPLY received from partid="
892 " channel=%d\n", ch->partid, ch->number); 284 "%d, channel=%d\n", ch->partid, ch->number);
893 285
894 if (ch->flags & XPC_C_DISCONNECTED) { 286 if (ch->flags & XPC_C_DISCONNECTED) {
895 DBUG_ON(part->act_state != XPC_P_DEACTIVATING); 287 DBUG_ON(part->act_state != XPC_P_AS_DEACTIVATING);
896 spin_unlock_irqrestore(&ch->lock, irq_flags); 288 spin_unlock_irqrestore(&ch->lock, irq_flags);
897 return; 289 return;
898 } 290 }
@@ -900,15 +292,14 @@ again:
900 DBUG_ON(!(ch->flags & XPC_C_CLOSEREQUEST)); 292 DBUG_ON(!(ch->flags & XPC_C_CLOSEREQUEST));
901 293
902 if (!(ch->flags & XPC_C_RCLOSEREQUEST)) { 294 if (!(ch->flags & XPC_C_RCLOSEREQUEST)) {
903 if ((XPC_GET_IPI_FLAGS(part->local_IPI_amo, ch_number) 295 if (part->chctl.flags[ch_number] &
904 & XPC_IPI_CLOSEREQUEST)) { 296 XPC_CHCTL_CLOSEREQUEST) {
905 297
906 DBUG_ON(ch->delayed_IPI_flags != 0); 298 DBUG_ON(ch->delayed_chctl_flags != 0);
907 spin_lock(&part->IPI_lock); 299 spin_lock(&part->chctl_lock);
908 XPC_SET_IPI_FLAGS(part->local_IPI_amo, 300 part->chctl.flags[ch_number] |=
909 ch_number, 301 XPC_CHCTL_CLOSEREPLY;
910 XPC_IPI_CLOSEREPLY); 302 spin_unlock(&part->chctl_lock);
911 spin_unlock(&part->IPI_lock);
912 } 303 }
913 spin_unlock_irqrestore(&ch->lock, irq_flags); 304 spin_unlock_irqrestore(&ch->lock, irq_flags);
914 return; 305 return;
@@ -922,21 +313,21 @@ again:
922 } 313 }
923 } 314 }
924 315
925 if (IPI_flags & XPC_IPI_OPENREQUEST) { 316 if (chctl_flags & XPC_CHCTL_OPENREQUEST) {
926 317
927 dev_dbg(xpc_chan, "XPC_IPI_OPENREQUEST (msg_size=%d, " 318 dev_dbg(xpc_chan, "XPC_CHCTL_OPENREQUEST (entry_size=%d, "
928 "local_nentries=%d) received from partid=%d, " 319 "local_nentries=%d) received from partid=%d, "
929 "channel=%d\n", args->msg_size, args->local_nentries, 320 "channel=%d\n", args->entry_size, args->local_nentries,
930 ch->partid, ch->number); 321 ch->partid, ch->number);
931 322
932 if (part->act_state == XPC_P_DEACTIVATING || 323 if (part->act_state == XPC_P_AS_DEACTIVATING ||
933 (ch->flags & XPC_C_ROPENREQUEST)) { 324 (ch->flags & XPC_C_ROPENREQUEST)) {
934 spin_unlock_irqrestore(&ch->lock, irq_flags); 325 spin_unlock_irqrestore(&ch->lock, irq_flags);
935 return; 326 return;
936 } 327 }
937 328
938 if (ch->flags & (XPC_C_DISCONNECTING | XPC_C_WDISCONNECT)) { 329 if (ch->flags & (XPC_C_DISCONNECTING | XPC_C_WDISCONNECT)) {
939 ch->delayed_IPI_flags |= XPC_IPI_OPENREQUEST; 330 ch->delayed_chctl_flags |= XPC_CHCTL_OPENREQUEST;
940 spin_unlock_irqrestore(&ch->lock, irq_flags); 331 spin_unlock_irqrestore(&ch->lock, irq_flags);
941 return; 332 return;
942 } 333 }
@@ -947,10 +338,10 @@ again:
947 338
948 /* 339 /*
949 * The meaningful OPENREQUEST connection state fields are: 340 * The meaningful OPENREQUEST connection state fields are:
950 * msg_size = size of channel's messages in bytes 341 * entry_size = size of channel's messages in bytes
951 * local_nentries = remote partition's local_nentries 342 * local_nentries = remote partition's local_nentries
952 */ 343 */
953 if (args->msg_size == 0 || args->local_nentries == 0) { 344 if (args->entry_size == 0 || args->local_nentries == 0) {
954 /* assume OPENREQUEST was delayed by mistake */ 345 /* assume OPENREQUEST was delayed by mistake */
955 spin_unlock_irqrestore(&ch->lock, irq_flags); 346 spin_unlock_irqrestore(&ch->lock, irq_flags);
956 return; 347 return;
@@ -960,14 +351,14 @@ again:
960 ch->remote_nentries = args->local_nentries; 351 ch->remote_nentries = args->local_nentries;
961 352
962 if (ch->flags & XPC_C_OPENREQUEST) { 353 if (ch->flags & XPC_C_OPENREQUEST) {
963 if (args->msg_size != ch->msg_size) { 354 if (args->entry_size != ch->entry_size) {
964 XPC_DISCONNECT_CHANNEL(ch, xpUnequalMsgSizes, 355 XPC_DISCONNECT_CHANNEL(ch, xpUnequalMsgSizes,
965 &irq_flags); 356 &irq_flags);
966 spin_unlock_irqrestore(&ch->lock, irq_flags); 357 spin_unlock_irqrestore(&ch->lock, irq_flags);
967 return; 358 return;
968 } 359 }
969 } else { 360 } else {
970 ch->msg_size = args->msg_size; 361 ch->entry_size = args->entry_size;
971 362
972 XPC_SET_REASON(ch, 0, 0); 363 XPC_SET_REASON(ch, 0, 0);
973 ch->flags &= ~XPC_C_DISCONNECTED; 364 ch->flags &= ~XPC_C_DISCONNECTED;
@@ -978,13 +369,13 @@ again:
978 xpc_process_connect(ch, &irq_flags); 369 xpc_process_connect(ch, &irq_flags);
979 } 370 }
980 371
981 if (IPI_flags & XPC_IPI_OPENREPLY) { 372 if (chctl_flags & XPC_CHCTL_OPENREPLY) {
982 373
983 dev_dbg(xpc_chan, "XPC_IPI_OPENREPLY (local_msgqueue_pa=0x%lx, " 374 dev_dbg(xpc_chan, "XPC_CHCTL_OPENREPLY (local_msgqueue_pa="
984 "local_nentries=%d, remote_nentries=%d) received from " 375 "0x%lx, local_nentries=%d, remote_nentries=%d) "
985 "partid=%d, channel=%d\n", args->local_msgqueue_pa, 376 "received from partid=%d, channel=%d\n",
986 args->local_nentries, args->remote_nentries, 377 args->local_msgqueue_pa, args->local_nentries,
987 ch->partid, ch->number); 378 args->remote_nentries, ch->partid, ch->number);
988 379
989 if (ch->flags & (XPC_C_DISCONNECTING | XPC_C_DISCONNECTED)) { 380 if (ch->flags & (XPC_C_DISCONNECTING | XPC_C_DISCONNECTED)) {
990 spin_unlock_irqrestore(&ch->lock, irq_flags); 381 spin_unlock_irqrestore(&ch->lock, irq_flags);
@@ -1012,10 +403,10 @@ again:
1012 DBUG_ON(args->remote_nentries == 0); 403 DBUG_ON(args->remote_nentries == 0);
1013 404
1014 ch->flags |= XPC_C_ROPENREPLY; 405 ch->flags |= XPC_C_ROPENREPLY;
1015 ch->remote_msgqueue_pa = args->local_msgqueue_pa; 406 xpc_save_remote_msgqueue_pa(ch, args->local_msgqueue_pa);
1016 407
1017 if (args->local_nentries < ch->remote_nentries) { 408 if (args->local_nentries < ch->remote_nentries) {
1018 dev_dbg(xpc_chan, "XPC_IPI_OPENREPLY: new " 409 dev_dbg(xpc_chan, "XPC_CHCTL_OPENREPLY: new "
1019 "remote_nentries=%d, old remote_nentries=%d, " 410 "remote_nentries=%d, old remote_nentries=%d, "
1020 "partid=%d, channel=%d\n", 411 "partid=%d, channel=%d\n",
1021 args->local_nentries, ch->remote_nentries, 412 args->local_nentries, ch->remote_nentries,
@@ -1024,7 +415,7 @@ again:
1024 ch->remote_nentries = args->local_nentries; 415 ch->remote_nentries = args->local_nentries;
1025 } 416 }
1026 if (args->remote_nentries < ch->local_nentries) { 417 if (args->remote_nentries < ch->local_nentries) {
1027 dev_dbg(xpc_chan, "XPC_IPI_OPENREPLY: new " 418 dev_dbg(xpc_chan, "XPC_CHCTL_OPENREPLY: new "
1028 "local_nentries=%d, old local_nentries=%d, " 419 "local_nentries=%d, old local_nentries=%d, "
1029 "partid=%d, channel=%d\n", 420 "partid=%d, channel=%d\n",
1030 args->remote_nentries, ch->local_nentries, 421 args->remote_nentries, ch->local_nentries,
@@ -1082,7 +473,7 @@ xpc_connect_channel(struct xpc_channel *ch)
1082 ch->local_nentries = registration->nentries; 473 ch->local_nentries = registration->nentries;
1083 474
1084 if (ch->flags & XPC_C_ROPENREQUEST) { 475 if (ch->flags & XPC_C_ROPENREQUEST) {
1085 if (registration->msg_size != ch->msg_size) { 476 if (registration->entry_size != ch->entry_size) {
1086 /* the local and remote sides aren't the same */ 477 /* the local and remote sides aren't the same */
1087 478
1088 /* 479 /*
@@ -1101,7 +492,7 @@ xpc_connect_channel(struct xpc_channel *ch)
1101 return xpUnequalMsgSizes; 492 return xpUnequalMsgSizes;
1102 } 493 }
1103 } else { 494 } else {
1104 ch->msg_size = registration->msg_size; 495 ch->entry_size = registration->entry_size;
1105 496
1106 XPC_SET_REASON(ch, 0, 0); 497 XPC_SET_REASON(ch, 0, 0);
1107 ch->flags &= ~XPC_C_DISCONNECTED; 498 ch->flags &= ~XPC_C_DISCONNECTED;
@@ -1114,7 +505,7 @@ xpc_connect_channel(struct xpc_channel *ch)
1114 /* initiate the connection */ 505 /* initiate the connection */
1115 506
1116 ch->flags |= (XPC_C_OPENREQUEST | XPC_C_CONNECTING); 507 ch->flags |= (XPC_C_OPENREQUEST | XPC_C_CONNECTING);
1117 xpc_IPI_send_openrequest(ch, &irq_flags); 508 xpc_send_chctl_openrequest(ch, &irq_flags);
1118 509
1119 xpc_process_connect(ch, &irq_flags); 510 xpc_process_connect(ch, &irq_flags);
1120 511
@@ -1123,152 +514,16 @@ xpc_connect_channel(struct xpc_channel *ch)
1123 return xpSuccess; 514 return xpSuccess;
1124} 515}
1125 516
1126/*
1127 * Clear some of the msg flags in the local message queue.
1128 */
1129static inline void
1130xpc_clear_local_msgqueue_flags(struct xpc_channel *ch)
1131{
1132 struct xpc_msg *msg;
1133 s64 get;
1134
1135 get = ch->w_remote_GP.get;
1136 do {
1137 msg = (struct xpc_msg *)((u64)ch->local_msgqueue +
1138 (get % ch->local_nentries) *
1139 ch->msg_size);
1140 msg->flags = 0;
1141 } while (++get < ch->remote_GP.get);
1142}
1143
1144/*
1145 * Clear some of the msg flags in the remote message queue.
1146 */
1147static inline void
1148xpc_clear_remote_msgqueue_flags(struct xpc_channel *ch)
1149{
1150 struct xpc_msg *msg;
1151 s64 put;
1152
1153 put = ch->w_remote_GP.put;
1154 do {
1155 msg = (struct xpc_msg *)((u64)ch->remote_msgqueue +
1156 (put % ch->remote_nentries) *
1157 ch->msg_size);
1158 msg->flags = 0;
1159 } while (++put < ch->remote_GP.put);
1160}
1161
1162static void
1163xpc_process_msg_IPI(struct xpc_partition *part, int ch_number)
1164{
1165 struct xpc_channel *ch = &part->channels[ch_number];
1166 int nmsgs_sent;
1167
1168 ch->remote_GP = part->remote_GPs[ch_number];
1169
1170 /* See what, if anything, has changed for each connected channel */
1171
1172 xpc_msgqueue_ref(ch);
1173
1174 if (ch->w_remote_GP.get == ch->remote_GP.get &&
1175 ch->w_remote_GP.put == ch->remote_GP.put) {
1176 /* nothing changed since GPs were last pulled */
1177 xpc_msgqueue_deref(ch);
1178 return;
1179 }
1180
1181 if (!(ch->flags & XPC_C_CONNECTED)) {
1182 xpc_msgqueue_deref(ch);
1183 return;
1184 }
1185
1186 /*
1187 * First check to see if messages recently sent by us have been
1188 * received by the other side. (The remote GET value will have
1189 * changed since we last looked at it.)
1190 */
1191
1192 if (ch->w_remote_GP.get != ch->remote_GP.get) {
1193
1194 /*
1195 * We need to notify any senders that want to be notified
1196 * that their sent messages have been received by their
1197 * intended recipients. We need to do this before updating
1198 * w_remote_GP.get so that we don't allocate the same message
1199 * queue entries prematurely (see xpc_allocate_msg()).
1200 */
1201 if (atomic_read(&ch->n_to_notify) > 0) {
1202 /*
1203 * Notify senders that messages sent have been
1204 * received and delivered by the other side.
1205 */
1206 xpc_notify_senders(ch, xpMsgDelivered,
1207 ch->remote_GP.get);
1208 }
1209
1210 /*
1211 * Clear msg->flags in previously sent messages, so that
1212 * they're ready for xpc_allocate_msg().
1213 */
1214 xpc_clear_local_msgqueue_flags(ch);
1215
1216 ch->w_remote_GP.get = ch->remote_GP.get;
1217
1218 dev_dbg(xpc_chan, "w_remote_GP.get changed to %ld, partid=%d, "
1219 "channel=%d\n", ch->w_remote_GP.get, ch->partid,
1220 ch->number);
1221
1222 /*
1223 * If anyone was waiting for message queue entries to become
1224 * available, wake them up.
1225 */
1226 if (atomic_read(&ch->n_on_msg_allocate_wq) > 0)
1227 wake_up(&ch->msg_allocate_wq);
1228 }
1229
1230 /*
1231 * Now check for newly sent messages by the other side. (The remote
1232 * PUT value will have changed since we last looked at it.)
1233 */
1234
1235 if (ch->w_remote_GP.put != ch->remote_GP.put) {
1236 /*
1237 * Clear msg->flags in previously received messages, so that
1238 * they're ready for xpc_get_deliverable_msg().
1239 */
1240 xpc_clear_remote_msgqueue_flags(ch);
1241
1242 ch->w_remote_GP.put = ch->remote_GP.put;
1243
1244 dev_dbg(xpc_chan, "w_remote_GP.put changed to %ld, partid=%d, "
1245 "channel=%d\n", ch->w_remote_GP.put, ch->partid,
1246 ch->number);
1247
1248 nmsgs_sent = ch->w_remote_GP.put - ch->w_local_GP.get;
1249 if (nmsgs_sent > 0) {
1250 dev_dbg(xpc_chan, "msgs waiting to be copied and "
1251 "delivered=%d, partid=%d, channel=%d\n",
1252 nmsgs_sent, ch->partid, ch->number);
1253
1254 if (ch->flags & XPC_C_CONNECTEDCALLOUT_MADE)
1255 xpc_activate_kthreads(ch, nmsgs_sent);
1256 }
1257 }
1258
1259 xpc_msgqueue_deref(ch);
1260}
1261
1262void 517void
1263xpc_process_channel_activity(struct xpc_partition *part) 518xpc_process_sent_chctl_flags(struct xpc_partition *part)
1264{ 519{
1265 unsigned long irq_flags; 520 unsigned long irq_flags;
1266 u64 IPI_amo, IPI_flags; 521 union xpc_channel_ctl_flags chctl;
1267 struct xpc_channel *ch; 522 struct xpc_channel *ch;
1268 int ch_number; 523 int ch_number;
1269 u32 ch_flags; 524 u32 ch_flags;
1270 525
1271 IPI_amo = xpc_get_IPI_flags(part); 526 chctl.all_flags = xpc_get_chctl_all_flags(part);
1272 527
1273 /* 528 /*
1274 * Initiate channel connections for registered channels. 529 * Initiate channel connections for registered channels.
@@ -1281,14 +536,14 @@ xpc_process_channel_activity(struct xpc_partition *part)
1281 ch = &part->channels[ch_number]; 536 ch = &part->channels[ch_number];
1282 537
1283 /* 538 /*
1284 * Process any open or close related IPI flags, and then deal 539 * Process any open or close related chctl flags, and then deal
1285 * with connecting or disconnecting the channel as required. 540 * with connecting or disconnecting the channel as required.
1286 */ 541 */
1287 542
1288 IPI_flags = XPC_GET_IPI_FLAGS(IPI_amo, ch_number); 543 if (chctl.flags[ch_number] & XPC_OPENCLOSE_CHCTL_FLAGS) {
1289 544 xpc_process_openclose_chctl_flags(part, ch_number,
1290 if (XPC_ANY_OPENCLOSE_IPI_FLAGS_SET(IPI_flags)) 545 chctl.flags[ch_number]);
1291 xpc_process_openclose_IPI(part, ch_number, IPI_flags); 546 }
1292 547
1293 ch_flags = ch->flags; /* need an atomic snapshot of flags */ 548 ch_flags = ch->flags; /* need an atomic snapshot of flags */
1294 549
@@ -1299,7 +554,7 @@ xpc_process_channel_activity(struct xpc_partition *part)
1299 continue; 554 continue;
1300 } 555 }
1301 556
1302 if (part->act_state == XPC_P_DEACTIVATING) 557 if (part->act_state == XPC_P_AS_DEACTIVATING)
1303 continue; 558 continue;
1304 559
1305 if (!(ch_flags & XPC_C_CONNECTED)) { 560 if (!(ch_flags & XPC_C_CONNECTED)) {
@@ -1315,13 +570,13 @@ xpc_process_channel_activity(struct xpc_partition *part)
1315 } 570 }
1316 571
1317 /* 572 /*
1318 * Process any message related IPI flags, this may involve the 573 * Process any message related chctl flags, this may involve
1319 * activation of kthreads to deliver any pending messages sent 574 * the activation of kthreads to deliver any pending messages
1320 * from the other partition. 575 * sent from the other partition.
1321 */ 576 */
1322 577
1323 if (XPC_ANY_MSG_IPI_FLAGS_SET(IPI_flags)) 578 if (chctl.flags[ch_number] & XPC_MSG_CHCTL_FLAGS)
1324 xpc_process_msg_IPI(part, ch_number); 579 xpc_process_msg_chctl_flags(part, ch_number);
1325 } 580 }
1326} 581}
1327 582
@@ -1369,59 +624,6 @@ xpc_partition_going_down(struct xpc_partition *part, enum xp_retval reason)
1369} 624}
1370 625
1371/* 626/*
1372 * Teardown the infrastructure necessary to support XPartition Communication
1373 * between the specified remote partition and the local one.
1374 */
1375void
1376xpc_teardown_infrastructure(struct xpc_partition *part)
1377{
1378 short partid = XPC_PARTID(part);
1379
1380 /*
1381 * We start off by making this partition inaccessible to local
1382 * processes by marking it as no longer setup. Then we make it
1383 * inaccessible to remote processes by clearing the XPC per partition
1384 * specific variable's magic # (which indicates that these variables
1385 * are no longer valid) and by ignoring all XPC notify IPIs sent to
1386 * this partition.
1387 */
1388
1389 DBUG_ON(atomic_read(&part->nchannels_engaged) != 0);
1390 DBUG_ON(atomic_read(&part->nchannels_active) != 0);
1391 DBUG_ON(part->setup_state != XPC_P_SETUP);
1392 part->setup_state = XPC_P_WTEARDOWN;
1393
1394 xpc_vars_part[partid].magic = 0;
1395
1396 free_irq(SGI_XPC_NOTIFY, (void *)(u64)partid);
1397
1398 /*
1399 * Before proceeding with the teardown we have to wait until all
1400 * existing references cease.
1401 */
1402 wait_event(part->teardown_wq, (atomic_read(&part->references) == 0));
1403
1404 /* now we can begin tearing down the infrastructure */
1405
1406 part->setup_state = XPC_P_TORNDOWN;
1407
1408 /* in case we've still got outstanding timers registered... */
1409 del_timer_sync(&part->dropped_IPI_timer);
1410
1411 kfree(part->remote_openclose_args_base);
1412 part->remote_openclose_args = NULL;
1413 kfree(part->local_openclose_args_base);
1414 part->local_openclose_args = NULL;
1415 kfree(part->remote_GPs_base);
1416 part->remote_GPs = NULL;
1417 kfree(part->local_GPs_base);
1418 part->local_GPs = NULL;
1419 kfree(part->channels);
1420 part->channels = NULL;
1421 part->local_IPI_amo_va = NULL;
1422}
1423
1424/*
1425 * Called by XP at the time of channel connection registration to cause 627 * Called by XP at the time of channel connection registration to cause
1426 * XPC to establish connections to all currently active partitions. 628 * XPC to establish connections to all currently active partitions.
1427 */ 629 */
@@ -1432,9 +634,9 @@ xpc_initiate_connect(int ch_number)
1432 struct xpc_partition *part; 634 struct xpc_partition *part;
1433 struct xpc_channel *ch; 635 struct xpc_channel *ch;
1434 636
1435 DBUG_ON(ch_number < 0 || ch_number >= XPC_NCHANNELS); 637 DBUG_ON(ch_number < 0 || ch_number >= XPC_MAX_NCHANNELS);
1436 638
1437 for (partid = 1; partid < XP_MAX_PARTITIONS; partid++) { 639 for (partid = 0; partid < xp_max_npartitions; partid++) {
1438 part = &xpc_partitions[partid]; 640 part = &xpc_partitions[partid];
1439 641
1440 if (xpc_part_ref(part)) { 642 if (xpc_part_ref(part)) {
@@ -1488,10 +690,10 @@ xpc_initiate_disconnect(int ch_number)
1488 struct xpc_partition *part; 690 struct xpc_partition *part;
1489 struct xpc_channel *ch; 691 struct xpc_channel *ch;
1490 692
1491 DBUG_ON(ch_number < 0 || ch_number >= XPC_NCHANNELS); 693 DBUG_ON(ch_number < 0 || ch_number >= XPC_MAX_NCHANNELS);
1492 694
1493 /* initiate the channel disconnect for every active partition */ 695 /* initiate the channel disconnect for every active partition */
1494 for (partid = 1; partid < XP_MAX_PARTITIONS; partid++) { 696 for (partid = 0; partid < xp_max_npartitions; partid++) {
1495 part = &xpc_partitions[partid]; 697 part = &xpc_partitions[partid];
1496 698
1497 if (xpc_part_ref(part)) { 699 if (xpc_part_ref(part)) {
@@ -1550,7 +752,7 @@ xpc_disconnect_channel(const int line, struct xpc_channel *ch,
1550 XPC_C_ROPENREQUEST | XPC_C_ROPENREPLY | 752 XPC_C_ROPENREQUEST | XPC_C_ROPENREPLY |
1551 XPC_C_CONNECTING | XPC_C_CONNECTED); 753 XPC_C_CONNECTING | XPC_C_CONNECTED);
1552 754
1553 xpc_IPI_send_closerequest(ch, irq_flags); 755 xpc_send_chctl_closerequest(ch, irq_flags);
1554 756
1555 if (channel_was_connected) 757 if (channel_was_connected)
1556 ch->flags |= XPC_C_WASCONNECTED; 758 ch->flags |= XPC_C_WASCONNECTED;
@@ -1598,7 +800,7 @@ xpc_disconnect_callout(struct xpc_channel *ch, enum xp_retval reason)
1598 * Wait for a message entry to become available for the specified channel, 800 * Wait for a message entry to become available for the specified channel,
1599 * but don't wait any longer than 1 jiffy. 801 * but don't wait any longer than 1 jiffy.
1600 */ 802 */
1601static enum xp_retval 803enum xp_retval
1602xpc_allocate_msg_wait(struct xpc_channel *ch) 804xpc_allocate_msg_wait(struct xpc_channel *ch)
1603{ 805{
1604 enum xp_retval ret; 806 enum xp_retval ret;
@@ -1625,315 +827,54 @@ xpc_allocate_msg_wait(struct xpc_channel *ch)
1625} 827}
1626 828
1627/* 829/*
1628 * Allocate an entry for a message from the message queue associated with the 830 * Send a message that contains the user's payload on the specified channel
1629 * specified channel. 831 * connected to the specified partition.
1630 */
1631static enum xp_retval
1632xpc_allocate_msg(struct xpc_channel *ch, u32 flags,
1633 struct xpc_msg **address_of_msg)
1634{
1635 struct xpc_msg *msg;
1636 enum xp_retval ret;
1637 s64 put;
1638
1639 /* this reference will be dropped in xpc_send_msg() */
1640 xpc_msgqueue_ref(ch);
1641
1642 if (ch->flags & XPC_C_DISCONNECTING) {
1643 xpc_msgqueue_deref(ch);
1644 return ch->reason;
1645 }
1646 if (!(ch->flags & XPC_C_CONNECTED)) {
1647 xpc_msgqueue_deref(ch);
1648 return xpNotConnected;
1649 }
1650
1651 /*
1652 * Get the next available message entry from the local message queue.
1653 * If none are available, we'll make sure that we grab the latest
1654 * GP values.
1655 */
1656 ret = xpTimeout;
1657
1658 while (1) {
1659
1660 put = ch->w_local_GP.put;
1661 rmb(); /* guarantee that .put loads before .get */
1662 if (put - ch->w_remote_GP.get < ch->local_nentries) {
1663
1664 /* There are available message entries. We need to try
1665 * to secure one for ourselves. We'll do this by trying
1666 * to increment w_local_GP.put as long as someone else
1667 * doesn't beat us to it. If they do, we'll have to
1668 * try again.
1669 */
1670 if (cmpxchg(&ch->w_local_GP.put, put, put + 1) == put) {
1671 /* we got the entry referenced by put */
1672 break;
1673 }
1674 continue; /* try again */
1675 }
1676
1677 /*
1678 * There aren't any available msg entries at this time.
1679 *
1680 * In waiting for a message entry to become available,
1681 * we set a timeout in case the other side is not
1682 * sending completion IPIs. This lets us fake an IPI
1683 * that will cause the IPI handler to fetch the latest
1684 * GP values as if an IPI was sent by the other side.
1685 */
1686 if (ret == xpTimeout)
1687 xpc_IPI_send_local_msgrequest(ch);
1688
1689 if (flags & XPC_NOWAIT) {
1690 xpc_msgqueue_deref(ch);
1691 return xpNoWait;
1692 }
1693
1694 ret = xpc_allocate_msg_wait(ch);
1695 if (ret != xpInterrupted && ret != xpTimeout) {
1696 xpc_msgqueue_deref(ch);
1697 return ret;
1698 }
1699 }
1700
1701 /* get the message's address and initialize it */
1702 msg = (struct xpc_msg *)((u64)ch->local_msgqueue +
1703 (put % ch->local_nentries) * ch->msg_size);
1704
1705 DBUG_ON(msg->flags != 0);
1706 msg->number = put;
1707
1708 dev_dbg(xpc_chan, "w_local_GP.put changed to %ld; msg=0x%p, "
1709 "msg_number=%ld, partid=%d, channel=%d\n", put + 1,
1710 (void *)msg, msg->number, ch->partid, ch->number);
1711
1712 *address_of_msg = msg;
1713
1714 return xpSuccess;
1715}
1716
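xpc_allocate_msg() above reserves a message-queue slot without taking a lock: it re-reads the cached put pointer, checks that the ring is not full relative to the remote get pointer, and claims the slot with cmpxchg(). A simplified user-space model of that reservation loop, assuming C11 atomics and made-up names (the real code additionally fakes a local msgrequest IPI and sleeps when the ring is full):

#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

#define NENTRIES 8

static _Atomic int64_t ring_put;	/* models ch->w_local_GP.put */
static _Atomic int64_t ring_get;	/* models ch->w_remote_GP.get */

/* Try to reserve one slot; returns the slot index or -1 if the ring is full. */
static int64_t reserve_slot(void)
{
	for (;;) {
		int64_t put = atomic_load(&ring_put);
		int64_t get = atomic_load(&ring_get);

		if (put - get >= NENTRIES)
			return -1;	/* full: caller must wait and retry */

		/* claim the entry referenced by 'put' unless someone beat us */
		if (atomic_compare_exchange_weak(&ring_put, &put, put + 1))
			return put % NENTRIES;
	}
}

int main(void)
{
	for (int i = 0; i < 10; i++)
		printf("slot %d -> %lld\n", i, (long long)reserve_slot());
	return 0;
}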
1717/*
1718 * Allocate an entry for a message from the message queue associated with the
1719 * specified channel. NOTE that this routine can sleep waiting for a message
1720 * entry to become available. To not sleep, pass in the XPC_NOWAIT flag.
1721 * 832 *
1722 * Arguments: 833 * NOTE that this routine can sleep waiting for a message entry to become
834 * available. To not sleep, pass in the XPC_NOWAIT flag.
1723 * 835 *
1724 * partid - ID of partition to which the channel is connected. 836 * Once sent, this routine will not wait for the message to be received, nor
1725 * ch_number - channel #. 837 * will notification be given when it does happen.
1726 * flags - see xpc.h for valid flags.
1727 * payload - address of the allocated payload area pointer (filled in on
1728 * return) in which the user-defined message is constructed.
1729 */
1730enum xp_retval
1731xpc_initiate_allocate(short partid, int ch_number, u32 flags, void **payload)
1732{
1733 struct xpc_partition *part = &xpc_partitions[partid];
1734 enum xp_retval ret = xpUnknownReason;
1735 struct xpc_msg *msg = NULL;
1736
1737 DBUG_ON(partid <= 0 || partid >= XP_MAX_PARTITIONS);
1738 DBUG_ON(ch_number < 0 || ch_number >= part->nchannels);
1739
1740 *payload = NULL;
1741
1742 if (xpc_part_ref(part)) {
1743 ret = xpc_allocate_msg(&part->channels[ch_number], flags, &msg);
1744 xpc_part_deref(part);
1745
1746 if (msg != NULL)
1747 *payload = &msg->payload;
1748 }
1749
1750 return ret;
1751}
1752
1753/*
1754 * Now we actually send the messages that are ready to be sent by advancing
1755 * the local message queue's Put value and then send an IPI to the recipient
1756 * partition.
1757 */
1758static void
1759xpc_send_msgs(struct xpc_channel *ch, s64 initial_put)
1760{
1761 struct xpc_msg *msg;
1762 s64 put = initial_put + 1;
1763 int send_IPI = 0;
1764
1765 while (1) {
1766
1767 while (1) {
1768 if (put == ch->w_local_GP.put)
1769 break;
1770
1771 msg = (struct xpc_msg *)((u64)ch->local_msgqueue +
1772 (put % ch->local_nentries) *
1773 ch->msg_size);
1774
1775 if (!(msg->flags & XPC_M_READY))
1776 break;
1777
1778 put++;
1779 }
1780
1781 if (put == initial_put) {
1782 /* nothing's changed */
1783 break;
1784 }
1785
1786 if (cmpxchg_rel(&ch->local_GP->put, initial_put, put) !=
1787 initial_put) {
1788 /* someone else beat us to it */
1789 DBUG_ON(ch->local_GP->put < initial_put);
1790 break;
1791 }
1792
1793 /* we just set the new value of local_GP->put */
1794
1795 dev_dbg(xpc_chan, "local_GP->put changed to %ld, partid=%d, "
1796 "channel=%d\n", put, ch->partid, ch->number);
1797
1798 send_IPI = 1;
1799
1800 /*
1801 * We need to ensure that the message referenced by
1802 * local_GP->put is not XPC_M_READY or that local_GP->put
1803 * equals w_local_GP.put, so we'll go have a look.
1804 */
1805 initial_put = put;
1806 }
1807
1808 if (send_IPI)
1809 xpc_IPI_send_msgrequest(ch);
1810}
1811
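The inner loop of the removed xpc_send_msgs() only publishes a new Put value once every message up to it carries XPC_M_READY, so a slower writer can never expose a half-built neighbour. A minimal model of that "publish the contiguous READY prefix" idea (illustrative names, not the driver's structures):

#include <stdint.h>
#include <stdio.h>

#define NENTRIES 8
#define M_READY  0x1

/* ready[i % NENTRIES] models msg->flags for the message numbered i */
static unsigned char ready[NENTRIES];

/*
 * Walk forward from the already-published Put value and stop at the first
 * message that is not yet READY; the caller would then cmpxchg the result
 * into the shared Put pointer, as xpc_send_msgs() does.
 */
static int64_t publish_ready(int64_t published_put, int64_t local_put)
{
	int64_t put = published_put;

	while (put < local_put && (ready[put % NENTRIES] & M_READY))
		put++;
	return put;
}

int main(void)
{
	ready[0] = ready[1] = M_READY;	/* msgs 0 and 1 are ready */
	ready[3] = M_READY;		/* msg 2 is still being built */
	printf("publish up to %lld\n", (long long)publish_ready(0, 4)); /* 2 */
	return 0;
}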
1812/*
1813 * Common code that does the actual sending of the message by advancing the
1814 * local message queue's Put value and sends an IPI to the partition the
1815 * message is being sent to.
1816 */
1817static enum xp_retval
1818xpc_send_msg(struct xpc_channel *ch, struct xpc_msg *msg, u8 notify_type,
1819 xpc_notify_func func, void *key)
1820{
1821 enum xp_retval ret = xpSuccess;
1822 struct xpc_notify *notify = notify;
1823 s64 put, msg_number = msg->number;
1824
1825 DBUG_ON(notify_type == XPC_N_CALL && func == NULL);
1826 DBUG_ON((((u64)msg - (u64)ch->local_msgqueue) / ch->msg_size) !=
1827 msg_number % ch->local_nentries);
1828 DBUG_ON(msg->flags & XPC_M_READY);
1829
1830 if (ch->flags & XPC_C_DISCONNECTING) {
1831 /* drop the reference grabbed in xpc_allocate_msg() */
1832 xpc_msgqueue_deref(ch);
1833 return ch->reason;
1834 }
1835
1836 if (notify_type != 0) {
1837 /*
1838 * Tell the remote side to send an ACK interrupt when the
1839 * message has been delivered.
1840 */
1841 msg->flags |= XPC_M_INTERRUPT;
1842
1843 atomic_inc(&ch->n_to_notify);
1844
1845 notify = &ch->notify_queue[msg_number % ch->local_nentries];
1846 notify->func = func;
1847 notify->key = key;
1848 notify->type = notify_type;
1849
1850 /* >>> is a mb() needed here? */
1851
1852 if (ch->flags & XPC_C_DISCONNECTING) {
1853 /*
1854 * An error occurred between our last error check and
1855 * this one. We will try to clear the type field from
1856 * the notify entry. If we succeed then
1857 * xpc_disconnect_channel() didn't already process
1858 * the notify entry.
1859 */
1860 if (cmpxchg(&notify->type, notify_type, 0) ==
1861 notify_type) {
1862 atomic_dec(&ch->n_to_notify);
1863 ret = ch->reason;
1864 }
1865
1866 /* drop the reference grabbed in xpc_allocate_msg() */
1867 xpc_msgqueue_deref(ch);
1868 return ret;
1869 }
1870 }
1871
1872 msg->flags |= XPC_M_READY;
1873
1874 /*
1875 * The preceding store of msg->flags must occur before the following
1876 * load of ch->local_GP->put.
1877 */
1878 mb();
1879
1880 /* see if the message is next in line to be sent, if so send it */
1881
1882 put = ch->local_GP->put;
1883 if (put == msg_number)
1884 xpc_send_msgs(ch, put);
1885
1886 /* drop the reference grabbed in xpc_allocate_msg() */
1887 xpc_msgqueue_deref(ch);
1888 return ret;
1889}
1890
1891/*
1892 * Send a message previously allocated using xpc_initiate_allocate() on the
1893 * specified channel connected to the specified partition.
1894 *
1895 * This routine will not wait for the message to be received, nor will
1896 * notification be given when it does happen. Once this routine has returned
1897 * the message entry allocated via xpc_initiate_allocate() is no longer
1898 * accessable to the caller.
1899 *
1900 * This routine, although called by users, does not call xpc_part_ref() to
1901 * ensure that the partition infrastructure is in place. It relies on the
1902 * fact that we called xpc_msgqueue_ref() in xpc_allocate_msg().
1903 * 838 *
1904 * Arguments: 839 * Arguments:
1905 * 840 *
1906 * partid - ID of partition to which the channel is connected. 841 * partid - ID of partition to which the channel is connected.
1907 * ch_number - channel # to send message on. 842 * ch_number - channel # to send message on.
1908 * payload - pointer to the payload area allocated via 843 * flags - see xp.h for valid flags.
1909 * xpc_initiate_allocate(). 844 * payload - pointer to the payload which is to be sent.
845 * payload_size - size of the payload in bytes.
1910 */ 846 */
1911enum xp_retval 847enum xp_retval
1912xpc_initiate_send(short partid, int ch_number, void *payload) 848xpc_initiate_send(short partid, int ch_number, u32 flags, void *payload,
849 u16 payload_size)
1913{ 850{
1914 struct xpc_partition *part = &xpc_partitions[partid]; 851 struct xpc_partition *part = &xpc_partitions[partid];
1915 struct xpc_msg *msg = XPC_MSG_ADDRESS(payload); 852 enum xp_retval ret = xpUnknownReason;
1916 enum xp_retval ret;
1917 853
1918 dev_dbg(xpc_chan, "msg=0x%p, partid=%d, channel=%d\n", (void *)msg, 854 dev_dbg(xpc_chan, "payload=0x%p, partid=%d, channel=%d\n", payload,
1919 partid, ch_number); 855 partid, ch_number);
1920 856
1921 DBUG_ON(partid <= 0 || partid >= XP_MAX_PARTITIONS); 857 DBUG_ON(partid < 0 || partid >= xp_max_npartitions);
1922 DBUG_ON(ch_number < 0 || ch_number >= part->nchannels); 858 DBUG_ON(ch_number < 0 || ch_number >= part->nchannels);
1923 DBUG_ON(msg == NULL); 859 DBUG_ON(payload == NULL);
1924 860
1925 ret = xpc_send_msg(&part->channels[ch_number], msg, 0, NULL, NULL); 861 if (xpc_part_ref(part)) {
862 ret = xpc_send_payload(&part->channels[ch_number], flags,
863 payload, payload_size, 0, NULL, NULL);
864 xpc_part_deref(part);
865 }
1926 866
1927 return ret; 867 return ret;
1928} 868}
1929 869
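For users of XPC the visible change is that the old allocate-then-send pair collapses into this single call, which takes the payload pointer and size directly. A stand-alone mock showing how a caller might use the new signature; the enum values, the stub body, and the demo_msg type are placeholders for illustration, not taken from xp.h:

#include <stdint.h>
#include <stdio.h>

/* Mock of the XPC return type and entry point, just so this sketch compiles
 * on its own; the real declarations live in drivers/misc/sgi-xp/xp.h. */
enum xp_retval { xpSuccess = 0, xpNotConnected };

static enum xp_retval
xpc_initiate_send(short partid, int ch_number, uint32_t flags,
		  void *payload, uint16_t payload_size)
{
	printf("send %u bytes to partition %d, channel %d\n",
	       payload_size, partid, ch_number);
	return xpSuccess;
}

/* hypothetical fixed-size message a client might send */
struct demo_msg {
	uint32_t opcode;
	uint32_t arg;
};

int main(void)
{
	struct demo_msg m = { .opcode = 1, .arg = 42 };

	/* one call now both claims a queue entry and queues the payload */
	if (xpc_initiate_send(1, 0, 0, &m, sizeof(m)) != xpSuccess)
		fprintf(stderr, "send failed\n");
	return 0;
}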
1930/* 870/*
1931 * Send a message previously allocated using xpc_initiate_allocate on the 871 * Send a message that contains the user's payload on the specified channel
1932 * specified channel connected to the specified partition. 872 * connected to the specified partition.
1933 * 873 *
1934 * This routine will not wait for the message to be sent. Once this routine 874 * NOTE that this routine can sleep waiting for a message entry to become
1935 * has returned the message entry allocated via xpc_initiate_allocate() is no 875 * available. To not sleep, pass in the XPC_NOWAIT flag.
1936 * longer accessible to the caller. 876 *
877 * This routine will not wait for the message to be sent or received.
1937 * 878 *
1938 * Once the remote end of the channel has received the message, the function 879 * Once the remote end of the channel has received the message, the function
1939 * passed as an argument to xpc_initiate_send_notify() will be called. This 880 * passed as an argument to xpc_initiate_send_notify() will be called. This
@@ -1943,158 +884,51 @@ xpc_initiate_send(short partid, int ch_number, void *payload)
1943 * 884 *
1944 * If this routine returns an error, the caller's function will NOT be called. 885 * If this routine returns an error, the caller's function will NOT be called.
1945 * 886 *
1946 * This routine, although called by users, does not call xpc_part_ref() to
1947 * ensure that the partition infrastructure is in place. It relies on the
1948 * fact that we called xpc_msgqueue_ref() in xpc_allocate_msg().
1949 *
1950 * Arguments: 887 * Arguments:
1951 * 888 *
1952 * partid - ID of partition to which the channel is connected. 889 * partid - ID of partition to which the channel is connected.
1953 * ch_number - channel # to send message on. 890 * ch_number - channel # to send message on.
1954 * payload - pointer to the payload area allocated via 891 * flags - see xp.h for valid flags.
1955 * xpc_initiate_allocate(). 892 * payload - pointer to the payload which is to be sent.
893 * payload_size - size of the payload in bytes.
1956 * func - function to call with asynchronous notification of message 894 * func - function to call with asynchronous notification of message
1957 * receipt. THIS FUNCTION MUST BE NON-BLOCKING. 895 * receipt. THIS FUNCTION MUST BE NON-BLOCKING.
1958 * key - user-defined key to be passed to the function when it's called. 896 * key - user-defined key to be passed to the function when it's called.
1959 */ 897 */
1960enum xp_retval 898enum xp_retval
1961xpc_initiate_send_notify(short partid, int ch_number, void *payload, 899xpc_initiate_send_notify(short partid, int ch_number, u32 flags, void *payload,
1962 xpc_notify_func func, void *key) 900 u16 payload_size, xpc_notify_func func, void *key)
1963{ 901{
1964 struct xpc_partition *part = &xpc_partitions[partid]; 902 struct xpc_partition *part = &xpc_partitions[partid];
1965 struct xpc_msg *msg = XPC_MSG_ADDRESS(payload); 903 enum xp_retval ret = xpUnknownReason;
1966 enum xp_retval ret;
1967 904
1968 dev_dbg(xpc_chan, "msg=0x%p, partid=%d, channel=%d\n", (void *)msg, 905 dev_dbg(xpc_chan, "payload=0x%p, partid=%d, channel=%d\n", payload,
1969 partid, ch_number); 906 partid, ch_number);
1970 907
1971 DBUG_ON(partid <= 0 || partid >= XP_MAX_PARTITIONS); 908 DBUG_ON(partid < 0 || partid >= xp_max_npartitions);
1972 DBUG_ON(ch_number < 0 || ch_number >= part->nchannels); 909 DBUG_ON(ch_number < 0 || ch_number >= part->nchannels);
1973 DBUG_ON(msg == NULL); 910 DBUG_ON(payload == NULL);
1974 DBUG_ON(func == NULL); 911 DBUG_ON(func == NULL);
1975 912
1976 ret = xpc_send_msg(&part->channels[ch_number], msg, XPC_N_CALL, 913 if (xpc_part_ref(part)) {
1977 func, key); 914 ret = xpc_send_payload(&part->channels[ch_number], flags,
1978 return ret; 915 payload, payload_size, XPC_N_CALL, func,
1979} 916 key);
1980 917 xpc_part_deref(part);
1981static struct xpc_msg *
1982xpc_pull_remote_msg(struct xpc_channel *ch, s64 get)
1983{
1984 struct xpc_partition *part = &xpc_partitions[ch->partid];
1985 struct xpc_msg *remote_msg, *msg;
1986 u32 msg_index, nmsgs;
1987 u64 msg_offset;
1988 enum xp_retval ret;
1989
1990 if (mutex_lock_interruptible(&ch->msg_to_pull_mutex) != 0) {
1991 /* we were interrupted by a signal */
1992 return NULL;
1993 }
1994
1995 while (get >= ch->next_msg_to_pull) {
1996
1997 /* pull as many messages as are ready and able to be pulled */
1998
1999 msg_index = ch->next_msg_to_pull % ch->remote_nentries;
2000
2001 DBUG_ON(ch->next_msg_to_pull >= ch->w_remote_GP.put);
2002 nmsgs = ch->w_remote_GP.put - ch->next_msg_to_pull;
2003 if (msg_index + nmsgs > ch->remote_nentries) {
2004 /* ignore the ones that wrap the msg queue for now */
2005 nmsgs = ch->remote_nentries - msg_index;
2006 }
2007
2008 msg_offset = msg_index * ch->msg_size;
2009 msg = (struct xpc_msg *)((u64)ch->remote_msgqueue + msg_offset);
2010 remote_msg = (struct xpc_msg *)(ch->remote_msgqueue_pa +
2011 msg_offset);
2012
2013 ret = xpc_pull_remote_cachelines(part, msg, remote_msg,
2014 nmsgs * ch->msg_size);
2015 if (ret != xpSuccess) {
2016
2017 dev_dbg(xpc_chan, "failed to pull %d msgs starting with"
2018 " msg %ld from partition %d, channel=%d, "
2019 "ret=%d\n", nmsgs, ch->next_msg_to_pull,
2020 ch->partid, ch->number, ret);
2021
2022 XPC_DEACTIVATE_PARTITION(part, ret);
2023
2024 mutex_unlock(&ch->msg_to_pull_mutex);
2025 return NULL;
2026 }
2027
2028 ch->next_msg_to_pull += nmsgs;
2029 } 918 }
2030 919 return ret;
2031 mutex_unlock(&ch->msg_to_pull_mutex);
2032
2033 /* return the message we were looking for */
2034 msg_offset = (get % ch->remote_nentries) * ch->msg_size;
2035 msg = (struct xpc_msg *)((u64)ch->remote_msgqueue + msg_offset);
2036
2037 return msg;
2038}
2039
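The removed xpc_pull_remote_msg() copies remote messages in one contiguous run and deliberately stops at the end of the circular queue ("ignore the ones that wrap the msg queue for now"), leaving the wrapped tail for the next pass. A small sketch of just that batch-size calculation, with illustrative parameters rather than the driver's types:

#include <stdint.h>
#include <stdio.h>

/*
 * How many messages can be pulled in one contiguous copy, given the next
 * message to pull and the remote 'put' pointer?  Runs that would wrap past
 * the end of the circular queue are capped at the wrap point.
 */
static uint32_t contiguous_batch(int64_t next_to_pull, int64_t remote_put,
				 uint32_t nentries)
{
	uint32_t index = next_to_pull % nentries;
	uint32_t nmsgs = remote_put - next_to_pull;

	if (index + nmsgs > nentries)
		nmsgs = nentries - index;	/* stop at the wrap point */
	return nmsgs;
}

int main(void)
{
	/* 8-entry queue, next msg is #6, remote side has produced up to #11 */
	printf("pull %u msgs\n", contiguous_batch(6, 11, 8));	/* 2, not 5 */
	return 0;
}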
2040/*
2041 * Get a message to be delivered.
2042 */
2043static struct xpc_msg *
2044xpc_get_deliverable_msg(struct xpc_channel *ch)
2045{
2046 struct xpc_msg *msg = NULL;
2047 s64 get;
2048
2049 do {
2050 if (ch->flags & XPC_C_DISCONNECTING)
2051 break;
2052
2053 get = ch->w_local_GP.get;
2054 rmb(); /* guarantee that .get loads before .put */
2055 if (get == ch->w_remote_GP.put)
2056 break;
2057
2058 /* There are messages waiting to be pulled and delivered.
2059 * We need to try to secure one for ourselves. We'll do this
2060 * by trying to increment w_local_GP.get and hope that no one
2061 * else beats us to it. If they do, we'll simply have
2062 * to try again for the next one.
2063 */
2064
2065 if (cmpxchg(&ch->w_local_GP.get, get, get + 1) == get) {
2066 /* we got the entry referenced by get */
2067
2068 dev_dbg(xpc_chan, "w_local_GP.get changed to %ld, "
2069 "partid=%d, channel=%d\n", get + 1,
2070 ch->partid, ch->number);
2071
2072 /* pull the message from the remote partition */
2073
2074 msg = xpc_pull_remote_msg(ch, get);
2075
2076 DBUG_ON(msg != NULL && msg->number != get);
2077 DBUG_ON(msg != NULL && (msg->flags & XPC_M_DONE));
2078 DBUG_ON(msg != NULL && !(msg->flags & XPC_M_READY));
2079
2080 break;
2081 }
2082
2083 } while (1);
2084
2085 return msg;
2086} 920}
2087 921
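The notify variant has the same shape but adds the delivery callback, which, as noted above, must not block. Another stand-alone mock (placeholder values and a stubbed entry point, purely to make the sketch compile) of how a caller might wire up that callback:

#include <stdint.h>
#include <stdio.h>

enum xp_retval { xpSuccess = 0, xpMsgDelivered };	/* placeholder values */

typedef void (*xpc_notify_func)(enum xp_retval reason, short partid,
				int ch_number, void *key);

/* Mock of the real entry point so the sketch stands alone. */
static enum xp_retval
xpc_initiate_send_notify(short partid, int ch_number, uint32_t flags,
			 void *payload, uint16_t payload_size,
			 xpc_notify_func func, void *key)
{
	printf("queued %u bytes\n", payload_size);
	func(xpMsgDelivered, partid, ch_number, key);	/* pretend delivery */
	return xpSuccess;
}

/* must be non-blocking: it runs in XPC's context, not the caller's */
static void sent_cb(enum xp_retval reason, short partid, int ch_number,
		    void *key)
{
	printf("msg %ld delivered, reason=%d (partid=%d ch=%d)\n",
	       (long)(uintptr_t)key, reason, partid, ch_number);
}

int main(void)
{
	uint32_t payload = 0xfeed;

	xpc_initiate_send_notify(1, 0, 0, &payload, sizeof(payload),
				 sent_cb, (void *)(uintptr_t)7);
	return 0;
}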
2088/* 922/*
2089 * Deliver a message to its intended recipient. 923 * Deliver a message's payload to its intended recipient.
2090 */ 924 */
2091void 925void
2092xpc_deliver_msg(struct xpc_channel *ch) 926xpc_deliver_payload(struct xpc_channel *ch)
2093{ 927{
2094 struct xpc_msg *msg; 928 void *payload;
2095 929
2096 msg = xpc_get_deliverable_msg(ch); 930 payload = xpc_get_deliverable_payload(ch);
2097 if (msg != NULL) { 931 if (payload != NULL) {
2098 932
2099 /* 933 /*
2100 * This ref is taken to protect the payload itself from being 934 * This ref is taken to protect the payload itself from being
@@ -2106,18 +940,16 @@ xpc_deliver_msg(struct xpc_channel *ch)
2106 atomic_inc(&ch->kthreads_active); 940 atomic_inc(&ch->kthreads_active);
2107 941
2108 if (ch->func != NULL) { 942 if (ch->func != NULL) {
2109 dev_dbg(xpc_chan, "ch->func() called, msg=0x%p, " 943 dev_dbg(xpc_chan, "ch->func() called, payload=0x%p "
2110 "msg_number=%ld, partid=%d, channel=%d\n", 944 "partid=%d channel=%d\n", payload, ch->partid,
2111 (void *)msg, msg->number, ch->partid,
2112 ch->number); 945 ch->number);
2113 946
2114 /* deliver the message to its intended recipient */ 947 /* deliver the message to its intended recipient */
2115 ch->func(xpMsgReceived, ch->partid, ch->number, 948 ch->func(xpMsgReceived, ch->partid, ch->number, payload,
2116 &msg->payload, ch->key); 949 ch->key);
2117 950
2118 dev_dbg(xpc_chan, "ch->func() returned, msg=0x%p, " 951 dev_dbg(xpc_chan, "ch->func() returned, payload=0x%p "
2119 "msg_number=%ld, partid=%d, channel=%d\n", 952 "partid=%d channel=%d\n", payload, ch->partid,
2120 (void *)msg, msg->number, ch->partid,
2121 ch->number); 953 ch->number);
2122 } 954 }
2123 955
@@ -2126,118 +958,31 @@ xpc_deliver_msg(struct xpc_channel *ch)
2126} 958}
2127 959
2128/* 960/*
2129 * Now we actually acknowledge the messages that have been delivered and ack'd 961 * Acknowledge receipt of a delivered message's payload.
2130 * by advancing the cached remote message queue's Get value and if requested
2131 * send an IPI to the message sender's partition.
2132 */
2133static void
2134xpc_acknowledge_msgs(struct xpc_channel *ch, s64 initial_get, u8 msg_flags)
2135{
2136 struct xpc_msg *msg;
2137 s64 get = initial_get + 1;
2138 int send_IPI = 0;
2139
2140 while (1) {
2141
2142 while (1) {
2143 if (get == ch->w_local_GP.get)
2144 break;
2145
2146 msg = (struct xpc_msg *)((u64)ch->remote_msgqueue +
2147 (get % ch->remote_nentries) *
2148 ch->msg_size);
2149
2150 if (!(msg->flags & XPC_M_DONE))
2151 break;
2152
2153 msg_flags |= msg->flags;
2154 get++;
2155 }
2156
2157 if (get == initial_get) {
2158 /* nothing's changed */
2159 break;
2160 }
2161
2162 if (cmpxchg_rel(&ch->local_GP->get, initial_get, get) !=
2163 initial_get) {
2164 /* someone else beat us to it */
2165 DBUG_ON(ch->local_GP->get <= initial_get);
2166 break;
2167 }
2168
2169 /* we just set the new value of local_GP->get */
2170
2171 dev_dbg(xpc_chan, "local_GP->get changed to %ld, partid=%d, "
2172 "channel=%d\n", get, ch->partid, ch->number);
2173
2174 send_IPI = (msg_flags & XPC_M_INTERRUPT);
2175
2176 /*
2177 * We need to ensure that the message referenced by
2178 * local_GP->get is not XPC_M_DONE or that local_GP->get
2179 * equals w_local_GP.get, so we'll go have a look.
2180 */
2181 initial_get = get;
2182 }
2183
2184 if (send_IPI)
2185 xpc_IPI_send_msgrequest(ch);
2186}
2187
2188/*
2189 * Acknowledge receipt of a delivered message.
2190 *
2191 * If a message has XPC_M_INTERRUPT set, send an interrupt to the partition
2192 * that sent the message.
2193 * 962 *
2194 * This function, although called by users, does not call xpc_part_ref() to 963 * This function, although called by users, does not call xpc_part_ref() to
2195 * ensure that the partition infrastructure is in place. It relies on the 964 * ensure that the partition infrastructure is in place. It relies on the
2196 * fact that we called xpc_msgqueue_ref() in xpc_deliver_msg(). 965 * fact that we called xpc_msgqueue_ref() in xpc_deliver_payload().
2197 * 966 *
2198 * Arguments: 967 * Arguments:
2199 * 968 *
2200 * partid - ID of partition to which the channel is connected. 969 * partid - ID of partition to which the channel is connected.
2201 * ch_number - channel # message received on. 970 * ch_number - channel # message received on.
2202 * payload - pointer to the payload area allocated via 971 * payload - pointer to the payload area allocated via
2203 * xpc_initiate_allocate(). 972 * xpc_initiate_send() or xpc_initiate_send_notify().
2204 */ 973 */
2205void 974void
2206xpc_initiate_received(short partid, int ch_number, void *payload) 975xpc_initiate_received(short partid, int ch_number, void *payload)
2207{ 976{
2208 struct xpc_partition *part = &xpc_partitions[partid]; 977 struct xpc_partition *part = &xpc_partitions[partid];
2209 struct xpc_channel *ch; 978 struct xpc_channel *ch;
2210 struct xpc_msg *msg = XPC_MSG_ADDRESS(payload);
2211 s64 get, msg_number = msg->number;
2212 979
2213 DBUG_ON(partid <= 0 || partid >= XP_MAX_PARTITIONS); 980 DBUG_ON(partid < 0 || partid >= xp_max_npartitions);
2214 DBUG_ON(ch_number < 0 || ch_number >= part->nchannels); 981 DBUG_ON(ch_number < 0 || ch_number >= part->nchannels);
2215 982
2216 ch = &part->channels[ch_number]; 983 ch = &part->channels[ch_number];
984 xpc_received_payload(ch, payload);
2217 985
2218 dev_dbg(xpc_chan, "msg=0x%p, msg_number=%ld, partid=%d, channel=%d\n", 986 /* the call to xpc_msgqueue_ref() was done by xpc_deliver_payload() */
2219 (void *)msg, msg_number, ch->partid, ch->number);
2220
2221 DBUG_ON((((u64)msg - (u64)ch->remote_msgqueue) / ch->msg_size) !=
2222 msg_number % ch->remote_nentries);
2223 DBUG_ON(msg->flags & XPC_M_DONE);
2224
2225 msg->flags |= XPC_M_DONE;
2226
2227 /*
2228 * The preceding store of msg->flags must occur before the following
2229 * load of ch->local_GP->get.
2230 */
2231 mb();
2232
2233 /*
2234 * See if this message is next in line to be acknowledged as having
2235 * been delivered.
2236 */
2237 get = ch->local_GP->get;
2238 if (get == msg_number)
2239 xpc_acknowledge_msgs(ch, get, msg->flags);
2240
2241 /* the call to xpc_msgqueue_ref() was done by xpc_deliver_msg() */
2242 xpc_msgqueue_deref(ch); 987 xpc_msgqueue_deref(ch);
2243} 988}
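On the receive side, the channel callback is now handed the payload pointer itself and must return it with xpc_initiate_received() once the data has been consumed; there is no struct xpc_msg header for the consumer to recover any more. A stand-alone mock of that flow, where the stubbed xpc_initiate_received() and the enum values are placeholders rather than the driver's code:

#include <stdio.h>

enum xp_retval { xpSuccess = 0, xpMsgReceived };	/* placeholder values */

/* Mock: the real function advances the channel's Get pointer and may send an
 * interrupt back to the sender; here it only logs. */
static void xpc_initiate_received(short partid, int ch_number, void *payload)
{
	printf("done with payload %p on partid=%d ch=%d\n",
	       payload, partid, ch_number);
}

/*
 * Shape of a channel callback under the new API: consume the payload, then
 * hand it back so the entry can be reused.
 */
static void demo_ch_func(enum xp_retval reason, short partid, int ch_number,
			 void *payload, void *key)
{
	(void)key;
	if (reason == xpMsgReceived) {
		/* ... consume payload ... */
		xpc_initiate_received(partid, ch_number, payload);
	}
}

int main(void)
{
	int data = 123;

	demo_ch_func(xpMsgReceived, 1, 0, &data, NULL);
	return 0;
}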
diff --git a/drivers/misc/sgi-xp/xpc_main.c b/drivers/misc/sgi-xp/xpc_main.c
index c3b4227f48a5..46325fc84811 100644
--- a/drivers/misc/sgi-xp/xpc_main.c
+++ b/drivers/misc/sgi-xp/xpc_main.c
@@ -25,37 +25,31 @@
25 * 25 *
26 * Caveats: 26 * Caveats:
27 * 27 *
28 * . We currently have no way to determine which nasid an IPI came 28 * . Currently on sn2, we have no way to determine which nasid an IRQ
29 * from. Thus, xpc_IPI_send() does a remote AMO write followed by 29 * came from. Thus, xpc_send_IRQ_sn2() does a remote amo write
30 * an IPI. The AMO indicates where data is to be pulled from, so 30 * followed by an IPI. The amo indicates where data is to be pulled
31 * after the IPI arrives, the remote partition checks the AMO word. 31 * from, so after the IPI arrives, the remote partition checks the amo
32 * The IPI can actually arrive before the AMO however, so other code 32 * word. The IPI can actually arrive before the amo however, so other
33 * must periodically check for this case. Also, remote AMO operations 33 * code must periodically check for this case. Also, remote amo
34 * do not reliably time out. Thus we do a remote PIO read solely to 34 * operations do not reliably time out. Thus we do a remote PIO read
35 * know whether the remote partition is down and whether we should 35 * solely to know whether the remote partition is down and whether we
36 * stop sending IPIs to it. This remote PIO read operation is set up 36 * should stop sending IPIs to it. This remote PIO read operation is
37 * in a special nofault region so SAL knows to ignore (and cleanup) 37 * set up in a special nofault region so SAL knows to ignore (and
38 * any errors due to the remote AMO write, PIO read, and/or PIO 38 * cleanup) any errors due to the remote amo write, PIO read, and/or
39 * write operations. 39 * PIO write operations.
40 * 40 *
41 * If/when new hardware solves this IPI problem, we should abandon 41 * If/when new hardware solves this IPI problem, we should abandon
42 * the current approach. 42 * the current approach.
43 * 43 *
44 */ 44 */
45 45
46#include <linux/kernel.h>
47#include <linux/module.h> 46#include <linux/module.h>
48#include <linux/init.h> 47#include <linux/sysctl.h>
49#include <linux/cache.h> 48#include <linux/device.h>
50#include <linux/interrupt.h>
51#include <linux/delay.h> 49#include <linux/delay.h>
52#include <linux/reboot.h> 50#include <linux/reboot.h>
53#include <linux/completion.h>
54#include <linux/kdebug.h> 51#include <linux/kdebug.h>
55#include <linux/kthread.h> 52#include <linux/kthread.h>
56#include <linux/uaccess.h>
57#include <asm/sn/intr.h>
58#include <asm/sn/sn_sal.h>
59#include "xpc.h" 53#include "xpc.h"
60 54
61/* define two XPC debug device structures to be used with dev_dbg() et al */ 55/* define two XPC debug device structures to be used with dev_dbg() et al */
@@ -89,9 +83,9 @@ static int xpc_hb_check_interval = XPC_HB_CHECK_DEFAULT_INTERVAL;
89static int xpc_hb_check_min_interval = 10; 83static int xpc_hb_check_min_interval = 10;
90static int xpc_hb_check_max_interval = 120; 84static int xpc_hb_check_max_interval = 120;
91 85
92int xpc_disengage_request_timelimit = XPC_DISENGAGE_REQUEST_DEFAULT_TIMELIMIT; 86int xpc_disengage_timelimit = XPC_DISENGAGE_DEFAULT_TIMELIMIT;
93static int xpc_disengage_request_min_timelimit; /* = 0 */ 87static int xpc_disengage_min_timelimit; /* = 0 */
94static int xpc_disengage_request_max_timelimit = 120; 88static int xpc_disengage_max_timelimit = 120;
95 89
96static ctl_table xpc_sys_xpc_hb_dir[] = { 90static ctl_table xpc_sys_xpc_hb_dir[] = {
97 { 91 {
@@ -124,14 +118,14 @@ static ctl_table xpc_sys_xpc_dir[] = {
124 .child = xpc_sys_xpc_hb_dir}, 118 .child = xpc_sys_xpc_hb_dir},
125 { 119 {
126 .ctl_name = CTL_UNNUMBERED, 120 .ctl_name = CTL_UNNUMBERED,
127 .procname = "disengage_request_timelimit", 121 .procname = "disengage_timelimit",
128 .data = &xpc_disengage_request_timelimit, 122 .data = &xpc_disengage_timelimit,
129 .maxlen = sizeof(int), 123 .maxlen = sizeof(int),
130 .mode = 0644, 124 .mode = 0644,
131 .proc_handler = &proc_dointvec_minmax, 125 .proc_handler = &proc_dointvec_minmax,
132 .strategy = &sysctl_intvec, 126 .strategy = &sysctl_intvec,
133 .extra1 = &xpc_disengage_request_min_timelimit, 127 .extra1 = &xpc_disengage_min_timelimit,
134 .extra2 = &xpc_disengage_request_max_timelimit}, 128 .extra2 = &xpc_disengage_max_timelimit},
135 {} 129 {}
136}; 130};
137static ctl_table xpc_sys_dir[] = { 131static ctl_table xpc_sys_dir[] = {
@@ -144,16 +138,19 @@ static ctl_table xpc_sys_dir[] = {
144}; 138};
145static struct ctl_table_header *xpc_sysctl; 139static struct ctl_table_header *xpc_sysctl;
146 140
147/* non-zero if any remote partition disengage request was timed out */ 141/* non-zero if any remote partition disengage was timed out */
148int xpc_disengage_request_timedout; 142int xpc_disengage_timedout;
149 143
150/* #of IRQs received */ 144/* #of activate IRQs received and not yet processed */
151static atomic_t xpc_act_IRQ_rcvd; 145int xpc_activate_IRQ_rcvd;
146DEFINE_SPINLOCK(xpc_activate_IRQ_rcvd_lock);
152 147
153/* IRQ handler notifies this wait queue on receipt of an IRQ */ 148/* IRQ handler notifies this wait queue on receipt of an IRQ */
154static DECLARE_WAIT_QUEUE_HEAD(xpc_act_IRQ_wq); 149DECLARE_WAIT_QUEUE_HEAD(xpc_activate_IRQ_wq);
155 150
156static unsigned long xpc_hb_check_timeout; 151static unsigned long xpc_hb_check_timeout;
152static struct timer_list xpc_hb_timer;
153void *xpc_heartbeating_to_mask;
157 154
158/* notification that the xpc_hb_checker thread has exited */ 155/* notification that the xpc_hb_checker thread has exited */
159static DECLARE_COMPLETION(xpc_hb_checker_exited); 156static DECLARE_COMPLETION(xpc_hb_checker_exited);
@@ -161,8 +158,6 @@ static DECLARE_COMPLETION(xpc_hb_checker_exited);
161/* notification that the xpc_discovery thread has exited */ 158/* notification that the xpc_discovery thread has exited */
162static DECLARE_COMPLETION(xpc_discovery_exited); 159static DECLARE_COMPLETION(xpc_discovery_exited);
163 160
164static struct timer_list xpc_hb_timer;
165
166static void xpc_kthread_waitmsgs(struct xpc_partition *, struct xpc_channel *); 161static void xpc_kthread_waitmsgs(struct xpc_partition *, struct xpc_channel *);
167 162
168static int xpc_system_reboot(struct notifier_block *, unsigned long, void *); 163static int xpc_system_reboot(struct notifier_block *, unsigned long, void *);
@@ -175,31 +170,76 @@ static struct notifier_block xpc_die_notifier = {
175 .notifier_call = xpc_system_die, 170 .notifier_call = xpc_system_die,
176}; 171};
177 172
173int (*xpc_setup_partitions_sn) (void);
174enum xp_retval (*xpc_get_partition_rsvd_page_pa) (void *buf, u64 *cookie,
175 unsigned long *rp_pa,
176 size_t *len);
177int (*xpc_setup_rsvd_page_sn) (struct xpc_rsvd_page *rp);
178void (*xpc_heartbeat_init) (void);
179void (*xpc_heartbeat_exit) (void);
180void (*xpc_increment_heartbeat) (void);
181void (*xpc_offline_heartbeat) (void);
182void (*xpc_online_heartbeat) (void);
183enum xp_retval (*xpc_get_remote_heartbeat) (struct xpc_partition *part);
184
185enum xp_retval (*xpc_make_first_contact) (struct xpc_partition *part);
186void (*xpc_notify_senders_of_disconnect) (struct xpc_channel *ch);
187u64 (*xpc_get_chctl_all_flags) (struct xpc_partition *part);
188enum xp_retval (*xpc_setup_msg_structures) (struct xpc_channel *ch);
189void (*xpc_teardown_msg_structures) (struct xpc_channel *ch);
190void (*xpc_process_msg_chctl_flags) (struct xpc_partition *part, int ch_number);
191int (*xpc_n_of_deliverable_payloads) (struct xpc_channel *ch);
192void *(*xpc_get_deliverable_payload) (struct xpc_channel *ch);
193
194void (*xpc_request_partition_activation) (struct xpc_rsvd_page *remote_rp,
195 unsigned long remote_rp_pa,
196 int nasid);
197void (*xpc_request_partition_reactivation) (struct xpc_partition *part);
198void (*xpc_request_partition_deactivation) (struct xpc_partition *part);
199void (*xpc_cancel_partition_deactivation_request) (struct xpc_partition *part);
200
201void (*xpc_process_activate_IRQ_rcvd) (void);
202enum xp_retval (*xpc_setup_ch_structures_sn) (struct xpc_partition *part);
203void (*xpc_teardown_ch_structures_sn) (struct xpc_partition *part);
204
205void (*xpc_indicate_partition_engaged) (struct xpc_partition *part);
206int (*xpc_partition_engaged) (short partid);
207int (*xpc_any_partition_engaged) (void);
208void (*xpc_indicate_partition_disengaged) (struct xpc_partition *part);
209void (*xpc_assume_partition_disengaged) (short partid);
210
211void (*xpc_send_chctl_closerequest) (struct xpc_channel *ch,
212 unsigned long *irq_flags);
213void (*xpc_send_chctl_closereply) (struct xpc_channel *ch,
214 unsigned long *irq_flags);
215void (*xpc_send_chctl_openrequest) (struct xpc_channel *ch,
216 unsigned long *irq_flags);
217void (*xpc_send_chctl_openreply) (struct xpc_channel *ch,
218 unsigned long *irq_flags);
219
220void (*xpc_save_remote_msgqueue_pa) (struct xpc_channel *ch,
221 unsigned long msgqueue_pa);
222
223enum xp_retval (*xpc_send_payload) (struct xpc_channel *ch, u32 flags,
224 void *payload, u16 payload_size,
225 u8 notify_type, xpc_notify_func func,
226 void *key);
227void (*xpc_received_payload) (struct xpc_channel *ch, void *payload);
228
178/* 229/*
179 * Timer function to enforce the timelimit on the partition disengage request. 230 * Timer function to enforce the timelimit on the partition disengage.
180 */ 231 */
181static void 232static void
182xpc_timeout_partition_disengage_request(unsigned long data) 233xpc_timeout_partition_disengage(unsigned long data)
183{ 234{
184 struct xpc_partition *part = (struct xpc_partition *)data; 235 struct xpc_partition *part = (struct xpc_partition *)data;
185 236
186 DBUG_ON(time_before(jiffies, part->disengage_request_timeout)); 237 DBUG_ON(time_is_after_jiffies(part->disengage_timeout));
187 238
188 (void)xpc_partition_disengaged(part); 239 (void)xpc_partition_disengaged(part);
189 240
190 DBUG_ON(part->disengage_request_timeout != 0); 241 DBUG_ON(part->disengage_timeout != 0);
191 DBUG_ON(xpc_partition_engaged(1UL << XPC_PARTID(part)) != 0); 242 DBUG_ON(xpc_partition_engaged(XPC_PARTID(part)));
192}
193
194/*
195 * Notify the heartbeat check thread that an IRQ has been received.
196 */
197static irqreturn_t
198xpc_act_IRQ_handler(int irq, void *dev_id)
199{
200 atomic_inc(&xpc_act_IRQ_rcvd);
201 wake_up_interruptible(&xpc_act_IRQ_wq);
202 return IRQ_HANDLED;
203} 243}
204 244
205/* 245/*
@@ -210,15 +250,63 @@ xpc_act_IRQ_handler(int irq, void *dev_id)
210static void 250static void
211xpc_hb_beater(unsigned long dummy) 251xpc_hb_beater(unsigned long dummy)
212{ 252{
213 xpc_vars->heartbeat++; 253 xpc_increment_heartbeat();
214 254
215 if (time_after_eq(jiffies, xpc_hb_check_timeout)) 255 if (time_is_before_eq_jiffies(xpc_hb_check_timeout))
216 wake_up_interruptible(&xpc_act_IRQ_wq); 256 wake_up_interruptible(&xpc_activate_IRQ_wq);
217 257
218 xpc_hb_timer.expires = jiffies + (xpc_hb_interval * HZ); 258 xpc_hb_timer.expires = jiffies + (xpc_hb_interval * HZ);
219 add_timer(&xpc_hb_timer); 259 add_timer(&xpc_hb_timer);
220} 260}
221 261
262static void
263xpc_start_hb_beater(void)
264{
265 xpc_heartbeat_init();
266 init_timer(&xpc_hb_timer);
267 xpc_hb_timer.function = xpc_hb_beater;
268 xpc_hb_beater(0);
269}
270
271static void
272xpc_stop_hb_beater(void)
273{
274 del_timer_sync(&xpc_hb_timer);
275 xpc_heartbeat_exit();
276}
277
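xpc_hb_beater() is a self-rearming timer: each invocation bumps the local heartbeat, recomputes .expires and calls add_timer() on itself, while xpc_start_hb_beater() primes it by calling the callback once directly and xpc_stop_hb_beater() relies on del_timer_sync() to wait out any in-flight callback before tearing the heartbeat state down. A minimal, self-contained sketch of that pattern against the same pre-timer_setup() API is below; the module name, one-second period and pr_debug message are made up for the example:

	#include <linux/kernel.h>
	#include <linux/module.h>
	#include <linux/timer.h>
	#include <linux/jiffies.h>

	static struct timer_list demo_timer;

	/* old-style timer callback: the timer's .data field comes back as data */
	static void demo_beat(unsigned long data)
	{
		pr_debug("beat\n");

		/* re-arm ourselves one second from now */
		demo_timer.expires = jiffies + HZ;
		add_timer(&demo_timer);
	}

	static int __init demo_init(void)
	{
		init_timer(&demo_timer);
		demo_timer.function = demo_beat;
		demo_timer.data = 0;
		demo_beat(0);		/* the first beat arms the timer */
		return 0;
	}

	static void __exit demo_exit(void)
	{
		/* waits until no callback is running, then removes the timer */
		del_timer_sync(&demo_timer);
	}

	module_init(demo_init);
	module_exit(demo_exit);
	MODULE_LICENSE("GPL");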
278/*
279 * At periodic intervals, scan through all active partitions and ensure
280 * their heartbeat is still active. If not, the partition is deactivated.
281 */
282static void
283xpc_check_remote_hb(void)
284{
285 struct xpc_partition *part;
286 short partid;
287 enum xp_retval ret;
288
289 for (partid = 0; partid < xp_max_npartitions; partid++) {
290
291 if (xpc_exiting)
292 break;
293
294 if (partid == xp_partition_id)
295 continue;
296
297 part = &xpc_partitions[partid];
298
299 if (part->act_state == XPC_P_AS_INACTIVE ||
300 part->act_state == XPC_P_AS_DEACTIVATING) {
301 continue;
302 }
303
304 ret = xpc_get_remote_heartbeat(part);
305 if (ret != xpSuccess)
306 XPC_DEACTIVATE_PARTITION(part, ret);
307 }
308}
309
222/* 310/*
223 * This thread is responsible for nearly all of the partition 311 * This thread is responsible for nearly all of the partition
224 * activation/deactivation. 312 * activation/deactivation.
@@ -226,8 +314,6 @@ xpc_hb_beater(unsigned long dummy)
226static int 314static int
227xpc_hb_checker(void *ignore) 315xpc_hb_checker(void *ignore)
228{ 316{
229 int last_IRQ_count = 0;
230 int new_IRQ_count;
231 int force_IRQ = 0; 317 int force_IRQ = 0;
232 318
233 /* this thread was marked active by xpc_hb_init() */ 319 /* this thread was marked active by xpc_hb_init() */
@@ -236,56 +322,49 @@ xpc_hb_checker(void *ignore)
236 322
237 /* set our heartbeating to other partitions into motion */ 323 /* set our heartbeating to other partitions into motion */
238 xpc_hb_check_timeout = jiffies + (xpc_hb_check_interval * HZ); 324 xpc_hb_check_timeout = jiffies + (xpc_hb_check_interval * HZ);
239 xpc_hb_beater(0); 325 xpc_start_hb_beater();
240 326
241 while (!xpc_exiting) { 327 while (!xpc_exiting) {
242 328
243 dev_dbg(xpc_part, "woke up with %d ticks rem; %d IRQs have " 329 dev_dbg(xpc_part, "woke up with %d ticks rem; %d IRQs have "
244 "been received\n", 330 "been received\n",
245 (int)(xpc_hb_check_timeout - jiffies), 331 (int)(xpc_hb_check_timeout - jiffies),
246 atomic_read(&xpc_act_IRQ_rcvd) - last_IRQ_count); 332 xpc_activate_IRQ_rcvd);
247 333
248 /* checking of remote heartbeats is skewed by IRQ handling */ 334 /* checking of remote heartbeats is skewed by IRQ handling */
249 if (time_after_eq(jiffies, xpc_hb_check_timeout)) { 335 if (time_is_before_eq_jiffies(xpc_hb_check_timeout)) {
336 xpc_hb_check_timeout = jiffies +
337 (xpc_hb_check_interval * HZ);
338
250 dev_dbg(xpc_part, "checking remote heartbeats\n"); 339 dev_dbg(xpc_part, "checking remote heartbeats\n");
251 xpc_check_remote_hb(); 340 xpc_check_remote_hb();
252 341
253 /* 342 /*
254 * We need to periodically recheck to ensure no 343 * On sn2 we need to periodically recheck to ensure no
255 * IPI/AMO pairs have been missed. That check 344 * IRQ/amo pairs have been missed.
256 * must always reset xpc_hb_check_timeout.
257 */ 345 */
258 force_IRQ = 1; 346 if (is_shub())
347 force_IRQ = 1;
259 } 348 }
260 349
261 /* check for outstanding IRQs */ 350 /* check for outstanding IRQs */
262 new_IRQ_count = atomic_read(&xpc_act_IRQ_rcvd); 351 if (xpc_activate_IRQ_rcvd > 0 || force_IRQ != 0) {
263 if (last_IRQ_count < new_IRQ_count || force_IRQ != 0) {
264 force_IRQ = 0; 352 force_IRQ = 0;
265 353 dev_dbg(xpc_part, "processing activate IRQs "
266 dev_dbg(xpc_part, "found an IRQ to process; will be " 354 "received\n");
267 "resetting xpc_hb_check_timeout\n"); 355 xpc_process_activate_IRQ_rcvd();
268
269 last_IRQ_count += xpc_identify_act_IRQ_sender();
270 if (last_IRQ_count < new_IRQ_count) {
271 /* retry once to help avoid missing AMO */
272 (void)xpc_identify_act_IRQ_sender();
273 }
274 last_IRQ_count = new_IRQ_count;
275
276 xpc_hb_check_timeout = jiffies +
277 (xpc_hb_check_interval * HZ);
278 } 356 }
279 357
280 /* wait for IRQ or timeout */ 358 /* wait for IRQ or timeout */
281 (void)wait_event_interruptible(xpc_act_IRQ_wq, 359 (void)wait_event_interruptible(xpc_activate_IRQ_wq,
282 (last_IRQ_count < 360 (time_is_before_eq_jiffies(
283 atomic_read(&xpc_act_IRQ_rcvd) 361 xpc_hb_check_timeout) ||
284 || time_after_eq(jiffies, 362 xpc_activate_IRQ_rcvd > 0 ||
285 xpc_hb_check_timeout) ||
286 xpc_exiting)); 363 xpc_exiting));
287 } 364 }
288 365
366 xpc_stop_hb_beater();
367
289 dev_dbg(xpc_part, "heartbeat checker is exiting\n"); 368 dev_dbg(xpc_part, "heartbeat checker is exiting\n");
290 369
291 /* mark this thread as having exited */ 370 /* mark this thread as having exited */
@@ -311,37 +390,8 @@ xpc_initiate_discovery(void *ignore)
311} 390}
312 391
313/* 392/*
314 * Establish first contact with the remote partititon. This involves pulling
315 * the XPC per partition variables from the remote partition and waiting for
316 * the remote partition to pull ours.
317 */
318static enum xp_retval
319xpc_make_first_contact(struct xpc_partition *part)
320{
321 enum xp_retval ret;
322
323 while ((ret = xpc_pull_remote_vars_part(part)) != xpSuccess) {
324 if (ret != xpRetry) {
325 XPC_DEACTIVATE_PARTITION(part, ret);
326 return ret;
327 }
328
329 dev_dbg(xpc_chan, "waiting to make first contact with "
330 "partition %d\n", XPC_PARTID(part));
331
332 /* wait a 1/4 of a second or so */
333 (void)msleep_interruptible(250);
334
335 if (part->act_state == XPC_P_DEACTIVATING)
336 return part->reason;
337 }
338
339 return xpc_mark_partition_active(part);
340}
341
342/*
343 * The first kthread assigned to a newly activated partition is the one 393 * The first kthread assigned to a newly activated partition is the one
344 * created by XPC HB with which it calls xpc_partition_up(). XPC hangs on to 394 * created by XPC HB with which it calls xpc_activating(). XPC hangs on to
345 * that kthread until the partition is brought down, at which time that kthread 395 * that kthread until the partition is brought down, at which time that kthread
346 * returns back to XPC HB. (The return of that kthread will signify to XPC HB 396 * returns back to XPC HB. (The return of that kthread will signify to XPC HB
347 * that XPC has dismantled all communication infrastructure for the associated 397 * that XPC has dismantled all communication infrastructure for the associated
@@ -354,11 +404,11 @@ xpc_make_first_contact(struct xpc_partition *part)
354static void 404static void
355xpc_channel_mgr(struct xpc_partition *part) 405xpc_channel_mgr(struct xpc_partition *part)
356{ 406{
357 while (part->act_state != XPC_P_DEACTIVATING || 407 while (part->act_state != XPC_P_AS_DEACTIVATING ||
358 atomic_read(&part->nchannels_active) > 0 || 408 atomic_read(&part->nchannels_active) > 0 ||
359 !xpc_partition_disengaged(part)) { 409 !xpc_partition_disengaged(part)) {
360 410
361 xpc_process_channel_activity(part); 411 xpc_process_sent_chctl_flags(part);
362 412
363 /* 413 /*
364 * Wait until we've been requested to activate kthreads or 414 * Wait until we've been requested to activate kthreads or
@@ -376,8 +426,8 @@ xpc_channel_mgr(struct xpc_partition *part)
376 atomic_dec(&part->channel_mgr_requests); 426 atomic_dec(&part->channel_mgr_requests);
377 (void)wait_event_interruptible(part->channel_mgr_wq, 427 (void)wait_event_interruptible(part->channel_mgr_wq,
378 (atomic_read(&part->channel_mgr_requests) > 0 || 428 (atomic_read(&part->channel_mgr_requests) > 0 ||
379 part->local_IPI_amo != 0 || 429 part->chctl.all_flags != 0 ||
380 (part->act_state == XPC_P_DEACTIVATING && 430 (part->act_state == XPC_P_AS_DEACTIVATING &&
381 atomic_read(&part->nchannels_active) == 0 && 431 atomic_read(&part->nchannels_active) == 0 &&
382 xpc_partition_disengaged(part)))); 432 xpc_partition_disengaged(part))));
383 atomic_set(&part->channel_mgr_requests, 1); 433 atomic_set(&part->channel_mgr_requests, 1);
@@ -385,47 +435,163 @@ xpc_channel_mgr(struct xpc_partition *part)
385} 435}
386 436
387/* 437/*
388 * When XPC HB determines that a partition has come up, it will create a new 438 * Guarantee that the kzalloc'd memory is cacheline aligned.
389 * kthread and that kthread will call this function to attempt to set up the
390 * basic infrastructure used for Cross Partition Communication with the newly
391 * upped partition.
392 *
393 * The kthread that was created by XPC HB and which setup the XPC
394 * infrastructure will remain assigned to the partition until the partition
395 * goes down. At which time the kthread will teardown the XPC infrastructure
396 * and then exit.
397 *
398 * XPC HB will put the remote partition's XPC per partition specific variables
399 * physical address into xpc_partitions[partid].remote_vars_part_pa prior to
400 * calling xpc_partition_up().
401 */ 439 */
402static void 440void *
403xpc_partition_up(struct xpc_partition *part) 441xpc_kzalloc_cacheline_aligned(size_t size, gfp_t flags, void **base)
442{
443	/* see if kzalloc will give us cacheline aligned memory by default */
444 *base = kzalloc(size, flags);
445 if (*base == NULL)
446 return NULL;
447
448 if ((u64)*base == L1_CACHE_ALIGN((u64)*base))
449 return *base;
450
451 kfree(*base);
452
453 /* nope, we'll have to do it ourselves */
454 *base = kzalloc(size + L1_CACHE_BYTES, flags);
455 if (*base == NULL)
456 return NULL;
457
458 return (void *)L1_CACHE_ALIGN((u64)*base);
459}
460
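xpc_kzalloc_cacheline_aligned() above relies on a standard trick: take the cheap allocation if it happens to land on a cacheline boundary, otherwise over-allocate by one cacheline and round the pointer up, remembering the original pointer in *base so it can still be kfree()'d later. A rough user-space sketch of the same idea follows; the 128-byte CACHELINE constant and all names are assumptions for illustration only:

	#include <stdint.h>
	#include <stdio.h>
	#include <stdlib.h>

	#define CACHELINE 128UL		/* stand-in for L1_CACHE_BYTES */

	/* Round p up to the next CACHELINE boundary (CACHELINE is a power of two). */
	void *cacheline_align(void *p)
	{
		return (void *)(((uintptr_t)p + CACHELINE - 1) & ~(CACHELINE - 1));
	}

	/*
	 * Allocate size zeroed bytes starting on a cacheline boundary.
	 * *base receives the pointer that must later be handed to free().
	 */
	void *zalloc_cacheline_aligned(size_t size, void **base)
	{
		*base = calloc(1, size);
		if (*base == NULL)
			return NULL;
		if (*base == cacheline_align(*base))
			return *base;		/* already aligned, use it as is */

		free(*base);

		/* over-allocate so an aligned start always fits inside the block */
		*base = calloc(1, size + CACHELINE);
		if (*base == NULL)
			return NULL;
		return cacheline_align(*base);
	}

	int main(void)
	{
		void *base;
		void *buf = zalloc_cacheline_aligned(1000, &base);

		if (buf != NULL) {
			printf("buffer at %p (base %p)\n", buf, base);
			free(base);		/* always free the original pointer */
		}
		return 0;
	}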
461/*
462 * Setup the channel structures necessary to support XPartition Communication
463 * between the specified remote partition and the local one.
464 */
465static enum xp_retval
466xpc_setup_ch_structures(struct xpc_partition *part)
404{ 467{
468 enum xp_retval ret;
469 int ch_number;
470 struct xpc_channel *ch;
471 short partid = XPC_PARTID(part);
472
473 /*
474 * Allocate all of the channel structures as a contiguous chunk of
475 * memory.
476 */
405 DBUG_ON(part->channels != NULL); 477 DBUG_ON(part->channels != NULL);
478 part->channels = kzalloc(sizeof(struct xpc_channel) * XPC_MAX_NCHANNELS,
479 GFP_KERNEL);
480 if (part->channels == NULL) {
481 dev_err(xpc_chan, "can't get memory for channels\n");
482 return xpNoMemory;
483 }
406 484
407 dev_dbg(xpc_chan, "activating partition %d\n", XPC_PARTID(part)); 485 /* allocate the remote open and close args */
408 486
409 if (xpc_setup_infrastructure(part) != xpSuccess) 487 part->remote_openclose_args =
410 return; 488 xpc_kzalloc_cacheline_aligned(XPC_OPENCLOSE_ARGS_SIZE,
489 GFP_KERNEL, &part->
490 remote_openclose_args_base);
491 if (part->remote_openclose_args == NULL) {
492 dev_err(xpc_chan, "can't get memory for remote connect args\n");
493 ret = xpNoMemory;
494 goto out_1;
495 }
496
497 part->chctl.all_flags = 0;
498 spin_lock_init(&part->chctl_lock);
499
500 atomic_set(&part->channel_mgr_requests, 1);
501 init_waitqueue_head(&part->channel_mgr_wq);
502
503 part->nchannels = XPC_MAX_NCHANNELS;
504
505 atomic_set(&part->nchannels_active, 0);
506 atomic_set(&part->nchannels_engaged, 0);
507
508 for (ch_number = 0; ch_number < part->nchannels; ch_number++) {
509 ch = &part->channels[ch_number];
510
511 ch->partid = partid;
512 ch->number = ch_number;
513 ch->flags = XPC_C_DISCONNECTED;
514
515 atomic_set(&ch->kthreads_assigned, 0);
516 atomic_set(&ch->kthreads_idle, 0);
517 atomic_set(&ch->kthreads_active, 0);
518
519 atomic_set(&ch->references, 0);
520 atomic_set(&ch->n_to_notify, 0);
521
522 spin_lock_init(&ch->lock);
523 init_completion(&ch->wdisconnect_wait);
524
525 atomic_set(&ch->n_on_msg_allocate_wq, 0);
526 init_waitqueue_head(&ch->msg_allocate_wq);
527 init_waitqueue_head(&ch->idle_wq);
528 }
529
530 ret = xpc_setup_ch_structures_sn(part);
531 if (ret != xpSuccess)
532 goto out_2;
533
534 /*
535 * With the setting of the partition setup_state to XPC_P_SS_SETUP,
536 * we're declaring that this partition is ready to go.
537 */
538 part->setup_state = XPC_P_SS_SETUP;
539
540 return xpSuccess;
541
542 /* setup of ch structures failed */
543out_2:
544 kfree(part->remote_openclose_args_base);
545 part->remote_openclose_args = NULL;
546out_1:
547 kfree(part->channels);
548 part->channels = NULL;
549 return ret;
550}
551
552/*
553 * Teardown the channel structures necessary to support XPartition Communication
554 * between the specified remote partition and the local one.
555 */
556static void
557xpc_teardown_ch_structures(struct xpc_partition *part)
558{
559 DBUG_ON(atomic_read(&part->nchannels_engaged) != 0);
560 DBUG_ON(atomic_read(&part->nchannels_active) != 0);
411 561
412 /* 562 /*
413 * The kthread that XPC HB called us with will become the 563 * Make this partition inaccessible to local processes by marking it
414 * channel manager for this partition. It will not return 564 * as no longer setup. Then wait before proceeding with the teardown
415 * back to XPC HB until the partition's XPC infrastructure 565 * until all existing references cease.
416 * has been dismantled.
417 */ 566 */
567 DBUG_ON(part->setup_state != XPC_P_SS_SETUP);
568 part->setup_state = XPC_P_SS_WTEARDOWN;
418 569
419 (void)xpc_part_ref(part); /* this will always succeed */ 570 wait_event(part->teardown_wq, (atomic_read(&part->references) == 0));
420 571
421 if (xpc_make_first_contact(part) == xpSuccess) 572 /* now we can begin tearing down the infrastructure */
422 xpc_channel_mgr(part);
423 573
424 xpc_part_deref(part); 574 xpc_teardown_ch_structures_sn(part);
425 575
426 xpc_teardown_infrastructure(part); 576 kfree(part->remote_openclose_args_base);
577 part->remote_openclose_args = NULL;
578 kfree(part->channels);
579 part->channels = NULL;
580
581 part->setup_state = XPC_P_SS_TORNDOWN;
427} 582}
428 583
584/*
585 * When XPC HB determines that a partition has come up, it will create a new
586 * kthread and that kthread will call this function to attempt to set up the
587 * basic infrastructure used for Cross Partition Communication with the newly
588 * upped partition.
589 *
590 * The kthread that was created by XPC HB and which setup the XPC
591 * infrastructure will remain assigned to the partition becoming the channel
592 * manager for that partition until the partition is deactivating, at which
593 * time the kthread will teardown the XPC infrastructure and then exit.
594 */
429static int 595static int
430xpc_activating(void *__partid) 596xpc_activating(void *__partid)
431{ 597{
@@ -433,64 +599,47 @@ xpc_activating(void *__partid)
433 struct xpc_partition *part = &xpc_partitions[partid]; 599 struct xpc_partition *part = &xpc_partitions[partid];
434 unsigned long irq_flags; 600 unsigned long irq_flags;
435 601
436 DBUG_ON(partid <= 0 || partid >= XP_MAX_PARTITIONS); 602 DBUG_ON(partid < 0 || partid >= xp_max_npartitions);
437 603
438 spin_lock_irqsave(&part->act_lock, irq_flags); 604 spin_lock_irqsave(&part->act_lock, irq_flags);
439 605
440 if (part->act_state == XPC_P_DEACTIVATING) { 606 if (part->act_state == XPC_P_AS_DEACTIVATING) {
441 part->act_state = XPC_P_INACTIVE; 607 part->act_state = XPC_P_AS_INACTIVE;
442 spin_unlock_irqrestore(&part->act_lock, irq_flags); 608 spin_unlock_irqrestore(&part->act_lock, irq_flags);
443 part->remote_rp_pa = 0; 609 part->remote_rp_pa = 0;
444 return 0; 610 return 0;
445 } 611 }
446 612
447 /* indicate the thread is activating */ 613 /* indicate the thread is activating */
448 DBUG_ON(part->act_state != XPC_P_ACTIVATION_REQ); 614 DBUG_ON(part->act_state != XPC_P_AS_ACTIVATION_REQ);
449 part->act_state = XPC_P_ACTIVATING; 615 part->act_state = XPC_P_AS_ACTIVATING;
450 616
451 XPC_SET_REASON(part, 0, 0); 617 XPC_SET_REASON(part, 0, 0);
452 spin_unlock_irqrestore(&part->act_lock, irq_flags); 618 spin_unlock_irqrestore(&part->act_lock, irq_flags);
453 619
454 dev_dbg(xpc_part, "bringing partition %d up\n", partid); 620 dev_dbg(xpc_part, "activating partition %d\n", partid);
455 621
456 /* 622 xpc_allow_hb(partid);
457 * Register the remote partition's AMOs with SAL so it can handle
458 * and cleanup errors within that address range should the remote
459 * partition go down. We don't unregister this range because it is
460 * difficult to tell when outstanding writes to the remote partition
461 * are finished and thus when it is safe to unregister. This should
462 * not result in wasted space in the SAL xp_addr_region table because
463 * we should get the same page for remote_amos_page_pa after module
464 * reloads and system reboots.
465 */
466 if (sn_register_xp_addr_region(part->remote_amos_page_pa,
467 PAGE_SIZE, 1) < 0) {
468 dev_warn(xpc_part, "xpc_partition_up(%d) failed to register "
469 "xp_addr region\n", partid);
470 623
471 spin_lock_irqsave(&part->act_lock, irq_flags); 624 if (xpc_setup_ch_structures(part) == xpSuccess) {
472 part->act_state = XPC_P_INACTIVE; 625 (void)xpc_part_ref(part); /* this will always succeed */
473 XPC_SET_REASON(part, xpPhysAddrRegFailed, __LINE__);
474 spin_unlock_irqrestore(&part->act_lock, irq_flags);
475 part->remote_rp_pa = 0;
476 return 0;
477 }
478 626
479 xpc_allow_hb(partid, xpc_vars); 627 if (xpc_make_first_contact(part) == xpSuccess) {
480 xpc_IPI_send_activated(part); 628 xpc_mark_partition_active(part);
629 xpc_channel_mgr(part);
630 /* won't return until partition is deactivating */
631 }
481 632
482 /* 633 xpc_part_deref(part);
483 * xpc_partition_up() holds this thread and marks this partition as 634 xpc_teardown_ch_structures(part);
484 * XPC_P_ACTIVE by calling xpc_hb_mark_active(). 635 }
485 */
486 (void)xpc_partition_up(part);
487 636
488 xpc_disallow_hb(partid, xpc_vars); 637 xpc_disallow_hb(partid);
489 xpc_mark_partition_inactive(part); 638 xpc_mark_partition_inactive(part);
490 639
491 if (part->reason == xpReactivating) { 640 if (part->reason == xpReactivating) {
492 /* interrupting ourselves results in activating partition */ 641 /* interrupting ourselves results in activating partition */
493 xpc_IPI_send_reactivate(part); 642 xpc_request_partition_reactivation(part);
494 } 643 }
495 644
496 return 0; 645 return 0;
@@ -505,9 +654,9 @@ xpc_activate_partition(struct xpc_partition *part)
505 654
506 spin_lock_irqsave(&part->act_lock, irq_flags); 655 spin_lock_irqsave(&part->act_lock, irq_flags);
507 656
508 DBUG_ON(part->act_state != XPC_P_INACTIVE); 657 DBUG_ON(part->act_state != XPC_P_AS_INACTIVE);
509 658
510 part->act_state = XPC_P_ACTIVATION_REQ; 659 part->act_state = XPC_P_AS_ACTIVATION_REQ;
511 XPC_SET_REASON(part, xpCloneKThread, __LINE__); 660 XPC_SET_REASON(part, xpCloneKThread, __LINE__);
512 661
513 spin_unlock_irqrestore(&part->act_lock, irq_flags); 662 spin_unlock_irqrestore(&part->act_lock, irq_flags);
@@ -516,62 +665,12 @@ xpc_activate_partition(struct xpc_partition *part)
516 partid); 665 partid);
517 if (IS_ERR(kthread)) { 666 if (IS_ERR(kthread)) {
518 spin_lock_irqsave(&part->act_lock, irq_flags); 667 spin_lock_irqsave(&part->act_lock, irq_flags);
519 part->act_state = XPC_P_INACTIVE; 668 part->act_state = XPC_P_AS_INACTIVE;
520 XPC_SET_REASON(part, xpCloneKThreadFailed, __LINE__); 669 XPC_SET_REASON(part, xpCloneKThreadFailed, __LINE__);
521 spin_unlock_irqrestore(&part->act_lock, irq_flags); 670 spin_unlock_irqrestore(&part->act_lock, irq_flags);
522 } 671 }
523} 672}
524 673
525/*
526 * Handle the receipt of a SGI_XPC_NOTIFY IRQ by seeing whether the specified
527 * partition actually sent it. Since SGI_XPC_NOTIFY IRQs may be shared by more
528 * than one partition, we use an AMO_t structure per partition to indicate
529 * whether a partition has sent an IPI or not. If it has, then wake up the
530 * associated kthread to handle it.
531 *
532 * All SGI_XPC_NOTIFY IRQs received by XPC are the result of IPIs sent by XPC
533 * running on other partitions.
534 *
535 * Noteworthy Arguments:
536 *
537 * irq - Interrupt ReQuest number. NOT USED.
538 *
539 * dev_id - partid of IPI's potential sender.
540 */
541irqreturn_t
542xpc_notify_IRQ_handler(int irq, void *dev_id)
543{
544 short partid = (short)(u64)dev_id;
545 struct xpc_partition *part = &xpc_partitions[partid];
546
547 DBUG_ON(partid <= 0 || partid >= XP_MAX_PARTITIONS);
548
549 if (xpc_part_ref(part)) {
550 xpc_check_for_channel_activity(part);
551
552 xpc_part_deref(part);
553 }
554 return IRQ_HANDLED;
555}
556
557/*
558 * Check to see if xpc_notify_IRQ_handler() dropped any IPIs on the floor
559 * because the write to their associated IPI amo completed after the IRQ/IPI
560 * was received.
561 */
562void
563xpc_dropped_IPI_check(struct xpc_partition *part)
564{
565 if (xpc_part_ref(part)) {
566 xpc_check_for_channel_activity(part);
567
568 part->dropped_IPI_timer.expires = jiffies +
569 XPC_P_DROPPED_IPI_WAIT;
570 add_timer(&part->dropped_IPI_timer);
571 xpc_part_deref(part);
572 }
573}
574
575void 674void
576xpc_activate_kthreads(struct xpc_channel *ch, int needed) 675xpc_activate_kthreads(struct xpc_channel *ch, int needed)
577{ 676{
@@ -616,9 +715,9 @@ xpc_kthread_waitmsgs(struct xpc_partition *part, struct xpc_channel *ch)
616 do { 715 do {
617 /* deliver messages to their intended recipients */ 716 /* deliver messages to their intended recipients */
618 717
619 while (ch->w_local_GP.get < ch->w_remote_GP.put && 718 while (xpc_n_of_deliverable_payloads(ch) > 0 &&
620 !(ch->flags & XPC_C_DISCONNECTING)) { 719 !(ch->flags & XPC_C_DISCONNECTING)) {
621 xpc_deliver_msg(ch); 720 xpc_deliver_payload(ch);
622 } 721 }
623 722
624 if (atomic_inc_return(&ch->kthreads_idle) > 723 if (atomic_inc_return(&ch->kthreads_idle) >
@@ -632,7 +731,7 @@ xpc_kthread_waitmsgs(struct xpc_partition *part, struct xpc_channel *ch)
632 "wait_event_interruptible_exclusive()\n"); 731 "wait_event_interruptible_exclusive()\n");
633 732
634 (void)wait_event_interruptible_exclusive(ch->idle_wq, 733 (void)wait_event_interruptible_exclusive(ch->idle_wq,
635 (ch->w_local_GP.get < ch->w_remote_GP.put || 734 (xpc_n_of_deliverable_payloads(ch) > 0 ||
636 (ch->flags & XPC_C_DISCONNECTING))); 735 (ch->flags & XPC_C_DISCONNECTING)));
637 736
638 atomic_dec(&ch->kthreads_idle); 737 atomic_dec(&ch->kthreads_idle);
@@ -677,7 +776,7 @@ xpc_kthread_start(void *args)
677 * additional kthreads to help deliver them. We only 776 * additional kthreads to help deliver them. We only
678 * need one less than total #of messages to deliver. 777 * need one less than total #of messages to deliver.
679 */ 778 */
680 n_needed = ch->w_remote_GP.put - ch->w_local_GP.get - 1; 779 n_needed = xpc_n_of_deliverable_payloads(ch) - 1;
681 if (n_needed > 0 && !(ch->flags & XPC_C_DISCONNECTING)) 780 if (n_needed > 0 && !(ch->flags & XPC_C_DISCONNECTING))
682 xpc_activate_kthreads(ch, n_needed); 781 xpc_activate_kthreads(ch, n_needed);
683 782
@@ -703,11 +802,9 @@ xpc_kthread_start(void *args)
703 } 802 }
704 spin_unlock_irqrestore(&ch->lock, irq_flags); 803 spin_unlock_irqrestore(&ch->lock, irq_flags);
705 804
706 if (atomic_dec_return(&ch->kthreads_assigned) == 0) { 805 if (atomic_dec_return(&ch->kthreads_assigned) == 0 &&
707 if (atomic_dec_return(&part->nchannels_engaged) == 0) { 806 atomic_dec_return(&part->nchannels_engaged) == 0) {
708 xpc_mark_partition_disengaged(part); 807 xpc_indicate_partition_disengaged(part);
709 xpc_IPI_send_disengage(part);
710 }
711 } 808 }
712 809
713 xpc_msgqueue_deref(ch); 810 xpc_msgqueue_deref(ch);
@@ -758,9 +855,9 @@ xpc_create_kthreads(struct xpc_channel *ch, int needed,
758 } else if (ch->flags & XPC_C_DISCONNECTING) { 855 } else if (ch->flags & XPC_C_DISCONNECTING) {
759 break; 856 break;
760 857
761 } else if (atomic_inc_return(&ch->kthreads_assigned) == 1) { 858 } else if (atomic_inc_return(&ch->kthreads_assigned) == 1 &&
762 if (atomic_inc_return(&part->nchannels_engaged) == 1) 859 atomic_inc_return(&part->nchannels_engaged) == 1) {
763 xpc_mark_partition_engaged(part); 860 xpc_indicate_partition_engaged(part);
764 } 861 }
765 (void)xpc_part_ref(part); 862 (void)xpc_part_ref(part);
766 xpc_msgqueue_ref(ch); 863 xpc_msgqueue_ref(ch);
@@ -782,8 +879,7 @@ xpc_create_kthreads(struct xpc_channel *ch, int needed,
782 879
783 if (atomic_dec_return(&ch->kthreads_assigned) == 0 && 880 if (atomic_dec_return(&ch->kthreads_assigned) == 0 &&
784 atomic_dec_return(&part->nchannels_engaged) == 0) { 881 atomic_dec_return(&part->nchannels_engaged) == 0) {
785 xpc_mark_partition_disengaged(part); 882 xpc_indicate_partition_disengaged(part);
786 xpc_IPI_send_disengage(part);
787 } 883 }
788 xpc_msgqueue_deref(ch); 884 xpc_msgqueue_deref(ch);
789 xpc_part_deref(part); 885 xpc_part_deref(part);
@@ -815,7 +911,7 @@ xpc_disconnect_wait(int ch_number)
815 int wakeup_channel_mgr; 911 int wakeup_channel_mgr;
816 912
817 /* now wait for all callouts to the caller's function to cease */ 913 /* now wait for all callouts to the caller's function to cease */
818 for (partid = 1; partid < XP_MAX_PARTITIONS; partid++) { 914 for (partid = 0; partid < xp_max_npartitions; partid++) {
819 part = &xpc_partitions[partid]; 915 part = &xpc_partitions[partid];
820 916
821 if (!xpc_part_ref(part)) 917 if (!xpc_part_ref(part))
@@ -834,16 +930,15 @@ xpc_disconnect_wait(int ch_number)
834 DBUG_ON(!(ch->flags & XPC_C_DISCONNECTED)); 930 DBUG_ON(!(ch->flags & XPC_C_DISCONNECTED));
835 wakeup_channel_mgr = 0; 931 wakeup_channel_mgr = 0;
836 932
837 if (ch->delayed_IPI_flags) { 933 if (ch->delayed_chctl_flags) {
838 if (part->act_state != XPC_P_DEACTIVATING) { 934 if (part->act_state != XPC_P_AS_DEACTIVATING) {
839 spin_lock(&part->IPI_lock); 935 spin_lock(&part->chctl_lock);
840 XPC_SET_IPI_FLAGS(part->local_IPI_amo, 936 part->chctl.flags[ch->number] |=
841 ch->number, 937 ch->delayed_chctl_flags;
842 ch->delayed_IPI_flags); 938 spin_unlock(&part->chctl_lock);
843 spin_unlock(&part->IPI_lock);
844 wakeup_channel_mgr = 1; 939 wakeup_channel_mgr = 1;
845 } 940 }
846 ch->delayed_IPI_flags = 0; 941 ch->delayed_chctl_flags = 0;
847 } 942 }
848 943
849 ch->flags &= ~XPC_C_WDISCONNECT; 944 ch->flags &= ~XPC_C_WDISCONNECT;
@@ -856,13 +951,63 @@ xpc_disconnect_wait(int ch_number)
856 } 951 }
857} 952}
858 953
954static int
955xpc_setup_partitions(void)
956{
957 short partid;
958 struct xpc_partition *part;
959
960 xpc_partitions = kzalloc(sizeof(struct xpc_partition) *
961 xp_max_npartitions, GFP_KERNEL);
962 if (xpc_partitions == NULL) {
963 dev_err(xpc_part, "can't get memory for partition structure\n");
964 return -ENOMEM;
965 }
966
967 /*
968 * The first few fields of each entry of xpc_partitions[] need to
969 * be initialized now so that calls to xpc_connect() and
970 * xpc_disconnect() can be made prior to the activation of any remote
971 * partition. NOTE THAT NONE OF THE OTHER FIELDS BELONGING TO THESE
972 * ENTRIES ARE MEANINGFUL UNTIL AFTER AN ENTRY'S CORRESPONDING
973 * PARTITION HAS BEEN ACTIVATED.
974 */
975 for (partid = 0; partid < xp_max_npartitions; partid++) {
976 part = &xpc_partitions[partid];
977
978 DBUG_ON((u64)part != L1_CACHE_ALIGN((u64)part));
979
980 part->activate_IRQ_rcvd = 0;
981 spin_lock_init(&part->act_lock);
982 part->act_state = XPC_P_AS_INACTIVE;
983 XPC_SET_REASON(part, 0, 0);
984
985 init_timer(&part->disengage_timer);
986 part->disengage_timer.function =
987 xpc_timeout_partition_disengage;
988 part->disengage_timer.data = (unsigned long)part;
989
990 part->setup_state = XPC_P_SS_UNSET;
991 init_waitqueue_head(&part->teardown_wq);
992 atomic_set(&part->references, 0);
993 }
994
995 return xpc_setup_partitions_sn();
996}
997
998static void
999xpc_teardown_partitions(void)
1000{
1001 kfree(xpc_partitions);
1002}
1003
859static void 1004static void
860xpc_do_exit(enum xp_retval reason) 1005xpc_do_exit(enum xp_retval reason)
861{ 1006{
862 short partid; 1007 short partid;
863 int active_part_count, printed_waiting_msg = 0; 1008 int active_part_count, printed_waiting_msg = 0;
864 struct xpc_partition *part; 1009 struct xpc_partition *part;
865 unsigned long printmsg_time, disengage_request_timeout = 0; 1010 unsigned long printmsg_time, disengage_timeout = 0;
866 1011
867 /* a 'rmmod XPC' and a 'reboot' cannot both end up here together */ 1012 /* a 'rmmod XPC' and a 'reboot' cannot both end up here together */
868 DBUG_ON(xpc_exiting == 1); 1013 DBUG_ON(xpc_exiting == 1);
@@ -873,10 +1018,7 @@ xpc_do_exit(enum xp_retval reason)
873 * the heartbeat checker thread in case it's sleeping. 1018 * the heartbeat checker thread in case it's sleeping.
874 */ 1019 */
875 xpc_exiting = 1; 1020 xpc_exiting = 1;
876 wake_up_interruptible(&xpc_act_IRQ_wq); 1021 wake_up_interruptible(&xpc_activate_IRQ_wq);
877
878 /* ignore all incoming interrupts */
879 free_irq(SGI_XPC_ACTIVATE, NULL);
880 1022
881 /* wait for the discovery thread to exit */ 1023 /* wait for the discovery thread to exit */
882 wait_for_completion(&xpc_discovery_exited); 1024 wait_for_completion(&xpc_discovery_exited);
@@ -889,17 +1031,17 @@ xpc_do_exit(enum xp_retval reason)
889 1031
890 /* wait for all partitions to become inactive */ 1032 /* wait for all partitions to become inactive */
891 1033
892 printmsg_time = jiffies + (XPC_DISENGAGE_PRINTMSG_INTERVAL * HZ); 1034 printmsg_time = jiffies + (XPC_DEACTIVATE_PRINTMSG_INTERVAL * HZ);
893 xpc_disengage_request_timedout = 0; 1035 xpc_disengage_timedout = 0;
894 1036
895 do { 1037 do {
896 active_part_count = 0; 1038 active_part_count = 0;
897 1039
898 for (partid = 1; partid < XP_MAX_PARTITIONS; partid++) { 1040 for (partid = 0; partid < xp_max_npartitions; partid++) {
899 part = &xpc_partitions[partid]; 1041 part = &xpc_partitions[partid];
900 1042
901 if (xpc_partition_disengaged(part) && 1043 if (xpc_partition_disengaged(part) &&
902 part->act_state == XPC_P_INACTIVE) { 1044 part->act_state == XPC_P_AS_INACTIVE) {
903 continue; 1045 continue;
904 } 1046 }
905 1047
@@ -907,36 +1049,32 @@ xpc_do_exit(enum xp_retval reason)
907 1049
908 XPC_DEACTIVATE_PARTITION(part, reason); 1050 XPC_DEACTIVATE_PARTITION(part, reason);
909 1051
910 if (part->disengage_request_timeout > 1052 if (part->disengage_timeout > disengage_timeout)
911 disengage_request_timeout) { 1053 disengage_timeout = part->disengage_timeout;
912 disengage_request_timeout =
913 part->disengage_request_timeout;
914 }
915 } 1054 }
916 1055
917 if (xpc_partition_engaged(-1UL)) { 1056 if (xpc_any_partition_engaged()) {
918 if (time_after(jiffies, printmsg_time)) { 1057 if (time_is_before_jiffies(printmsg_time)) {
919 dev_info(xpc_part, "waiting for remote " 1058 dev_info(xpc_part, "waiting for remote "
920 "partitions to disengage, timeout in " 1059 "partitions to deactivate, timeout in "
921 "%ld seconds\n", 1060 "%ld seconds\n", (disengage_timeout -
922 (disengage_request_timeout - jiffies) 1061 jiffies) / HZ);
923 / HZ);
924 printmsg_time = jiffies + 1062 printmsg_time = jiffies +
925 (XPC_DISENGAGE_PRINTMSG_INTERVAL * HZ); 1063 (XPC_DEACTIVATE_PRINTMSG_INTERVAL * HZ);
926 printed_waiting_msg = 1; 1064 printed_waiting_msg = 1;
927 } 1065 }
928 1066
929 } else if (active_part_count > 0) { 1067 } else if (active_part_count > 0) {
930 if (printed_waiting_msg) { 1068 if (printed_waiting_msg) {
931 dev_info(xpc_part, "waiting for local partition" 1069 dev_info(xpc_part, "waiting for local partition"
932 " to disengage\n"); 1070 " to deactivate\n");
933 printed_waiting_msg = 0; 1071 printed_waiting_msg = 0;
934 } 1072 }
935 1073
936 } else { 1074 } else {
937 if (!xpc_disengage_request_timedout) { 1075 if (!xpc_disengage_timedout) {
938 dev_info(xpc_part, "all partitions have " 1076 dev_info(xpc_part, "all partitions have "
939 "disengaged\n"); 1077 "deactivated\n");
940 } 1078 }
941 break; 1079 break;
942 } 1080 }
@@ -946,33 +1084,28 @@ xpc_do_exit(enum xp_retval reason)
946 1084
947 } while (1); 1085 } while (1);
948 1086
949 DBUG_ON(xpc_partition_engaged(-1UL)); 1087 DBUG_ON(xpc_any_partition_engaged());
1088 DBUG_ON(xpc_any_hbs_allowed() != 0);
950 1089
951 /* indicate to others that our reserved page is uninitialized */ 1090 xpc_teardown_rsvd_page();
952 xpc_rsvd_page->vars_pa = 0;
953
954 /* now it's time to eliminate our heartbeat */
955 del_timer_sync(&xpc_hb_timer);
956 DBUG_ON(xpc_vars->heartbeating_to_mask != 0);
957 1091
958 if (reason == xpUnloading) { 1092 if (reason == xpUnloading) {
959 /* take ourselves off of the reboot_notifier_list */
960 (void)unregister_reboot_notifier(&xpc_reboot_notifier);
961
962 /* take ourselves off of the die_notifier list */
963 (void)unregister_die_notifier(&xpc_die_notifier); 1093 (void)unregister_die_notifier(&xpc_die_notifier);
1094 (void)unregister_reboot_notifier(&xpc_reboot_notifier);
964 } 1095 }
965 1096
966 /* close down protections for IPI operations */
967 xpc_restrict_IPI_ops();
968
969 /* clear the interface to XPC's functions */ 1097 /* clear the interface to XPC's functions */
970 xpc_clear_interface(); 1098 xpc_clear_interface();
971 1099
972 if (xpc_sysctl) 1100 if (xpc_sysctl)
973 unregister_sysctl_table(xpc_sysctl); 1101 unregister_sysctl_table(xpc_sysctl);
974 1102
975 kfree(xpc_remote_copy_buffer_base); 1103 xpc_teardown_partitions();
1104
1105 if (is_shub())
1106 xpc_exit_sn2();
1107 else
1108 xpc_exit_uv();
976} 1109}
977 1110
978/* 1111/*
@@ -1002,60 +1135,57 @@ xpc_system_reboot(struct notifier_block *nb, unsigned long event, void *unused)
1002} 1135}
1003 1136
1004/* 1137/*
1005 * Notify other partitions to disengage from all references to our memory. 1138 * Notify other partitions to deactivate from us by first disengaging from all
1139 * references to our memory.
1006 */ 1140 */
1007static void 1141static void
1008xpc_die_disengage(void) 1142xpc_die_deactivate(void)
1009{ 1143{
1010 struct xpc_partition *part; 1144 struct xpc_partition *part;
1011 short partid; 1145 short partid;
1012 unsigned long engaged; 1146 int any_engaged;
1013 long time, printmsg_time, disengage_request_timeout; 1147 long keep_waiting;
1148 long wait_to_print;
1014 1149
1015 /* keep xpc_hb_checker thread from doing anything (just in case) */ 1150 /* keep xpc_hb_checker thread from doing anything (just in case) */
1016 xpc_exiting = 1; 1151 xpc_exiting = 1;
1017 1152
1018	xpc_vars->heartbeating_to_mask = 0; /* indicate we're deactivated */ 1153	xpc_disallow_all_hbs();	/* indicate we're deactivated */
1019 1154
1020 for (partid = 1; partid < XP_MAX_PARTITIONS; partid++) { 1155 for (partid = 0; partid < xp_max_npartitions; partid++) {
1021 part = &xpc_partitions[partid]; 1156 part = &xpc_partitions[partid];
1022 1157
1023 if (!XPC_SUPPORTS_DISENGAGE_REQUEST(part-> 1158 if (xpc_partition_engaged(partid) ||
1024 remote_vars_version)) { 1159 part->act_state != XPC_P_AS_INACTIVE) {
1025 1160 xpc_request_partition_deactivation(part);
1026 /* just in case it was left set by an earlier XPC */ 1161 xpc_indicate_partition_disengaged(part);
1027 xpc_clear_partition_engaged(1UL << partid);
1028 continue;
1029 }
1030
1031 if (xpc_partition_engaged(1UL << partid) ||
1032 part->act_state != XPC_P_INACTIVE) {
1033 xpc_request_partition_disengage(part);
1034 xpc_mark_partition_disengaged(part);
1035 xpc_IPI_send_disengage(part);
1036 } 1162 }
1037 } 1163 }
1038 1164
1039 time = rtc_time(); 1165 /*
1040 printmsg_time = time + 1166 * Though we requested that all other partitions deactivate from us,
1041 (XPC_DISENGAGE_PRINTMSG_INTERVAL * sn_rtc_cycles_per_second); 1167 * we only wait until they've all disengaged or we've reached the
1042 disengage_request_timeout = time + 1168 * defined timelimit.
1043 (xpc_disengage_request_timelimit * sn_rtc_cycles_per_second); 1169 *
1044 1170 * Given that one iteration through the following while-loop takes
1045 /* wait for all other partitions to disengage from us */ 1171 * approximately 200 microseconds, calculate the #of loops to take
1172 * before bailing and the #of loops before printing a waiting message.
1173 */
1174 keep_waiting = xpc_disengage_timelimit * 1000 * 5;
1175 wait_to_print = XPC_DEACTIVATE_PRINTMSG_INTERVAL * 1000 * 5;
1046 1176
1047 while (1) { 1177 while (1) {
1048 engaged = xpc_partition_engaged(-1UL); 1178 any_engaged = xpc_any_partition_engaged();
1049 if (!engaged) { 1179 if (!any_engaged) {
1050 dev_info(xpc_part, "all partitions have disengaged\n"); 1180 dev_info(xpc_part, "all partitions have deactivated\n");
1051 break; 1181 break;
1052 } 1182 }
1053 1183
1054 time = rtc_time(); 1184 if (!keep_waiting--) {
1055 if (time >= disengage_request_timeout) { 1185 for (partid = 0; partid < xp_max_npartitions;
1056 for (partid = 1; partid < XP_MAX_PARTITIONS; partid++) { 1186 partid++) {
1057 if (engaged & (1UL << partid)) { 1187 if (xpc_partition_engaged(partid)) {
1058 dev_info(xpc_part, "disengage from " 1188 dev_info(xpc_part, "deactivate from "
1059 "remote partition %d timed " 1189 "remote partition %d timed "
1060 "out\n", partid); 1190 "out\n", partid);
1061 } 1191 }
@@ -1063,15 +1193,15 @@ xpc_die_disengage(void)
1063 break; 1193 break;
1064 } 1194 }
1065 1195
1066 if (time >= printmsg_time) { 1196 if (!wait_to_print--) {
1067 dev_info(xpc_part, "waiting for remote partitions to " 1197 dev_info(xpc_part, "waiting for remote partitions to "
1068 "disengage, timeout in %ld seconds\n", 1198 "deactivate, timeout in %ld seconds\n",
1069 (disengage_request_timeout - time) / 1199 keep_waiting / (1000 * 5));
1070 sn_rtc_cycles_per_second); 1200 wait_to_print = XPC_DEACTIVATE_PRINTMSG_INTERVAL *
1071 printmsg_time = time + 1201 1000 * 5;
1072 (XPC_DISENGAGE_PRINTMSG_INTERVAL *
1073 sn_rtc_cycles_per_second);
1074 } 1202 }
1203
1204 udelay(200);
1075 } 1205 }
1076} 1206}
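For reference, the 1000 * 5 scaling above comes straight from the 200 microsecond pass time enforced by the udelay(200) at the bottom of the loop: 1,000,000 us / 200 us = 5,000 passes per second, so a timelimit of N seconds maps to N * 1000 * 5 iterations, and the print interval is converted the same way.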
1077 1207
@@ -1086,10 +1216,11 @@ xpc_die_disengage(void)
1086static int 1216static int
1087xpc_system_die(struct notifier_block *nb, unsigned long event, void *unused) 1217xpc_system_die(struct notifier_block *nb, unsigned long event, void *unused)
1088{ 1218{
1219#ifdef CONFIG_IA64 /* !!! temporary kludge */
1089 switch (event) { 1220 switch (event) {
1090 case DIE_MACHINE_RESTART: 1221 case DIE_MACHINE_RESTART:
1091 case DIE_MACHINE_HALT: 1222 case DIE_MACHINE_HALT:
1092 xpc_die_disengage(); 1223 xpc_die_deactivate();
1093 break; 1224 break;
1094 1225
1095 case DIE_KDEBUG_ENTER: 1226 case DIE_KDEBUG_ENTER:
@@ -1100,8 +1231,7 @@ xpc_system_die(struct notifier_block *nb, unsigned long event, void *unused)
1100 /* fall through */ 1231 /* fall through */
1101 case DIE_MCA_MONARCH_ENTER: 1232 case DIE_MCA_MONARCH_ENTER:
1102 case DIE_INIT_MONARCH_ENTER: 1233 case DIE_INIT_MONARCH_ENTER:
1103 xpc_vars->heartbeat++; 1234 xpc_offline_heartbeat();
1104 xpc_vars->heartbeat_offline = 1;
1105 break; 1235 break;
1106 1236
1107 case DIE_KDEBUG_LEAVE: 1237 case DIE_KDEBUG_LEAVE:
@@ -1112,10 +1242,12 @@ xpc_system_die(struct notifier_block *nb, unsigned long event, void *unused)
1112 /* fall through */ 1242 /* fall through */
1113 case DIE_MCA_MONARCH_LEAVE: 1243 case DIE_MCA_MONARCH_LEAVE:
1114 case DIE_INIT_MONARCH_LEAVE: 1244 case DIE_INIT_MONARCH_LEAVE:
1115 xpc_vars->heartbeat++; 1245 xpc_online_heartbeat();
1116 xpc_vars->heartbeat_offline = 0;
1117 break; 1246 break;
1118 } 1247 }
1248#else
1249 xpc_die_deactivate();
1250#endif
1119 1251
1120 return NOTIFY_DONE; 1252 return NOTIFY_DONE;
1121} 1253}
@@ -1124,105 +1256,52 @@ int __init
1124xpc_init(void) 1256xpc_init(void)
1125{ 1257{
1126 int ret; 1258 int ret;
1127 short partid;
1128 struct xpc_partition *part;
1129 struct task_struct *kthread; 1259 struct task_struct *kthread;
1130 size_t buf_size;
1131
1132 if (!ia64_platform_is("sn2"))
1133 return -ENODEV;
1134
1135 buf_size = max(XPC_RP_VARS_SIZE,
1136 XPC_RP_HEADER_SIZE + XP_NASID_MASK_BYTES);
1137 xpc_remote_copy_buffer = xpc_kmalloc_cacheline_aligned(buf_size,
1138 GFP_KERNEL,
1139 &xpc_remote_copy_buffer_base);
1140 if (xpc_remote_copy_buffer == NULL)
1141 return -ENOMEM;
1142 1260
1143 snprintf(xpc_part->bus_id, BUS_ID_SIZE, "part"); 1261 snprintf(xpc_part->bus_id, BUS_ID_SIZE, "part");
1144 snprintf(xpc_chan->bus_id, BUS_ID_SIZE, "chan"); 1262 snprintf(xpc_chan->bus_id, BUS_ID_SIZE, "chan");
1145 1263
1146 xpc_sysctl = register_sysctl_table(xpc_sys_dir); 1264 if (is_shub()) {
1147 1265 /*
1148 /* 1266 * The ia64-sn2 architecture supports at most 64 partitions.
1149 * The first few fields of each entry of xpc_partitions[] need to 1267 * And the inability to unregister remote amos restricts us
1150 * be initialized now so that calls to xpc_connect() and 1268 * further to only support exactly 64 partitions on this
1151 * xpc_disconnect() can be made prior to the activation of any remote 1269 * architecture, no less.
1152 * partition. NOTE THAT NONE OF THE OTHER FIELDS BELONGING TO THESE 1270 */
1153 * ENTRIES ARE MEANINGFUL UNTIL AFTER AN ENTRY'S CORRESPONDING 1271 if (xp_max_npartitions != 64) {
1154 * PARTITION HAS BEEN ACTIVATED. 1272 dev_err(xpc_part, "max #of partitions not set to 64\n");
1155 */ 1273 ret = -EINVAL;
1156 for (partid = 1; partid < XP_MAX_PARTITIONS; partid++) { 1274 } else {
1157 part = &xpc_partitions[partid]; 1275 ret = xpc_init_sn2();
1158 1276 }
1159 DBUG_ON((u64)part != L1_CACHE_ALIGN((u64)part));
1160
1161 part->act_IRQ_rcvd = 0;
1162 spin_lock_init(&part->act_lock);
1163 part->act_state = XPC_P_INACTIVE;
1164 XPC_SET_REASON(part, 0, 0);
1165 1277
1166 init_timer(&part->disengage_request_timer); 1278 } else if (is_uv()) {
1167 part->disengage_request_timer.function = 1279 ret = xpc_init_uv();
1168 xpc_timeout_partition_disengage_request;
1169 part->disengage_request_timer.data = (unsigned long)part;
1170 1280
1171 part->setup_state = XPC_P_UNSET; 1281 } else {
1172 init_waitqueue_head(&part->teardown_wq); 1282 ret = -ENODEV;
1173 atomic_set(&part->references, 0);
1174 } 1283 }
1175 1284
1176 /* 1285 if (ret != 0)
1177 * Open up protections for IPI operations (and AMO operations on 1286 return ret;
1178 * Shub 1.1 systems).
1179 */
1180 xpc_allow_IPI_ops();
1181
1182 /*
1183 * Interrupts being processed will increment this atomic variable and
1184 * awaken the heartbeat thread which will process the interrupts.
1185 */
1186 atomic_set(&xpc_act_IRQ_rcvd, 0);
1187 1287
1188 /* 1288 ret = xpc_setup_partitions();
1189 * This is safe to do before the xpc_hb_checker thread has started
1190 * because the handler releases a wait queue. If an interrupt is
1191 * received before the thread is waiting, it will not go to sleep,
1192 * but rather immediately process the interrupt.
1193 */
1194 ret = request_irq(SGI_XPC_ACTIVATE, xpc_act_IRQ_handler, 0,
1195 "xpc hb", NULL);
1196 if (ret != 0) { 1289 if (ret != 0) {
1197 dev_err(xpc_part, "can't register ACTIVATE IRQ handler, " 1290 dev_err(xpc_part, "can't get memory for partition structure\n");
1198 "errno=%d\n", -ret); 1291 goto out_1;
1199
1200 xpc_restrict_IPI_ops();
1201
1202 if (xpc_sysctl)
1203 unregister_sysctl_table(xpc_sysctl);
1204
1205 kfree(xpc_remote_copy_buffer_base);
1206 return -EBUSY;
1207 } 1292 }
1208 1293
1294 xpc_sysctl = register_sysctl_table(xpc_sys_dir);
1295
1209 /* 1296 /*
1210 * Fill the partition reserved page with the information needed by 1297 * Fill the partition reserved page with the information needed by
1211 * other partitions to discover we are alive and establish initial 1298 * other partitions to discover we are alive and establish initial
1212 * communications. 1299 * communications.
1213 */ 1300 */
1214 xpc_rsvd_page = xpc_rsvd_page_init(); 1301 ret = xpc_setup_rsvd_page();
1215 if (xpc_rsvd_page == NULL) { 1302 if (ret != 0) {
1216 dev_err(xpc_part, "could not setup our reserved page\n"); 1303 dev_err(xpc_part, "can't setup our reserved page\n");
1217 1304 goto out_2;
1218 free_irq(SGI_XPC_ACTIVATE, NULL);
1219 xpc_restrict_IPI_ops();
1220
1221 if (xpc_sysctl)
1222 unregister_sysctl_table(xpc_sysctl);
1223
1224 kfree(xpc_remote_copy_buffer_base);
1225 return -EBUSY;
1226 } 1305 }
1227 1306
1228 /* add ourselves to the reboot_notifier_list */ 1307 /* add ourselves to the reboot_notifier_list */
@@ -1235,9 +1314,6 @@ xpc_init(void)
1235 if (ret != 0) 1314 if (ret != 0)
1236 dev_warn(xpc_part, "can't register die notifier\n"); 1315 dev_warn(xpc_part, "can't register die notifier\n");
1237 1316
1238 init_timer(&xpc_hb_timer);
1239 xpc_hb_timer.function = xpc_hb_beater;
1240
1241 /* 1317 /*
1242 * The real work-horse behind xpc. This processes incoming 1318 * The real work-horse behind xpc. This processes incoming
1243 * interrupts and monitors remote heartbeats. 1319 * interrupts and monitors remote heartbeats.
@@ -1245,25 +1321,8 @@ xpc_init(void)
1245 kthread = kthread_run(xpc_hb_checker, NULL, XPC_HB_CHECK_THREAD_NAME); 1321 kthread = kthread_run(xpc_hb_checker, NULL, XPC_HB_CHECK_THREAD_NAME);
1246 if (IS_ERR(kthread)) { 1322 if (IS_ERR(kthread)) {
1247 dev_err(xpc_part, "failed while forking hb check thread\n"); 1323 dev_err(xpc_part, "failed while forking hb check thread\n");
1248 1324 ret = -EBUSY;
1249 /* indicate to others that our reserved page is uninitialized */ 1325 goto out_3;
1250 xpc_rsvd_page->vars_pa = 0;
1251
1252 /* take ourselves off of the reboot_notifier_list */
1253 (void)unregister_reboot_notifier(&xpc_reboot_notifier);
1254
1255 /* take ourselves off of the die_notifier list */
1256 (void)unregister_die_notifier(&xpc_die_notifier);
1257
1258 del_timer_sync(&xpc_hb_timer);
1259 free_irq(SGI_XPC_ACTIVATE, NULL);
1260 xpc_restrict_IPI_ops();
1261
1262 if (xpc_sysctl)
1263 unregister_sysctl_table(xpc_sysctl);
1264
1265 kfree(xpc_remote_copy_buffer_base);
1266 return -EBUSY;
1267 } 1326 }
1268 1327
1269 /* 1328 /*
@@ -1285,11 +1344,28 @@ xpc_init(void)
1285 1344
1286 /* set the interface to point at XPC's functions */ 1345 /* set the interface to point at XPC's functions */
1287 xpc_set_interface(xpc_initiate_connect, xpc_initiate_disconnect, 1346 xpc_set_interface(xpc_initiate_connect, xpc_initiate_disconnect,
1288 xpc_initiate_allocate, xpc_initiate_send, 1347 xpc_initiate_send, xpc_initiate_send_notify,
1289 xpc_initiate_send_notify, xpc_initiate_received, 1348 xpc_initiate_received, xpc_initiate_partid_to_nasids);
1290 xpc_initiate_partid_to_nasids);
1291 1349
1292 return 0; 1350 return 0;
1351
1352 /* initialization was not successful */
1353out_3:
1354 xpc_teardown_rsvd_page();
1355
1356 (void)unregister_die_notifier(&xpc_die_notifier);
1357 (void)unregister_reboot_notifier(&xpc_reboot_notifier);
1358out_2:
1359 if (xpc_sysctl)
1360 unregister_sysctl_table(xpc_sysctl);
1361
1362 xpc_teardown_partitions();
1363out_1:
1364 if (is_shub())
1365 xpc_exit_sn2();
1366 else
1367 xpc_exit_uv();
1368 return ret;
1293} 1369}
1294 1370
1295module_init(xpc_init); 1371module_init(xpc_init);
@@ -1314,9 +1390,9 @@ module_param(xpc_hb_check_interval, int, 0);
1314MODULE_PARM_DESC(xpc_hb_check_interval, "Number of seconds between " 1390MODULE_PARM_DESC(xpc_hb_check_interval, "Number of seconds between "
1315 "heartbeat checks."); 1391 "heartbeat checks.");
1316 1392
1317module_param(xpc_disengage_request_timelimit, int, 0); 1393module_param(xpc_disengage_timelimit, int, 0);
1318MODULE_PARM_DESC(xpc_disengage_request_timelimit, "Number of seconds to wait " 1394MODULE_PARM_DESC(xpc_disengage_timelimit, "Number of seconds to wait "
1319 "for disengage request to complete."); 1395 "for disengage to complete.");
1320 1396
1321module_param(xpc_kdebug_ignore, int, 0); 1397module_param(xpc_kdebug_ignore, int, 0);
1322MODULE_PARM_DESC(xpc_kdebug_ignore, "Should lack of heartbeat be ignored by " 1398MODULE_PARM_DESC(xpc_kdebug_ignore, "Should lack of heartbeat be ignored by "
diff --git a/drivers/misc/sgi-xp/xpc_partition.c b/drivers/misc/sgi-xp/xpc_partition.c
index 7dd4b5812c42..6722f6fe4dc7 100644
--- a/drivers/misc/sgi-xp/xpc_partition.c
+++ b/drivers/misc/sgi-xp/xpc_partition.c
@@ -15,57 +15,22 @@
15 * 15 *
16 */ 16 */
17 17
18#include <linux/kernel.h> 18#include <linux/device.h>
19#include <linux/sysctl.h> 19#include <linux/hardirq.h>
20#include <linux/cache.h>
21#include <linux/mmzone.h>
22#include <linux/nodemask.h>
23#include <asm/uncached.h>
24#include <asm/sn/bte.h>
25#include <asm/sn/intr.h>
26#include <asm/sn/sn_sal.h>
27#include <asm/sn/nodepda.h>
28#include <asm/sn/addrs.h>
29#include "xpc.h" 20#include "xpc.h"
30 21
31/* XPC is exiting flag */ 22/* XPC is exiting flag */
32int xpc_exiting; 23int xpc_exiting;
33 24
34/* SH_IPI_ACCESS shub register value on startup */
35static u64 xpc_sh1_IPI_access;
36static u64 xpc_sh2_IPI_access0;
37static u64 xpc_sh2_IPI_access1;
38static u64 xpc_sh2_IPI_access2;
39static u64 xpc_sh2_IPI_access3;
40
41/* original protection values for each node */
42u64 xpc_prot_vec[MAX_NUMNODES];
43
44/* this partition's reserved page pointers */ 25/* this partition's reserved page pointers */
45struct xpc_rsvd_page *xpc_rsvd_page; 26struct xpc_rsvd_page *xpc_rsvd_page;
46static u64 *xpc_part_nasids; 27static unsigned long *xpc_part_nasids;
47static u64 *xpc_mach_nasids; 28unsigned long *xpc_mach_nasids;
48struct xpc_vars *xpc_vars;
49struct xpc_vars_part *xpc_vars_part;
50 29
51static int xp_nasid_mask_bytes; /* actual size in bytes of nasid mask */ 30static int xpc_nasid_mask_nbytes; /* #of bytes in nasid mask */
52static int xp_nasid_mask_words; /* actual size in words of nasid mask */ 31int xpc_nasid_mask_nlongs; /* #of longs in nasid mask */
53
54/*
55 * For performance reasons, each entry of xpc_partitions[] is cacheline
56 * aligned. And xpc_partitions[] is padded with an additional entry at the
57 * end so that the last legitimate entry doesn't share its cacheline with
58 * another variable.
59 */
60struct xpc_partition xpc_partitions[XP_MAX_PARTITIONS + 1];
61 32
62/* 33struct xpc_partition *xpc_partitions;
63 * Generic buffer used to store a local copy of portions of a remote
64 * partition's reserved page (either its header and part_nasids mask,
65 * or its vars).
66 */
67char *xpc_remote_copy_buffer;
68void *xpc_remote_copy_buffer_base;
69 34
70/* 35/*
71 * Guarantee that the kmalloc'd memory is cacheline aligned. 36 * Guarantee that the kmalloc'd memory is cacheline aligned.
@@ -95,56 +60,59 @@ xpc_kmalloc_cacheline_aligned(size_t size, gfp_t flags, void **base)
95 * Given a nasid, get the physical address of the partition's reserved page 60 * Given a nasid, get the physical address of the partition's reserved page
96 * for that nasid. This function returns 0 on any error. 61 * for that nasid. This function returns 0 on any error.
97 */ 62 */
98static u64 63static unsigned long
99xpc_get_rsvd_page_pa(int nasid) 64xpc_get_rsvd_page_pa(int nasid)
100{ 65{
101 bte_result_t bte_res; 66 enum xp_retval ret;
102 s64 status;
103 u64 cookie = 0; 67 u64 cookie = 0;
104 u64 rp_pa = nasid; /* seed with nasid */ 68 unsigned long rp_pa = nasid; /* seed with nasid */
105 u64 len = 0; 69 size_t len = 0;
106 u64 buf = buf; 70 size_t buf_len = 0;
107 u64 buf_len = 0; 71 void *buf = buf;
108 void *buf_base = NULL; 72 void *buf_base = NULL;
109 73
110 while (1) { 74 while (1) {
111 75
112 status = sn_partition_reserved_page_pa(buf, &cookie, &rp_pa, 76 /* !!! rp_pa will need to be _gpa on UV.
113 &len); 77 * ??? So do we save it into the architecture specific parts
78 * ??? of the xpc_partition structure? Do we rename this
79 * ??? function or have two versions? Rename rp_pa for UV to
80 * ??? rp_gpa?
81 */
82 ret = xpc_get_partition_rsvd_page_pa(buf, &cookie, &rp_pa,
83 &len);
114 84
115 dev_dbg(xpc_part, "SAL returned with status=%li, cookie=" 85 dev_dbg(xpc_part, "SAL returned with ret=%d, cookie=0x%016lx, "
116 "0x%016lx, address=0x%016lx, len=0x%016lx\n", 86 "address=0x%016lx, len=0x%016lx\n", ret,
117 status, cookie, rp_pa, len); 87 (unsigned long)cookie, rp_pa, len);
118 88
119 if (status != SALRET_MORE_PASSES) 89 if (ret != xpNeedMoreInfo)
120 break; 90 break;
121 91
92 /* !!! L1_CACHE_ALIGN() is only a sn2-bte_copy requirement */
122 if (L1_CACHE_ALIGN(len) > buf_len) { 93 if (L1_CACHE_ALIGN(len) > buf_len) {
123 kfree(buf_base); 94 kfree(buf_base);
124 buf_len = L1_CACHE_ALIGN(len); 95 buf_len = L1_CACHE_ALIGN(len);
125 buf = (u64)xpc_kmalloc_cacheline_aligned(buf_len, 96 buf = xpc_kmalloc_cacheline_aligned(buf_len, GFP_KERNEL,
126 GFP_KERNEL, 97 &buf_base);
127 &buf_base);
128 if (buf_base == NULL) { 98 if (buf_base == NULL) {
129 dev_err(xpc_part, "unable to kmalloc " 99 dev_err(xpc_part, "unable to kmalloc "
130 "len=0x%016lx\n", buf_len); 100 "len=0x%016lx\n", buf_len);
131 status = SALRET_ERROR; 101 ret = xpNoMemory;
132 break; 102 break;
133 } 103 }
134 } 104 }
135 105
136 bte_res = xp_bte_copy(rp_pa, buf, buf_len, 106 ret = xp_remote_memcpy(xp_pa(buf), rp_pa, buf_len);
137 (BTE_NOTIFY | BTE_WACQUIRE), NULL); 107 if (ret != xpSuccess) {
138 if (bte_res != BTE_SUCCESS) { 108 dev_dbg(xpc_part, "xp_remote_memcpy failed %d\n", ret);
139 dev_dbg(xpc_part, "xp_bte_copy failed %i\n", bte_res);
140 status = SALRET_ERROR;
141 break; 109 break;
142 } 110 }
143 } 111 }
144 112
145 kfree(buf_base); 113 kfree(buf_base);
146 114
147 if (status != SALRET_OK) 115 if (ret != xpSuccess)
148 rp_pa = 0; 116 rp_pa = 0;
149 117
150 dev_dbg(xpc_part, "reserved page at phys address 0x%016lx\n", rp_pa); 118 dev_dbg(xpc_part, "reserved page at phys address 0x%016lx\n", rp_pa);
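
The hunk above pairs xpc_kmalloc_cacheline_aligned() with kfree(buf_base): the helper hands back both an aligned pointer to use and the raw allocation to free later. A minimal standalone sketch of that idea, using plain malloc() and an assumed CACHELINE_BYTES value rather than the kernel helper itself:

#include <stdint.h>
#include <stdlib.h>
#include <string.h>

#define CACHELINE_BYTES 128			/* assumed value for illustration */

/* Return a cacheline-aligned, zeroed buffer; *base receives the raw
 * allocation that the caller must eventually pass to free(). */
static void *alloc_cacheline_aligned(size_t size, void **base)
{
	uintptr_t p;

	*base = malloc(size + CACHELINE_BYTES);	/* room to slide forward */
	if (*base == NULL)
		return NULL;
	memset(*base, 0, size + CACHELINE_BYTES);
	p = ((uintptr_t)*base + CACHELINE_BYTES - 1) &
	    ~(uintptr_t)(CACHELINE_BYTES - 1);
	return (void *)p;
}
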
@@ -156,300 +124,77 @@ xpc_get_rsvd_page_pa(int nasid)
156 * other partitions to discover we are alive and establish initial 124 * other partitions to discover we are alive and establish initial
157 * communications. 125 * communications.
158 */ 126 */
159struct xpc_rsvd_page * 127int
160xpc_rsvd_page_init(void) 128xpc_setup_rsvd_page(void)
161{ 129{
130 int ret;
162 struct xpc_rsvd_page *rp; 131 struct xpc_rsvd_page *rp;
163 AMO_t *amos_page; 132 unsigned long rp_pa;
164 u64 rp_pa, nasid_array = 0; 133 unsigned long new_ts_jiffies;
165 int i, ret;
166 134
167 /* get the local reserved page's address */ 135 /* get the local reserved page's address */
168 136
169 preempt_disable(); 137 preempt_disable();
170 rp_pa = xpc_get_rsvd_page_pa(cpuid_to_nasid(smp_processor_id())); 138 rp_pa = xpc_get_rsvd_page_pa(xp_cpu_to_nasid(smp_processor_id()));
171 preempt_enable(); 139 preempt_enable();
172 if (rp_pa == 0) { 140 if (rp_pa == 0) {
173 dev_err(xpc_part, "SAL failed to locate the reserved page\n"); 141 dev_err(xpc_part, "SAL failed to locate the reserved page\n");
174 return NULL; 142 return -ESRCH;
175 } 143 }
176 rp = (struct xpc_rsvd_page *)__va(rp_pa); 144 rp = (struct xpc_rsvd_page *)__va(rp_pa);
177 145
178 if (rp->partid != sn_partition_id) { 146 if (rp->SAL_version < 3) {
179 dev_err(xpc_part, "the reserved page's partid of %d should be " 147 /* SAL_versions < 3 had a SAL_partid defined as a u8 */
180 "%d\n", rp->partid, sn_partition_id); 148 rp->SAL_partid &= 0xff;
181 return NULL; 149 }
150 BUG_ON(rp->SAL_partid != xp_partition_id);
151
152 if (rp->SAL_partid < 0 || rp->SAL_partid >= xp_max_npartitions) {
153 dev_err(xpc_part, "the reserved page's partid of %d is outside "
154 "supported range (< 0 || >= %d)\n", rp->SAL_partid,
155 xp_max_npartitions);
156 return -EINVAL;
182 } 157 }
183 158
184 rp->version = XPC_RP_VERSION; 159 rp->version = XPC_RP_VERSION;
160 rp->max_npartitions = xp_max_npartitions;
185 161
186 /* establish the actual sizes of the nasid masks */ 162 /* establish the actual sizes of the nasid masks */
187 if (rp->SAL_version == 1) { 163 if (rp->SAL_version == 1) {
188 /* SAL_version 1 didn't set the nasids_size field */ 164 /* SAL_version 1 didn't set the nasids_size field */
189 rp->nasids_size = 128; 165 rp->SAL_nasids_size = 128;
190 } 166 }
191 xp_nasid_mask_bytes = rp->nasids_size; 167 xpc_nasid_mask_nbytes = rp->SAL_nasids_size;
192 xp_nasid_mask_words = xp_nasid_mask_bytes / 8; 168 xpc_nasid_mask_nlongs = BITS_TO_LONGS(rp->SAL_nasids_size *
169 BITS_PER_BYTE);
193 170
194 /* setup the pointers to the various items in the reserved page */ 171 /* setup the pointers to the various items in the reserved page */
195 xpc_part_nasids = XPC_RP_PART_NASIDS(rp); 172 xpc_part_nasids = XPC_RP_PART_NASIDS(rp);
196 xpc_mach_nasids = XPC_RP_MACH_NASIDS(rp); 173 xpc_mach_nasids = XPC_RP_MACH_NASIDS(rp);
197 xpc_vars = XPC_RP_VARS(rp);
198 xpc_vars_part = XPC_RP_VARS_PART(rp);
199
200 /*
201 * Before clearing xpc_vars, see if a page of AMOs had been previously
202 * allocated. If not we'll need to allocate one and set permissions
203 * so that cross-partition AMOs are allowed.
204 *
205 * The allocated AMO page needs MCA reporting to remain disabled after
206 * XPC has unloaded. To make this work, we keep a copy of the pointer
207 * to this page (i.e., amos_page) in the struct xpc_vars structure,
208 * which is pointed to by the reserved page, and re-use that saved copy
209 * on subsequent loads of XPC. This AMO page is never freed, and its
210 * memory protections are never restricted.
211 */
212 amos_page = xpc_vars->amos_page;
213 if (amos_page == NULL) {
214 amos_page = (AMO_t *)TO_AMO(uncached_alloc_page(0, 1));
215 if (amos_page == NULL) {
216 dev_err(xpc_part, "can't allocate page of AMOs\n");
217 return NULL;
218 }
219
220 /*
221 * Open up AMO-R/W to cpu. This is done for Shub 1.1 systems
222 * when xpc_allow_IPI_ops() is called via xpc_hb_init().
223 */
224 if (!enable_shub_wars_1_1()) {
225 ret = sn_change_memprotect(ia64_tpa((u64)amos_page),
226 PAGE_SIZE,
227 SN_MEMPROT_ACCESS_CLASS_1,
228 &nasid_array);
229 if (ret != 0) {
230 dev_err(xpc_part, "can't change memory "
231 "protections\n");
232 uncached_free_page(__IA64_UNCACHED_OFFSET |
233 TO_PHYS((u64)amos_page), 1);
234 return NULL;
235 }
236 }
237 } else if (!IS_AMO_ADDRESS((u64)amos_page)) {
238 /*
239 * EFI's XPBOOT can also set amos_page in the reserved page,
240 * but it happens to leave it as an uncached physical address
241 * and we need it to be an uncached virtual, so we'll have to
242 * convert it.
243 */
244 if (!IS_AMO_PHYS_ADDRESS((u64)amos_page)) {
245 dev_err(xpc_part, "previously used amos_page address "
246 "is bad = 0x%p\n", (void *)amos_page);
247 return NULL;
248 }
249 amos_page = (AMO_t *)TO_AMO((u64)amos_page);
250 }
251
252 /* clear xpc_vars */
253 memset(xpc_vars, 0, sizeof(struct xpc_vars));
254
255 xpc_vars->version = XPC_V_VERSION;
256 xpc_vars->act_nasid = cpuid_to_nasid(0);
257 xpc_vars->act_phys_cpuid = cpu_physical_id(0);
258 xpc_vars->vars_part_pa = __pa(xpc_vars_part);
259 xpc_vars->amos_page_pa = ia64_tpa((u64)amos_page);
260 xpc_vars->amos_page = amos_page; /* save for next load of XPC */
261
262 /* clear xpc_vars_part */
263 memset((u64 *)xpc_vars_part, 0, sizeof(struct xpc_vars_part) *
264 XP_MAX_PARTITIONS);
265
266 /* initialize the activate IRQ related AMO variables */
267 for (i = 0; i < xp_nasid_mask_words; i++)
268 (void)xpc_IPI_init(XPC_ACTIVATE_IRQ_AMOS + i);
269
270 /* initialize the engaged remote partitions related AMO variables */
271 (void)xpc_IPI_init(XPC_ENGAGED_PARTITIONS_AMO);
272 (void)xpc_IPI_init(XPC_DISENGAGE_REQUEST_AMO);
273 174
274 /* timestamp of when reserved page was setup by XPC */ 175 ret = xpc_setup_rsvd_page_sn(rp);
275 rp->stamp = CURRENT_TIME; 176 if (ret != 0)
177 return ret;
276 178
277 /* 179 /*
180 * Set timestamp of when reserved page was setup by XPC.
278 * This signifies to the remote partition that our reserved 181 * This signifies to the remote partition that our reserved
279 * page is initialized. 182 * page is initialized.
280 */ 183 */
281 rp->vars_pa = __pa(xpc_vars); 184 new_ts_jiffies = jiffies;
185 if (new_ts_jiffies == 0 || new_ts_jiffies == rp->ts_jiffies)
186 new_ts_jiffies++;
187 rp->ts_jiffies = new_ts_jiffies;
282 188
283 return rp; 189 xpc_rsvd_page = rp;
190 return 0;
284} 191}
285 192
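In xpc_setup_rsvd_page() above, a timestamp of 0 is reserved to mean "reserved page not set up", and a re-setup must publish a stamp that differs from the previous incarnation. A small sketch of just that rule (not the kernel function, which works on rp->ts_jiffies directly):

/* Pick the stamp to publish: avoid 0 (meaning "not set up") and avoid
 * repeating the stamp left by the previous incarnation of the page. */
static unsigned long pick_rsvd_page_stamp(unsigned long now,
					  unsigned long previous)
{
	if (now == 0 || now == previous)
		now++;
	return now;
}
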
286/*
287 * Change protections to allow IPI operations (and AMO operations on
288 * Shub 1.1 systems).
289 */
290void 193void
291xpc_allow_IPI_ops(void) 194xpc_teardown_rsvd_page(void)
292{ 195{
293 int node; 196 /* a zero timestamp indicates our rsvd page is not initialized */
294 int nasid; 197 xpc_rsvd_page->ts_jiffies = 0;
295
296 /* >>> Change SH_IPI_ACCESS code to use SAL call once it is available */
297
298 if (is_shub2()) {
299 xpc_sh2_IPI_access0 =
300 (u64)HUB_L((u64 *)LOCAL_MMR_ADDR(SH2_IPI_ACCESS0));
301 xpc_sh2_IPI_access1 =
302 (u64)HUB_L((u64 *)LOCAL_MMR_ADDR(SH2_IPI_ACCESS1));
303 xpc_sh2_IPI_access2 =
304 (u64)HUB_L((u64 *)LOCAL_MMR_ADDR(SH2_IPI_ACCESS2));
305 xpc_sh2_IPI_access3 =
306 (u64)HUB_L((u64 *)LOCAL_MMR_ADDR(SH2_IPI_ACCESS3));
307
308 for_each_online_node(node) {
309 nasid = cnodeid_to_nasid(node);
310 HUB_S((u64 *)GLOBAL_MMR_ADDR(nasid, SH2_IPI_ACCESS0),
311 -1UL);
312 HUB_S((u64 *)GLOBAL_MMR_ADDR(nasid, SH2_IPI_ACCESS1),
313 -1UL);
314 HUB_S((u64 *)GLOBAL_MMR_ADDR(nasid, SH2_IPI_ACCESS2),
315 -1UL);
316 HUB_S((u64 *)GLOBAL_MMR_ADDR(nasid, SH2_IPI_ACCESS3),
317 -1UL);
318 }
319
320 } else {
321 xpc_sh1_IPI_access =
322 (u64)HUB_L((u64 *)LOCAL_MMR_ADDR(SH1_IPI_ACCESS));
323
324 for_each_online_node(node) {
325 nasid = cnodeid_to_nasid(node);
326 HUB_S((u64 *)GLOBAL_MMR_ADDR(nasid, SH1_IPI_ACCESS),
327 -1UL);
328
329 /*
330 * Since the BIST collides with memory operations on
331 * SHUB 1.1 sn_change_memprotect() cannot be used.
332 */
333 if (enable_shub_wars_1_1()) {
334 /* open up everything */
335 xpc_prot_vec[node] = (u64)HUB_L((u64 *)
336 GLOBAL_MMR_ADDR
337 (nasid,
338 SH1_MD_DQLP_MMR_DIR_PRIVEC0));
339 HUB_S((u64 *)
340 GLOBAL_MMR_ADDR(nasid,
341 SH1_MD_DQLP_MMR_DIR_PRIVEC0),
342 -1UL);
343 HUB_S((u64 *)
344 GLOBAL_MMR_ADDR(nasid,
345 SH1_MD_DQRP_MMR_DIR_PRIVEC0),
346 -1UL);
347 }
348 }
349 }
350}
351
352/*
353 * Restrict protections to disallow IPI operations (and AMO operations on
354 * Shub 1.1 systems).
355 */
356void
357xpc_restrict_IPI_ops(void)
358{
359 int node;
360 int nasid;
361
362 /* >>> Change SH_IPI_ACCESS code to use SAL call once it is available */
363
364 if (is_shub2()) {
365
366 for_each_online_node(node) {
367 nasid = cnodeid_to_nasid(node);
368 HUB_S((u64 *)GLOBAL_MMR_ADDR(nasid, SH2_IPI_ACCESS0),
369 xpc_sh2_IPI_access0);
370 HUB_S((u64 *)GLOBAL_MMR_ADDR(nasid, SH2_IPI_ACCESS1),
371 xpc_sh2_IPI_access1);
372 HUB_S((u64 *)GLOBAL_MMR_ADDR(nasid, SH2_IPI_ACCESS2),
373 xpc_sh2_IPI_access2);
374 HUB_S((u64 *)GLOBAL_MMR_ADDR(nasid, SH2_IPI_ACCESS3),
375 xpc_sh2_IPI_access3);
376 }
377
378 } else {
379
380 for_each_online_node(node) {
381 nasid = cnodeid_to_nasid(node);
382 HUB_S((u64 *)GLOBAL_MMR_ADDR(nasid, SH1_IPI_ACCESS),
383 xpc_sh1_IPI_access);
384
385 if (enable_shub_wars_1_1()) {
386 HUB_S((u64 *)GLOBAL_MMR_ADDR(nasid,
387 SH1_MD_DQLP_MMR_DIR_PRIVEC0),
388 xpc_prot_vec[node]);
389 HUB_S((u64 *)GLOBAL_MMR_ADDR(nasid,
390 SH1_MD_DQRP_MMR_DIR_PRIVEC0),
391 xpc_prot_vec[node]);
392 }
393 }
394 }
395}
396
397/*
398 * At periodic intervals, scan through all active partitions and ensure
399 * their heartbeat is still active. If not, the partition is deactivated.
400 */
401void
402xpc_check_remote_hb(void)
403{
404 struct xpc_vars *remote_vars;
405 struct xpc_partition *part;
406 short partid;
407 bte_result_t bres;
408
409 remote_vars = (struct xpc_vars *)xpc_remote_copy_buffer;
410
411 for (partid = 1; partid < XP_MAX_PARTITIONS; partid++) {
412
413 if (xpc_exiting)
414 break;
415
416 if (partid == sn_partition_id)
417 continue;
418
419 part = &xpc_partitions[partid];
420
421 if (part->act_state == XPC_P_INACTIVE ||
422 part->act_state == XPC_P_DEACTIVATING) {
423 continue;
424 }
425
426 /* pull the remote_hb cache line */
427 bres = xp_bte_copy(part->remote_vars_pa,
428 (u64)remote_vars,
429 XPC_RP_VARS_SIZE,
430 (BTE_NOTIFY | BTE_WACQUIRE), NULL);
431 if (bres != BTE_SUCCESS) {
432 XPC_DEACTIVATE_PARTITION(part,
433 xpc_map_bte_errors(bres));
434 continue;
435 }
436
437 dev_dbg(xpc_part, "partid = %d, heartbeat = %ld, last_heartbeat"
438 " = %ld, heartbeat_offline = %ld, HB_mask = 0x%lx\n",
439 partid, remote_vars->heartbeat, part->last_heartbeat,
440 remote_vars->heartbeat_offline,
441 remote_vars->heartbeating_to_mask);
442
443 if (((remote_vars->heartbeat == part->last_heartbeat) &&
444 (remote_vars->heartbeat_offline == 0)) ||
445 !xpc_hb_allowed(sn_partition_id, remote_vars)) {
446
447 XPC_DEACTIVATE_PARTITION(part, xpNoHeartbeat);
448 continue;
449 }
450
451 part->last_heartbeat = remote_vars->heartbeat;
452 }
453} 198}
454 199
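xpc_check_remote_hb() above treats a peer as dead when its heartbeat counter has stopped advancing and it has not marked itself deliberately offline. The core test, as a standalone sketch (ignoring the heartbeating_to_mask check):

/* Liveness rule from the scan above: no forward progress since the
 * last sample and not intentionally offline means "deactivate it". */
static int remote_hb_looks_dead(unsigned long hb_now, unsigned long hb_last,
				unsigned long hb_offline)
{
	return hb_now == hb_last && hb_offline == 0;
}
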
455/* 200/*
@@ -459,11 +204,12 @@ xpc_check_remote_hb(void)
459 * is large enough to contain a copy of their reserved page header and 204 * is large enough to contain a copy of their reserved page header and
460 * part_nasids mask. 205 * part_nasids mask.
461 */ 206 */
462static enum xp_retval 207enum xp_retval
463xpc_get_remote_rp(int nasid, u64 *discovered_nasids, 208xpc_get_remote_rp(int nasid, unsigned long *discovered_nasids,
464 struct xpc_rsvd_page *remote_rp, u64 *remote_rp_pa) 209 struct xpc_rsvd_page *remote_rp, unsigned long *remote_rp_pa)
465{ 210{
466 int bres, i; 211 int l;
212 enum xp_retval ret;
467 213
468 /* get the reserved page's physical address */ 214 /* get the reserved page's physical address */
469 215
@@ -472,355 +218,45 @@ xpc_get_remote_rp(int nasid, u64 *discovered_nasids,
472 return xpNoRsvdPageAddr; 218 return xpNoRsvdPageAddr;
473 219
474 /* pull over the reserved page header and part_nasids mask */ 220 /* pull over the reserved page header and part_nasids mask */
475 bres = xp_bte_copy(*remote_rp_pa, (u64)remote_rp, 221 ret = xp_remote_memcpy(xp_pa(remote_rp), *remote_rp_pa,
476 XPC_RP_HEADER_SIZE + xp_nasid_mask_bytes, 222 XPC_RP_HEADER_SIZE + xpc_nasid_mask_nbytes);
477 (BTE_NOTIFY | BTE_WACQUIRE), NULL); 223 if (ret != xpSuccess)
478 if (bres != BTE_SUCCESS) 224 return ret;
479 return xpc_map_bte_errors(bres);
480 225
481 if (discovered_nasids != NULL) { 226 if (discovered_nasids != NULL) {
482 u64 *remote_part_nasids = XPC_RP_PART_NASIDS(remote_rp); 227 unsigned long *remote_part_nasids =
483 228 XPC_RP_PART_NASIDS(remote_rp);
484 for (i = 0; i < xp_nasid_mask_words; i++)
485 discovered_nasids[i] |= remote_part_nasids[i];
486 }
487
488 /* check that the partid is for another partition */
489 229
490 if (remote_rp->partid < 1 || 230 for (l = 0; l < xpc_nasid_mask_nlongs; l++)
491 remote_rp->partid > (XP_MAX_PARTITIONS - 1)) { 231 discovered_nasids[l] |= remote_part_nasids[l];
492 return xpInvalidPartid;
493 } 232 }
494 233
495 if (remote_rp->partid == sn_partition_id) 234 /* zero timestamp indicates the reserved page has not been setup */
496 return xpLocalPartid; 235 if (remote_rp->ts_jiffies == 0)
236 return xpRsvdPageNotSet;
497 237
498 if (XPC_VERSION_MAJOR(remote_rp->version) != 238 if (XPC_VERSION_MAJOR(remote_rp->version) !=
499 XPC_VERSION_MAJOR(XPC_RP_VERSION)) { 239 XPC_VERSION_MAJOR(XPC_RP_VERSION)) {
500 return xpBadVersion; 240 return xpBadVersion;
501 } 241 }
502 242
503 return xpSuccess; 243 /* check that both remote and local partids are valid for each side */
504} 244 if (remote_rp->SAL_partid < 0 ||
505 245 remote_rp->SAL_partid >= xp_max_npartitions ||
506/* 246 remote_rp->max_npartitions <= xp_partition_id) {
507 * Get a copy of the remote partition's XPC variables from the reserved page. 247 return xpInvalidPartid;
508 *
509 * remote_vars points to a buffer that is cacheline aligned for BTE copies and
510 * assumed to be of size XPC_RP_VARS_SIZE.
511 */
512static enum xp_retval
513xpc_get_remote_vars(u64 remote_vars_pa, struct xpc_vars *remote_vars)
514{
515 int bres;
516
517 if (remote_vars_pa == 0)
518 return xpVarsNotSet;
519
520 /* pull over the cross partition variables */
521 bres = xp_bte_copy(remote_vars_pa, (u64)remote_vars, XPC_RP_VARS_SIZE,
522 (BTE_NOTIFY | BTE_WACQUIRE), NULL);
523 if (bres != BTE_SUCCESS)
524 return xpc_map_bte_errors(bres);
525
526 if (XPC_VERSION_MAJOR(remote_vars->version) !=
527 XPC_VERSION_MAJOR(XPC_V_VERSION)) {
528 return xpBadVersion;
529 }
530
531 return xpSuccess;
532}
533
534/*
535 * Update the remote partition's info.
536 */
537static void
538xpc_update_partition_info(struct xpc_partition *part, u8 remote_rp_version,
539 struct timespec *remote_rp_stamp, u64 remote_rp_pa,
540 u64 remote_vars_pa, struct xpc_vars *remote_vars)
541{
542 part->remote_rp_version = remote_rp_version;
543 dev_dbg(xpc_part, " remote_rp_version = 0x%016x\n",
544 part->remote_rp_version);
545
546 part->remote_rp_stamp = *remote_rp_stamp;
547 dev_dbg(xpc_part, " remote_rp_stamp (tv_sec = 0x%lx tv_nsec = 0x%lx\n",
548 part->remote_rp_stamp.tv_sec, part->remote_rp_stamp.tv_nsec);
549
550 part->remote_rp_pa = remote_rp_pa;
551 dev_dbg(xpc_part, " remote_rp_pa = 0x%016lx\n", part->remote_rp_pa);
552
553 part->remote_vars_pa = remote_vars_pa;
554 dev_dbg(xpc_part, " remote_vars_pa = 0x%016lx\n",
555 part->remote_vars_pa);
556
557 part->last_heartbeat = remote_vars->heartbeat;
558 dev_dbg(xpc_part, " last_heartbeat = 0x%016lx\n",
559 part->last_heartbeat);
560
561 part->remote_vars_part_pa = remote_vars->vars_part_pa;
562 dev_dbg(xpc_part, " remote_vars_part_pa = 0x%016lx\n",
563 part->remote_vars_part_pa);
564
565 part->remote_act_nasid = remote_vars->act_nasid;
566 dev_dbg(xpc_part, " remote_act_nasid = 0x%x\n",
567 part->remote_act_nasid);
568
569 part->remote_act_phys_cpuid = remote_vars->act_phys_cpuid;
570 dev_dbg(xpc_part, " remote_act_phys_cpuid = 0x%x\n",
571 part->remote_act_phys_cpuid);
572
573 part->remote_amos_page_pa = remote_vars->amos_page_pa;
574 dev_dbg(xpc_part, " remote_amos_page_pa = 0x%lx\n",
575 part->remote_amos_page_pa);
576
577 part->remote_vars_version = remote_vars->version;
578 dev_dbg(xpc_part, " remote_vars_version = 0x%x\n",
579 part->remote_vars_version);
580}
581
582/*
583 * Prior code has determined the nasid which generated an IPI. Inspect
584 * that nasid to determine if its partition needs to be activated or
585 * deactivated.
586 *
587 * A partition is considered "awaiting activation" if our partition

588 * flags indicate it is not active and it has a heartbeat. A
589 * partition is considered "awaiting deactivation" if our partition
590 * flags indicate it is active but it has no heartbeat or it is not
591 * sending its heartbeat to us.
592 *
593 * To determine the heartbeat, the remote nasid must have a properly
594 * initialized reserved page.
595 */
596static void
597xpc_identify_act_IRQ_req(int nasid)
598{
599 struct xpc_rsvd_page *remote_rp;
600 struct xpc_vars *remote_vars;
601 u64 remote_rp_pa;
602 u64 remote_vars_pa;
603 int remote_rp_version;
604 int reactivate = 0;
605 int stamp_diff;
606 struct timespec remote_rp_stamp = { 0, 0 };
607 short partid;
608 struct xpc_partition *part;
609 enum xp_retval ret;
610
611 /* pull over the reserved page structure */
612
613 remote_rp = (struct xpc_rsvd_page *)xpc_remote_copy_buffer;
614
615 ret = xpc_get_remote_rp(nasid, NULL, remote_rp, &remote_rp_pa);
616 if (ret != xpSuccess) {
617 dev_warn(xpc_part, "unable to get reserved page from nasid %d, "
618 "which sent interrupt, reason=%d\n", nasid, ret);
619 return;
620 }
621
622 remote_vars_pa = remote_rp->vars_pa;
623 remote_rp_version = remote_rp->version;
624 if (XPC_SUPPORTS_RP_STAMP(remote_rp_version))
625 remote_rp_stamp = remote_rp->stamp;
626
627 partid = remote_rp->partid;
628 part = &xpc_partitions[partid];
629
630 /* pull over the cross partition variables */
631
632 remote_vars = (struct xpc_vars *)xpc_remote_copy_buffer;
633
634 ret = xpc_get_remote_vars(remote_vars_pa, remote_vars);
635 if (ret != xpSuccess) {
636
637 dev_warn(xpc_part, "unable to get XPC variables from nasid %d, "
638 "which sent interrupt, reason=%d\n", nasid, ret);
639
640 XPC_DEACTIVATE_PARTITION(part, ret);
641 return;
642 }
643
644 part->act_IRQ_rcvd++;
645
646 dev_dbg(xpc_part, "partid for nasid %d is %d; IRQs = %d; HB = "
647 "%ld:0x%lx\n", (int)nasid, (int)partid, part->act_IRQ_rcvd,
648 remote_vars->heartbeat, remote_vars->heartbeating_to_mask);
649
650 if (xpc_partition_disengaged(part) &&
651 part->act_state == XPC_P_INACTIVE) {
652
653 xpc_update_partition_info(part, remote_rp_version,
654 &remote_rp_stamp, remote_rp_pa,
655 remote_vars_pa, remote_vars);
656
657 if (XPC_SUPPORTS_DISENGAGE_REQUEST(part->remote_vars_version)) {
658 if (xpc_partition_disengage_requested(1UL << partid)) {
659 /*
660 * Other side is waiting on us to disengage,
661 * even though we already have.
662 */
663 return;
664 }
665 } else {
666 /* other side doesn't support disengage requests */
667 xpc_clear_partition_disengage_request(1UL << partid);
668 }
669
670 xpc_activate_partition(part);
671 return;
672 }
673
674 DBUG_ON(part->remote_rp_version == 0);
675 DBUG_ON(part->remote_vars_version == 0);
676
677 if (!XPC_SUPPORTS_RP_STAMP(part->remote_rp_version)) {
678 DBUG_ON(XPC_SUPPORTS_DISENGAGE_REQUEST(part->
679 remote_vars_version));
680
681 if (!XPC_SUPPORTS_RP_STAMP(remote_rp_version)) {
682 DBUG_ON(XPC_SUPPORTS_DISENGAGE_REQUEST(remote_vars->
683 version));
684 /* see if the other side rebooted */
685 if (part->remote_amos_page_pa ==
686 remote_vars->amos_page_pa &&
687 xpc_hb_allowed(sn_partition_id, remote_vars)) {
688 /* doesn't look that way, so ignore the IPI */
689 return;
690 }
691 }
692
693 /*
694 * Other side rebooted and previous XPC didn't support the
695 * disengage request, so we don't need to do anything special.
696 */
697
698 xpc_update_partition_info(part, remote_rp_version,
699 &remote_rp_stamp, remote_rp_pa,
700 remote_vars_pa, remote_vars);
701 part->reactivate_nasid = nasid;
702 XPC_DEACTIVATE_PARTITION(part, xpReactivating);
703 return;
704 }
705
706 DBUG_ON(!XPC_SUPPORTS_DISENGAGE_REQUEST(part->remote_vars_version));
707
708 if (!XPC_SUPPORTS_RP_STAMP(remote_rp_version)) {
709 DBUG_ON(!XPC_SUPPORTS_DISENGAGE_REQUEST(remote_vars->version));
710
711 /*
712 * Other side rebooted and previous XPC did support the
713 * disengage request, but the new one doesn't.
714 */
715
716 xpc_clear_partition_engaged(1UL << partid);
717 xpc_clear_partition_disengage_request(1UL << partid);
718
719 xpc_update_partition_info(part, remote_rp_version,
720 &remote_rp_stamp, remote_rp_pa,
721 remote_vars_pa, remote_vars);
722 reactivate = 1;
723
724 } else {
725 DBUG_ON(!XPC_SUPPORTS_DISENGAGE_REQUEST(remote_vars->version));
726
727 stamp_diff = xpc_compare_stamps(&part->remote_rp_stamp,
728 &remote_rp_stamp);
729 if (stamp_diff != 0) {
730 DBUG_ON(stamp_diff >= 0);
731
732 /*
733 * Other side rebooted and the previous XPC did support
734 * the disengage request, as does the new one.
735 */
736
737 DBUG_ON(xpc_partition_engaged(1UL << partid));
738 DBUG_ON(xpc_partition_disengage_requested(1UL <<
739 partid));
740
741 xpc_update_partition_info(part, remote_rp_version,
742 &remote_rp_stamp,
743 remote_rp_pa, remote_vars_pa,
744 remote_vars);
745 reactivate = 1;
746 }
747 }
748
749 if (part->disengage_request_timeout > 0 &&
750 !xpc_partition_disengaged(part)) {
751 /* still waiting on other side to disengage from us */
752 return;
753 }
754
755 if (reactivate) {
756 part->reactivate_nasid = nasid;
757 XPC_DEACTIVATE_PARTITION(part, xpReactivating);
758
759 } else if (XPC_SUPPORTS_DISENGAGE_REQUEST(part->remote_vars_version) &&
760 xpc_partition_disengage_requested(1UL << partid)) {
761 XPC_DEACTIVATE_PARTITION(part, xpOtherGoingDown);
762 } 248 }
763}
764 249
765/* 250 if (remote_rp->SAL_partid == xp_partition_id)
766 * Loop through the activation AMO variables and process any bits 251 return xpLocalPartid;
767 * which are set. Each bit indicates a nasid sending a partition
768 * activation or deactivation request.
769 *
770 * Return #of IRQs detected.
771 */
772int
773xpc_identify_act_IRQ_sender(void)
774{
775 int word, bit;
776 u64 nasid_mask;
777 u64 nasid; /* remote nasid */
778 int n_IRQs_detected = 0;
779 AMO_t *act_amos;
780
781 act_amos = xpc_vars->amos_page + XPC_ACTIVATE_IRQ_AMOS;
782
783 /* scan through act AMO variable looking for non-zero entries */
784 for (word = 0; word < xp_nasid_mask_words; word++) {
785
786 if (xpc_exiting)
787 break;
788
789 nasid_mask = xpc_IPI_receive(&act_amos[word]);
790 if (nasid_mask == 0) {
791 /* no IRQs from nasids in this variable */
792 continue;
793 }
794
795 dev_dbg(xpc_part, "AMO[%d] gave back 0x%lx\n", word,
796 nasid_mask);
797
798 /*
799 * If this nasid has been added to the machine since
800 * our partition was reset, this will retain the
801 * remote nasid in our reserved pages machine mask.
802 * This is used in the event of module reload.
803 */
804 xpc_mach_nasids[word] |= nasid_mask;
805
806 /* locate the nasid(s) which sent interrupts */
807 252
808 for (bit = 0; bit < (8 * sizeof(u64)); bit++) { 253 return xpSuccess;
809 if (nasid_mask & (1UL << bit)) {
810 n_IRQs_detected++;
811 nasid = XPC_NASID_FROM_W_B(word, bit);
812 dev_dbg(xpc_part, "interrupt from nasid %ld\n",
813 nasid);
814 xpc_identify_act_IRQ_req(nasid);
815 }
816 }
817 }
818 return n_IRQs_detected;
819} 254}
820 255
821/* 256/*
822 * See if the other side has responded to a partition disengage request 257 * See if the other side has responded to a partition deactivate request
823 * from us. 258 * from us. Though we requested the remote partition to deactivate with regard
259 * to us, we really only need to wait for the other side to disengage from us.
824 */ 260 */
825int 261int
826xpc_partition_disengaged(struct xpc_partition *part) 262xpc_partition_disengaged(struct xpc_partition *part)
@@ -828,41 +264,37 @@ xpc_partition_disengaged(struct xpc_partition *part)
828 short partid = XPC_PARTID(part); 264 short partid = XPC_PARTID(part);
829 int disengaged; 265 int disengaged;
830 266
831 disengaged = (xpc_partition_engaged(1UL << partid) == 0); 267 disengaged = !xpc_partition_engaged(partid);
832 if (part->disengage_request_timeout) { 268 if (part->disengage_timeout) {
833 if (!disengaged) { 269 if (!disengaged) {
834 if (time_before(jiffies, 270 if (time_is_after_jiffies(part->disengage_timeout)) {
835 part->disengage_request_timeout)) {
836 /* timelimit hasn't been reached yet */ 271 /* timelimit hasn't been reached yet */
837 return 0; 272 return 0;
838 } 273 }
839 274
840 /* 275 /*
841 * Other side hasn't responded to our disengage 276 * Other side hasn't responded to our deactivate
842 * request in a timely fashion, so assume it's dead. 277 * request in a timely fashion, so assume it's dead.
843 */ 278 */
844 279
845 dev_info(xpc_part, "disengage from remote partition %d " 280 dev_info(xpc_part, "deactivate request to remote "
846 "timed out\n", partid); 281 "partition %d timed out\n", partid);
847 xpc_disengage_request_timedout = 1; 282 xpc_disengage_timedout = 1;
848 xpc_clear_partition_engaged(1UL << partid); 283 xpc_assume_partition_disengaged(partid);
849 disengaged = 1; 284 disengaged = 1;
850 } 285 }
851 part->disengage_request_timeout = 0; 286 part->disengage_timeout = 0;
852 287
853 /* cancel the timer function, provided it's not us */ 288 /* cancel the timer function, provided it's not us */
854 if (!in_interrupt()) { 289 if (!in_interrupt())
855 del_singleshot_timer_sync(&part-> 290 del_singleshot_timer_sync(&part->disengage_timer);
856 disengage_request_timer);
857 }
858 291
859 DBUG_ON(part->act_state != XPC_P_DEACTIVATING && 292 DBUG_ON(part->act_state != XPC_P_AS_DEACTIVATING &&
860 part->act_state != XPC_P_INACTIVE); 293 part->act_state != XPC_P_AS_INACTIVE);
861 if (part->act_state != XPC_P_INACTIVE) 294 if (part->act_state != XPC_P_AS_INACTIVE)
862 xpc_wakeup_channel_mgr(part); 295 xpc_wakeup_channel_mgr(part);
863 296
864 if (XPC_SUPPORTS_DISENGAGE_REQUEST(part->remote_vars_version)) 297 xpc_cancel_partition_deactivation_request(part);
865 xpc_cancel_partition_disengage_request(part);
866 } 298 }
867 return disengaged; 299 return disengaged;
868} 300}
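
xpc_partition_disengaged() above only gives up on a peer once the disengage deadline (a jiffies value) has been reached. A wrap-safe deadline test equivalent to the !time_is_after_jiffies() check, written in plain C since the kernel code uses the jiffies macros directly:

/* True once "now" has reached or passed "deadline", correct even if the
 * counter has wrapped around in between. */
static int deadline_passed(unsigned long now, unsigned long deadline)
{
	return (long)(now - deadline) >= 0;
}
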
@@ -879,8 +311,8 @@ xpc_mark_partition_active(struct xpc_partition *part)
879 dev_dbg(xpc_part, "setting partition %d to ACTIVE\n", XPC_PARTID(part)); 311 dev_dbg(xpc_part, "setting partition %d to ACTIVE\n", XPC_PARTID(part));
880 312
881 spin_lock_irqsave(&part->act_lock, irq_flags); 313 spin_lock_irqsave(&part->act_lock, irq_flags);
882 if (part->act_state == XPC_P_ACTIVATING) { 314 if (part->act_state == XPC_P_AS_ACTIVATING) {
883 part->act_state = XPC_P_ACTIVE; 315 part->act_state = XPC_P_AS_ACTIVE;
884 ret = xpSuccess; 316 ret = xpSuccess;
885 } else { 317 } else {
886 DBUG_ON(part->reason == xpSuccess); 318 DBUG_ON(part->reason == xpSuccess);
@@ -892,7 +324,7 @@ xpc_mark_partition_active(struct xpc_partition *part)
892} 324}
893 325
894/* 326/*
895 * Notify XPC that the partition is down. 327 * Start the process of deactivating the specified partition.
896 */ 328 */
897void 329void
898xpc_deactivate_partition(const int line, struct xpc_partition *part, 330xpc_deactivate_partition(const int line, struct xpc_partition *part,
@@ -902,16 +334,16 @@ xpc_deactivate_partition(const int line, struct xpc_partition *part,
902 334
903 spin_lock_irqsave(&part->act_lock, irq_flags); 335 spin_lock_irqsave(&part->act_lock, irq_flags);
904 336
905 if (part->act_state == XPC_P_INACTIVE) { 337 if (part->act_state == XPC_P_AS_INACTIVE) {
906 XPC_SET_REASON(part, reason, line); 338 XPC_SET_REASON(part, reason, line);
907 spin_unlock_irqrestore(&part->act_lock, irq_flags); 339 spin_unlock_irqrestore(&part->act_lock, irq_flags);
908 if (reason == xpReactivating) { 340 if (reason == xpReactivating) {
909 /* we interrupt ourselves to reactivate partition */ 341 /* we interrupt ourselves to reactivate partition */
910 xpc_IPI_send_reactivate(part); 342 xpc_request_partition_reactivation(part);
911 } 343 }
912 return; 344 return;
913 } 345 }
914 if (part->act_state == XPC_P_DEACTIVATING) { 346 if (part->act_state == XPC_P_AS_DEACTIVATING) {
915 if ((part->reason == xpUnloading && reason != xpUnloading) || 347 if ((part->reason == xpUnloading && reason != xpUnloading) ||
916 reason == xpReactivating) { 348 reason == xpReactivating) {
917 XPC_SET_REASON(part, reason, line); 349 XPC_SET_REASON(part, reason, line);
@@ -920,22 +352,18 @@ xpc_deactivate_partition(const int line, struct xpc_partition *part,
920 return; 352 return;
921 } 353 }
922 354
923 part->act_state = XPC_P_DEACTIVATING; 355 part->act_state = XPC_P_AS_DEACTIVATING;
924 XPC_SET_REASON(part, reason, line); 356 XPC_SET_REASON(part, reason, line);
925 357
926 spin_unlock_irqrestore(&part->act_lock, irq_flags); 358 spin_unlock_irqrestore(&part->act_lock, irq_flags);
927 359
928 if (XPC_SUPPORTS_DISENGAGE_REQUEST(part->remote_vars_version)) { 360 /* ask remote partition to deactivate with regard to us */
929 xpc_request_partition_disengage(part); 361 xpc_request_partition_deactivation(part);
930 xpc_IPI_send_disengage(part);
931 362
932 /* set a timelimit on the disengage request */ 363 /* set a timelimit on the disengage phase of the deactivation request */
933 part->disengage_request_timeout = jiffies + 364 part->disengage_timeout = jiffies + (xpc_disengage_timelimit * HZ);
934 (xpc_disengage_request_timelimit * HZ); 365 part->disengage_timer.expires = part->disengage_timeout;
935 part->disengage_request_timer.expires = 366 add_timer(&part->disengage_timer);
936 part->disengage_request_timeout;
937 add_timer(&part->disengage_request_timer);
938 }
939 367
940 dev_dbg(xpc_part, "bringing partition %d down, reason = %d\n", 368 dev_dbg(xpc_part, "bringing partition %d down, reason = %d\n",
941 XPC_PARTID(part), reason); 369 XPC_PARTID(part), reason);
@@ -955,7 +383,7 @@ xpc_mark_partition_inactive(struct xpc_partition *part)
955 XPC_PARTID(part)); 383 XPC_PARTID(part));
956 384
957 spin_lock_irqsave(&part->act_lock, irq_flags); 385 spin_lock_irqsave(&part->act_lock, irq_flags);
958 part->act_state = XPC_P_INACTIVE; 386 part->act_state = XPC_P_AS_INACTIVE;
959 spin_unlock_irqrestore(&part->act_lock, irq_flags); 387 spin_unlock_irqrestore(&part->act_lock, irq_flags);
960 part->remote_rp_pa = 0; 388 part->remote_rp_pa = 0;
961} 389}
@@ -974,28 +402,22 @@ xpc_discovery(void)
974{ 402{
975 void *remote_rp_base; 403 void *remote_rp_base;
976 struct xpc_rsvd_page *remote_rp; 404 struct xpc_rsvd_page *remote_rp;
977 struct xpc_vars *remote_vars; 405 unsigned long remote_rp_pa;
978 u64 remote_rp_pa;
979 u64 remote_vars_pa;
980 int region; 406 int region;
981 int region_size; 407 int region_size;
982 int max_regions; 408 int max_regions;
983 int nasid; 409 int nasid;
984 struct xpc_rsvd_page *rp; 410 struct xpc_rsvd_page *rp;
985 short partid; 411 unsigned long *discovered_nasids;
986 struct xpc_partition *part;
987 u64 *discovered_nasids;
988 enum xp_retval ret; 412 enum xp_retval ret;
989 413
990 remote_rp = xpc_kmalloc_cacheline_aligned(XPC_RP_HEADER_SIZE + 414 remote_rp = xpc_kmalloc_cacheline_aligned(XPC_RP_HEADER_SIZE +
991 xp_nasid_mask_bytes, 415 xpc_nasid_mask_nbytes,
992 GFP_KERNEL, &remote_rp_base); 416 GFP_KERNEL, &remote_rp_base);
993 if (remote_rp == NULL) 417 if (remote_rp == NULL)
994 return; 418 return;
995 419
996 remote_vars = (struct xpc_vars *)remote_rp; 420 discovered_nasids = kzalloc(sizeof(long) * xpc_nasid_mask_nlongs,
997
998 discovered_nasids = kzalloc(sizeof(u64) * xp_nasid_mask_words,
999 GFP_KERNEL); 421 GFP_KERNEL);
1000 if (discovered_nasids == NULL) { 422 if (discovered_nasids == NULL) {
1001 kfree(remote_rp_base); 423 kfree(remote_rp_base);
@@ -1010,7 +432,7 @@ xpc_discovery(void)
1010 * protection is in regards to memory, IOI and IPI. 432 * protection is in regards to memory, IOI and IPI.
1011 */ 433 */
1012 max_regions = 64; 434 max_regions = 64;
1013 region_size = sn_region_size; 435 region_size = xp_region_size;
1014 436
1015 switch (region_size) { 437 switch (region_size) {
1016 case 128: 438 case 128:
@@ -1038,28 +460,28 @@ xpc_discovery(void)
1038 460
1039 dev_dbg(xpc_part, "checking nasid %d\n", nasid); 461 dev_dbg(xpc_part, "checking nasid %d\n", nasid);
1040 462
1041 if (XPC_NASID_IN_ARRAY(nasid, xpc_part_nasids)) { 463 if (test_bit(nasid / 2, xpc_part_nasids)) {
1042 dev_dbg(xpc_part, "PROM indicates Nasid %d is " 464 dev_dbg(xpc_part, "PROM indicates Nasid %d is "
1043 "part of the local partition; skipping " 465 "part of the local partition; skipping "
1044 "region\n", nasid); 466 "region\n", nasid);
1045 break; 467 break;
1046 } 468 }
1047 469
1048 if (!(XPC_NASID_IN_ARRAY(nasid, xpc_mach_nasids))) { 470 if (!(test_bit(nasid / 2, xpc_mach_nasids))) {
1049 dev_dbg(xpc_part, "PROM indicates Nasid %d was " 471 dev_dbg(xpc_part, "PROM indicates Nasid %d was "
1050 "not on Numa-Link network at reset\n", 472 "not on Numa-Link network at reset\n",
1051 nasid); 473 nasid);
1052 continue; 474 continue;
1053 } 475 }
1054 476
1055 if (XPC_NASID_IN_ARRAY(nasid, discovered_nasids)) { 477 if (test_bit(nasid / 2, discovered_nasids)) {
1056 dev_dbg(xpc_part, "Nasid %d is part of a " 478 dev_dbg(xpc_part, "Nasid %d is part of a "
1057 "partition which was previously " 479 "partition which was previously "
1058 "discovered\n", nasid); 480 "discovered\n", nasid);
1059 continue; 481 continue;
1060 } 482 }
1061 483
1062 /* pull over the reserved page structure */ 484 /* pull over the rsvd page header & part_nasids mask */
1063 485
1064 ret = xpc_get_remote_rp(nasid, discovered_nasids, 486 ret = xpc_get_remote_rp(nasid, discovered_nasids,
1065 remote_rp, &remote_rp_pa); 487 remote_rp, &remote_rp_pa);
@@ -1074,72 +496,8 @@ xpc_discovery(void)
1074 continue; 496 continue;
1075 } 497 }
1076 498
1077 remote_vars_pa = remote_rp->vars_pa; 499 xpc_request_partition_activation(remote_rp,
1078 500 remote_rp_pa, nasid);
1079 partid = remote_rp->partid;
1080 part = &xpc_partitions[partid];
1081
1082 /* pull over the cross partition variables */
1083
1084 ret = xpc_get_remote_vars(remote_vars_pa, remote_vars);
1085 if (ret != xpSuccess) {
1086 dev_dbg(xpc_part, "unable to get XPC variables "
1087 "from nasid %d, reason=%d\n", nasid,
1088 ret);
1089
1090 XPC_DEACTIVATE_PARTITION(part, ret);
1091 continue;
1092 }
1093
1094 if (part->act_state != XPC_P_INACTIVE) {
1095 dev_dbg(xpc_part, "partition %d on nasid %d is "
1096 "already activating\n", partid, nasid);
1097 break;
1098 }
1099
1100 /*
1101 * Register the remote partition's AMOs with SAL so it
1102 * can handle and cleanup errors within that address
1103 * range should the remote partition go down. We don't
1104 * unregister this range because it is difficult to
1105 * tell when outstanding writes to the remote partition
1106 * are finished and thus when it is safe to
1107 * unregister. This should not result in wasted space
1108 * in the SAL xp_addr_region table because we should
1109 * get the same page for remote_act_amos_pa after
1110 * module reloads and system reboots.
1111 */
1112 if (sn_register_xp_addr_region
1113 (remote_vars->amos_page_pa, PAGE_SIZE, 1) < 0) {
1114 dev_dbg(xpc_part,
1115 "partition %d failed to "
1116 "register xp_addr region 0x%016lx\n",
1117 partid, remote_vars->amos_page_pa);
1118
1119 XPC_SET_REASON(part, xpPhysAddrRegFailed,
1120 __LINE__);
1121 break;
1122 }
1123
1124 /*
1125 * The remote nasid is valid and available.
1126 * Send an interrupt to that nasid to notify
1127 * it that we are ready to begin activation.
1128 */
1129 dev_dbg(xpc_part, "sending an interrupt to AMO 0x%lx, "
1130 "nasid %d, phys_cpuid 0x%x\n",
1131 remote_vars->amos_page_pa,
1132 remote_vars->act_nasid,
1133 remote_vars->act_phys_cpuid);
1134
1135 if (XPC_SUPPORTS_DISENGAGE_REQUEST(remote_vars->
1136 version)) {
1137 part->remote_amos_page_pa =
1138 remote_vars->amos_page_pa;
1139 xpc_mark_partition_disengaged(part);
1140 xpc_cancel_partition_disengage_request(part);
1141 }
1142 xpc_IPI_send_activate(remote_vars);
1143 } 501 }
1144 } 502 }
1145 503
@@ -1155,20 +513,16 @@ enum xp_retval
1155xpc_initiate_partid_to_nasids(short partid, void *nasid_mask) 513xpc_initiate_partid_to_nasids(short partid, void *nasid_mask)
1156{ 514{
1157 struct xpc_partition *part; 515 struct xpc_partition *part;
1158 u64 part_nasid_pa; 516 unsigned long part_nasid_pa;
1159 int bte_res;
1160 517
1161 part = &xpc_partitions[partid]; 518 part = &xpc_partitions[partid];
1162 if (part->remote_rp_pa == 0) 519 if (part->remote_rp_pa == 0)
1163 return xpPartitionDown; 520 return xpPartitionDown;
1164 521
1165 memset(nasid_mask, 0, XP_NASID_MASK_BYTES); 522 memset(nasid_mask, 0, xpc_nasid_mask_nbytes);
1166
1167 part_nasid_pa = (u64)XPC_RP_PART_NASIDS(part->remote_rp_pa);
1168 523
1169 bte_res = xp_bte_copy(part_nasid_pa, (u64)nasid_mask, 524 part_nasid_pa = (unsigned long)XPC_RP_PART_NASIDS(part->remote_rp_pa);
1170 xp_nasid_mask_bytes, (BTE_NOTIFY | BTE_WACQUIRE),
1171 NULL);
1172 525
1173 return xpc_map_bte_errors(bte_res); 526 return xp_remote_memcpy(xp_pa(nasid_mask), part_nasid_pa,
527 xpc_nasid_mask_nbytes);
1174} 528}
diff --git a/drivers/misc/sgi-xp/xpc_sn2.c b/drivers/misc/sgi-xp/xpc_sn2.c
new file mode 100644
index 000000000000..b4882ccf6344
--- /dev/null
+++ b/drivers/misc/sgi-xp/xpc_sn2.c
@@ -0,0 +1,2404 @@
1/*
2 * This file is subject to the terms and conditions of the GNU General Public
3 * License. See the file "COPYING" in the main directory of this archive
4 * for more details.
5 *
6 * Copyright (c) 2008 Silicon Graphics, Inc. All Rights Reserved.
7 */
8
9/*
10 * Cross Partition Communication (XPC) sn2-based functions.
11 *
12 * Architecture specific implementation of common functions.
13 *
14 */
15
16#include <linux/delay.h>
17#include <asm/uncached.h>
18#include <asm/sn/mspec.h>
19#include <asm/sn/sn_sal.h>
20#include "xpc.h"
21
22/*
23 * Define the number of u64s required to represent all the C-brick nasids
24 * as a bitmap. The cross-partition kernel modules deal only with
25 * C-brick nasids, thus the need for bitmaps which don't account for
26 * odd-numbered (non C-brick) nasids.
27 */
28#define XPC_MAX_PHYSNODES_SN2 (MAX_NUMALINK_NODES / 2)
29#define XP_NASID_MASK_BYTES_SN2 ((XPC_MAX_PHYSNODES_SN2 + 7) / 8)
30#define XP_NASID_MASK_WORDS_SN2 ((XPC_MAX_PHYSNODES_SN2 + 63) / 64)
31
32/*
33 * Memory for XPC's amo variables is allocated by the MSPEC driver. These
34 * pages are located in the lowest granule. The lowest granule uses 4k pages
35 * for cached references and an alternate TLB handler to never provide a
36 * cacheable mapping for the entire region. This will prevent speculative
37 * reading of cached copies of our lines from being issued which will cause
38 * a PI FSB Protocol error to be generated by the SHUB. For XPC, we need 64
39 * amo variables (based on XP_MAX_NPARTITIONS_SN2) to identify the senders of
40 * NOTIFY IRQs, 128 amo variables (based on XP_NASID_MASK_WORDS_SN2) to identify
41 * the senders of ACTIVATE IRQs, 1 amo variable to identify which remote
42 * partitions (i.e., XPCs) consider themselves currently engaged with the
43 * local XPC and 1 amo variable to request partition deactivation.
44 */
45#define XPC_NOTIFY_IRQ_AMOS_SN2 0
46#define XPC_ACTIVATE_IRQ_AMOS_SN2 (XPC_NOTIFY_IRQ_AMOS_SN2 + \
47 XP_MAX_NPARTITIONS_SN2)
48#define XPC_ENGAGED_PARTITIONS_AMO_SN2 (XPC_ACTIVATE_IRQ_AMOS_SN2 + \
49 XP_NASID_MASK_WORDS_SN2)
50#define XPC_DEACTIVATE_REQUEST_AMO_SN2 (XPC_ENGAGED_PARTITIONS_AMO_SN2 + 1)
51
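Given the counts stated in the comment above (64 notify amos for XP_MAX_NPARTITIONS_SN2 and 128 activate amos for XP_NASID_MASK_WORDS_SN2), the index macros lay the amo page out as sketched below; the enum is purely illustrative, the real values come from the macros themselves:

enum {						/* illustrative layout only */
	NOTIFY_AMOS_FIRST	= 0,		/* indices   0 ..  63 */
	ACTIVATE_AMOS_FIRST	= 64,		/* indices  64 .. 191 */
	ENGAGED_PARTITIONS_AMO	= 192,
	DEACTIVATE_REQUEST_AMO	= 193,
};
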
52/*
53 * Buffer used to store a local copy of portions of a remote partition's
54 * reserved page (either its header and part_nasids mask, or its vars).
55 */
56static void *xpc_remote_copy_buffer_base_sn2;
57static char *xpc_remote_copy_buffer_sn2;
58
59static struct xpc_vars_sn2 *xpc_vars_sn2;
60static struct xpc_vars_part_sn2 *xpc_vars_part_sn2;
61
62static int
63xpc_setup_partitions_sn_sn2(void)
64{
65 /* nothing needs to be done */
66 return 0;
67}
68
69/* SH_IPI_ACCESS shub register value on startup */
70static u64 xpc_sh1_IPI_access_sn2;
71static u64 xpc_sh2_IPI_access0_sn2;
72static u64 xpc_sh2_IPI_access1_sn2;
73static u64 xpc_sh2_IPI_access2_sn2;
74static u64 xpc_sh2_IPI_access3_sn2;
75
76/*
77 * Change protections to allow IPI operations.
78 */
79static void
80xpc_allow_IPI_ops_sn2(void)
81{
82 int node;
83 int nasid;
84
85 /* !!! The following should get moved into SAL. */
86 if (is_shub2()) {
87 xpc_sh2_IPI_access0_sn2 =
88 (u64)HUB_L((u64 *)LOCAL_MMR_ADDR(SH2_IPI_ACCESS0));
89 xpc_sh2_IPI_access1_sn2 =
90 (u64)HUB_L((u64 *)LOCAL_MMR_ADDR(SH2_IPI_ACCESS1));
91 xpc_sh2_IPI_access2_sn2 =
92 (u64)HUB_L((u64 *)LOCAL_MMR_ADDR(SH2_IPI_ACCESS2));
93 xpc_sh2_IPI_access3_sn2 =
94 (u64)HUB_L((u64 *)LOCAL_MMR_ADDR(SH2_IPI_ACCESS3));
95
96 for_each_online_node(node) {
97 nasid = cnodeid_to_nasid(node);
98 HUB_S((u64 *)GLOBAL_MMR_ADDR(nasid, SH2_IPI_ACCESS0),
99 -1UL);
100 HUB_S((u64 *)GLOBAL_MMR_ADDR(nasid, SH2_IPI_ACCESS1),
101 -1UL);
102 HUB_S((u64 *)GLOBAL_MMR_ADDR(nasid, SH2_IPI_ACCESS2),
103 -1UL);
104 HUB_S((u64 *)GLOBAL_MMR_ADDR(nasid, SH2_IPI_ACCESS3),
105 -1UL);
106 }
107 } else {
108 xpc_sh1_IPI_access_sn2 =
109 (u64)HUB_L((u64 *)LOCAL_MMR_ADDR(SH1_IPI_ACCESS));
110
111 for_each_online_node(node) {
112 nasid = cnodeid_to_nasid(node);
113 HUB_S((u64 *)GLOBAL_MMR_ADDR(nasid, SH1_IPI_ACCESS),
114 -1UL);
115 }
116 }
117}
118
119/*
120 * Restrict protections to disallow IPI operations.
121 */
122static void
123xpc_disallow_IPI_ops_sn2(void)
124{
125 int node;
126 int nasid;
127
128 /* !!! The following should get moved into SAL. */
129 if (is_shub2()) {
130 for_each_online_node(node) {
131 nasid = cnodeid_to_nasid(node);
132 HUB_S((u64 *)GLOBAL_MMR_ADDR(nasid, SH2_IPI_ACCESS0),
133 xpc_sh2_IPI_access0_sn2);
134 HUB_S((u64 *)GLOBAL_MMR_ADDR(nasid, SH2_IPI_ACCESS1),
135 xpc_sh2_IPI_access1_sn2);
136 HUB_S((u64 *)GLOBAL_MMR_ADDR(nasid, SH2_IPI_ACCESS2),
137 xpc_sh2_IPI_access2_sn2);
138 HUB_S((u64 *)GLOBAL_MMR_ADDR(nasid, SH2_IPI_ACCESS3),
139 xpc_sh2_IPI_access3_sn2);
140 }
141 } else {
142 for_each_online_node(node) {
143 nasid = cnodeid_to_nasid(node);
144 HUB_S((u64 *)GLOBAL_MMR_ADDR(nasid, SH1_IPI_ACCESS),
145 xpc_sh1_IPI_access_sn2);
146 }
147 }
148}
149
150/*
151 * The following set of functions are used for the sending and receiving of
152 * IRQs (also known as IPIs). There are two flavors of IRQs, one that is
153 * associated with partition activity (SGI_XPC_ACTIVATE) and the other that
154 * is associated with channel activity (SGI_XPC_NOTIFY).
155 */
156
157static u64
158xpc_receive_IRQ_amo_sn2(struct amo *amo)
159{
160 return FETCHOP_LOAD_OP(TO_AMO((u64)&amo->variable), FETCHOP_CLEAR);
161}
162
163static enum xp_retval
164xpc_send_IRQ_sn2(struct amo *amo, u64 flag, int nasid, int phys_cpuid,
165 int vector)
166{
167 int ret = 0;
168 unsigned long irq_flags;
169
170 local_irq_save(irq_flags);
171
172 FETCHOP_STORE_OP(TO_AMO((u64)&amo->variable), FETCHOP_OR, flag);
173 sn_send_IPI_phys(nasid, phys_cpuid, vector, 0);
174
175 /*
176 * We must always use the nofault function regardless of whether we
177 * are on a Shub 1.1 system or a Shub 1.2 slice 0xc processor. If we
178 * didn't, we'd never know that the other partition is down and would
179 * keep sending IRQs and amos to it until the heartbeat times out.
180 */
181 ret = xp_nofault_PIOR((u64 *)GLOBAL_MMR_ADDR(NASID_GET(&amo->variable),
182 xp_nofault_PIOR_target));
183
184 local_irq_restore(irq_flags);
185
186 return (ret == 0) ? xpSuccess : xpPioReadError;
187}
188
189static struct amo *
190xpc_init_IRQ_amo_sn2(int index)
191{
192 struct amo *amo = xpc_vars_sn2->amos_page + index;
193
194 (void)xpc_receive_IRQ_amo_sn2(amo); /* clear amo variable */
195 return amo;
196}
197
198/*
199 * Functions associated with SGI_XPC_ACTIVATE IRQ.
200 */
201
202/*
203 * Notify the heartbeat check thread that an activate IRQ has been received.
204 */
205static irqreturn_t
206xpc_handle_activate_IRQ_sn2(int irq, void *dev_id)
207{
208 unsigned long irq_flags;
209
210 spin_lock_irqsave(&xpc_activate_IRQ_rcvd_lock, irq_flags);
211 xpc_activate_IRQ_rcvd++;
212 spin_unlock_irqrestore(&xpc_activate_IRQ_rcvd_lock, irq_flags);
213
214 wake_up_interruptible(&xpc_activate_IRQ_wq);
215 return IRQ_HANDLED;
216}
217
218/*
219 * Flag the appropriate amo variable and send an IRQ to the specified node.
220 */
221static void
222xpc_send_activate_IRQ_sn2(unsigned long amos_page_pa, int from_nasid,
223 int to_nasid, int to_phys_cpuid)
224{
225 struct amo *amos = (struct amo *)__va(amos_page_pa +
226 (XPC_ACTIVATE_IRQ_AMOS_SN2 *
227 sizeof(struct amo)));
228
229 (void)xpc_send_IRQ_sn2(&amos[BIT_WORD(from_nasid / 2)],
230 BIT_MASK(from_nasid / 2), to_nasid,
231 to_phys_cpuid, SGI_XPC_ACTIVATE);
232}
233
234static void
235xpc_send_local_activate_IRQ_sn2(int from_nasid)
236{
237 unsigned long irq_flags;
238 struct amo *amos = (struct amo *)__va(xpc_vars_sn2->amos_page_pa +
239 (XPC_ACTIVATE_IRQ_AMOS_SN2 *
240 sizeof(struct amo)));
241
242 /* fake the sending and receipt of an activate IRQ from remote nasid */
243 FETCHOP_STORE_OP(TO_AMO((u64)&amos[BIT_WORD(from_nasid / 2)].variable),
244 FETCHOP_OR, BIT_MASK(from_nasid / 2));
245
246 spin_lock_irqsave(&xpc_activate_IRQ_rcvd_lock, irq_flags);
247 xpc_activate_IRQ_rcvd++;
248 spin_unlock_irqrestore(&xpc_activate_IRQ_rcvd_lock, irq_flags);
249
250 wake_up_interruptible(&xpc_activate_IRQ_wq);
251}
252
253/*
254 * Functions associated with SGI_XPC_NOTIFY IRQ.
255 */
256
257/*
258 * Check to see if any chctl flags were sent from the specified partition.
259 */
260static void
261xpc_check_for_sent_chctl_flags_sn2(struct xpc_partition *part)
262{
263 union xpc_channel_ctl_flags chctl;
264 unsigned long irq_flags;
265
266 chctl.all_flags = xpc_receive_IRQ_amo_sn2(part->sn.sn2.
267 local_chctl_amo_va);
268 if (chctl.all_flags == 0)
269 return;
270
271 spin_lock_irqsave(&part->chctl_lock, irq_flags);
272 part->chctl.all_flags |= chctl.all_flags;
273 spin_unlock_irqrestore(&part->chctl_lock, irq_flags);
274
275 dev_dbg(xpc_chan, "received notify IRQ from partid=%d, chctl.all_flags="
276 "0x%lx\n", XPC_PARTID(part), chctl.all_flags);
277
278 xpc_wakeup_channel_mgr(part);
279}
280
281/*
282 * Handle the receipt of a SGI_XPC_NOTIFY IRQ by seeing whether the specified
283 * partition actually sent it. Since SGI_XPC_NOTIFY IRQs may be shared by more
284 * than one partition, we use an amo structure per partition to indicate
285 * whether a partition has sent an IRQ or not. If it has, then wake up the
286 * associated kthread to handle it.
287 *
288 * All SGI_XPC_NOTIFY IRQs received by XPC are the result of IRQs sent by XPC
289 * running on other partitions.
290 *
291 * Noteworthy Arguments:
292 *
293 * irq - Interrupt ReQuest number. NOT USED.
294 *
295 * dev_id - partid of IRQ's potential sender.
296 */
297static irqreturn_t
298xpc_handle_notify_IRQ_sn2(int irq, void *dev_id)
299{
300 short partid = (short)(u64)dev_id;
301 struct xpc_partition *part = &xpc_partitions[partid];
302
303 DBUG_ON(partid < 0 || partid >= XP_MAX_NPARTITIONS_SN2);
304
305 if (xpc_part_ref(part)) {
306 xpc_check_for_sent_chctl_flags_sn2(part);
307
308 xpc_part_deref(part);
309 }
310 return IRQ_HANDLED;
311}
312
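As the comment above explains, SGI_XPC_NOTIFY can be shared by several partitions, so each handler fetches-and-clears its own per-partition amo to decide whether the IRQ was really meant for it. A minimal stand-in using C11 atomics in place of the FETCHOP amo operations (names hypothetical):

#include <stdatomic.h>
#include <stdint.h>

/* Atomically take and clear this partition's pending flag word. */
static uint64_t take_pending_flags(_Atomic uint64_t *amo)
{
	return atomic_exchange(amo, 0);
}

/* Shared-IRQ handler body: only act if our own flag word was set. */
static int handle_notify_irq(_Atomic uint64_t *my_amo)
{
	uint64_t flags = take_pending_flags(my_amo);

	if (flags == 0)
		return 0;	/* IRQ belonged to some other sender */
	/* ...record the flags and wake the channel manager... */
	return 1;
}
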
313/*
314 * Check to see if xpc_handle_notify_IRQ_sn2() dropped any IRQs on the floor
315 * because the write to their associated amo variable completed after the IRQ
316 * was received.
317 */
318static void
319xpc_check_for_dropped_notify_IRQ_sn2(struct xpc_partition *part)
320{
321 struct xpc_partition_sn2 *part_sn2 = &part->sn.sn2;
322
323 if (xpc_part_ref(part)) {
324 xpc_check_for_sent_chctl_flags_sn2(part);
325
326 part_sn2->dropped_notify_IRQ_timer.expires = jiffies +
327 XPC_DROPPED_NOTIFY_IRQ_WAIT_INTERVAL;
328 add_timer(&part_sn2->dropped_notify_IRQ_timer);
329 xpc_part_deref(part);
330 }
331}
332
333/*
334 * Send a notify IRQ to the remote partition that is associated with the
335 * specified channel.
336 */
337static void
338xpc_send_notify_IRQ_sn2(struct xpc_channel *ch, u8 chctl_flag,
339 char *chctl_flag_string, unsigned long *irq_flags)
340{
341 struct xpc_partition *part = &xpc_partitions[ch->partid];
342 struct xpc_partition_sn2 *part_sn2 = &part->sn.sn2;
343 union xpc_channel_ctl_flags chctl = { 0 };
344 enum xp_retval ret;
345
346 if (likely(part->act_state != XPC_P_AS_DEACTIVATING)) {
347 chctl.flags[ch->number] = chctl_flag;
348 ret = xpc_send_IRQ_sn2(part_sn2->remote_chctl_amo_va,
349 chctl.all_flags,
350 part_sn2->notify_IRQ_nasid,
351 part_sn2->notify_IRQ_phys_cpuid,
352 SGI_XPC_NOTIFY);
353 dev_dbg(xpc_chan, "%s sent to partid=%d, channel=%d, ret=%d\n",
354 chctl_flag_string, ch->partid, ch->number, ret);
355 if (unlikely(ret != xpSuccess)) {
356 if (irq_flags != NULL)
357 spin_unlock_irqrestore(&ch->lock, *irq_flags);
358 XPC_DEACTIVATE_PARTITION(part, ret);
359 if (irq_flags != NULL)
360 spin_lock_irqsave(&ch->lock, *irq_flags);
361 }
362 }
363}
364
365#define XPC_SEND_NOTIFY_IRQ_SN2(_ch, _ipi_f, _irq_f) \
366 xpc_send_notify_IRQ_sn2(_ch, _ipi_f, #_ipi_f, _irq_f)
367
368/*
369 * Make it look like the remote partition, which is associated with the
370 * specified channel, sent us a notify IRQ. This faked IRQ will be handled
371 * by xpc_check_for_dropped_notify_IRQ_sn2().
372 */
373static void
374xpc_send_local_notify_IRQ_sn2(struct xpc_channel *ch, u8 chctl_flag,
375 char *chctl_flag_string)
376{
377 struct xpc_partition *part = &xpc_partitions[ch->partid];
378 union xpc_channel_ctl_flags chctl = { 0 };
379
380 chctl.flags[ch->number] = chctl_flag;
381 FETCHOP_STORE_OP(TO_AMO((u64)&part->sn.sn2.local_chctl_amo_va->
382 variable), FETCHOP_OR, chctl.all_flags);
383 dev_dbg(xpc_chan, "%s sent local from partid=%d, channel=%d\n",
384 chctl_flag_string, ch->partid, ch->number);
385}
386
387#define XPC_SEND_LOCAL_NOTIFY_IRQ_SN2(_ch, _ipi_f) \
388 xpc_send_local_notify_IRQ_sn2(_ch, _ipi_f, #_ipi_f)
389
390static void
391xpc_send_chctl_closerequest_sn2(struct xpc_channel *ch,
392 unsigned long *irq_flags)
393{
394 struct xpc_openclose_args *args = ch->sn.sn2.local_openclose_args;
395
396 args->reason = ch->reason;
397 XPC_SEND_NOTIFY_IRQ_SN2(ch, XPC_CHCTL_CLOSEREQUEST, irq_flags);
398}
399
400static void
401xpc_send_chctl_closereply_sn2(struct xpc_channel *ch, unsigned long *irq_flags)
402{
403 XPC_SEND_NOTIFY_IRQ_SN2(ch, XPC_CHCTL_CLOSEREPLY, irq_flags);
404}
405
406static void
407xpc_send_chctl_openrequest_sn2(struct xpc_channel *ch, unsigned long *irq_flags)
408{
409 struct xpc_openclose_args *args = ch->sn.sn2.local_openclose_args;
410
411 args->entry_size = ch->entry_size;
412 args->local_nentries = ch->local_nentries;
413 XPC_SEND_NOTIFY_IRQ_SN2(ch, XPC_CHCTL_OPENREQUEST, irq_flags);
414}
415
416static void
417xpc_send_chctl_openreply_sn2(struct xpc_channel *ch, unsigned long *irq_flags)
418{
419 struct xpc_openclose_args *args = ch->sn.sn2.local_openclose_args;
420
421 args->remote_nentries = ch->remote_nentries;
422 args->local_nentries = ch->local_nentries;
423 args->local_msgqueue_pa = xp_pa(ch->sn.sn2.local_msgqueue);
424 XPC_SEND_NOTIFY_IRQ_SN2(ch, XPC_CHCTL_OPENREPLY, irq_flags);
425}
426
427static void
428xpc_send_chctl_msgrequest_sn2(struct xpc_channel *ch)
429{
430 XPC_SEND_NOTIFY_IRQ_SN2(ch, XPC_CHCTL_MSGREQUEST, NULL);
431}
432
433static void
434xpc_send_chctl_local_msgrequest_sn2(struct xpc_channel *ch)
435{
436 XPC_SEND_LOCAL_NOTIFY_IRQ_SN2(ch, XPC_CHCTL_MSGREQUEST);
437}
438
439static void
440xpc_save_remote_msgqueue_pa_sn2(struct xpc_channel *ch,
441 unsigned long msgqueue_pa)
442{
443 ch->sn.sn2.remote_msgqueue_pa = msgqueue_pa;
444}
445
446/*
447 * This next set of functions are used to keep track of when a partition is
448 * potentially engaged in accessing memory belonging to another partition.
449 */
450
451static void
452xpc_indicate_partition_engaged_sn2(struct xpc_partition *part)
453{
454 unsigned long irq_flags;
455 struct amo *amo = (struct amo *)__va(part->sn.sn2.remote_amos_page_pa +
456 (XPC_ENGAGED_PARTITIONS_AMO_SN2 *
457 sizeof(struct amo)));
458
459 local_irq_save(irq_flags);
460
461 /* set bit corresponding to our partid in remote partition's amo */
462 FETCHOP_STORE_OP(TO_AMO((u64)&amo->variable), FETCHOP_OR,
463 BIT(sn_partition_id));
464
465 /*
466 * We must always use the nofault function regardless of whether we
467 * are on a Shub 1.1 system or a Shub 1.2 slice 0xc processor. If we
468 * didn't, we'd never know that the other partition is down and would
469 * keep sending IRQs and amos to it until the heartbeat times out.
470 */
471 (void)xp_nofault_PIOR((u64 *)GLOBAL_MMR_ADDR(NASID_GET(&amo->
472 variable),
473 xp_nofault_PIOR_target));
474
475 local_irq_restore(irq_flags);
476}
477
478static void
479xpc_indicate_partition_disengaged_sn2(struct xpc_partition *part)
480{
481 struct xpc_partition_sn2 *part_sn2 = &part->sn.sn2;
482 unsigned long irq_flags;
483 struct amo *amo = (struct amo *)__va(part_sn2->remote_amos_page_pa +
484 (XPC_ENGAGED_PARTITIONS_AMO_SN2 *
485 sizeof(struct amo)));
486
487 local_irq_save(irq_flags);
488
489 /* clear bit corresponding to our partid in remote partition's amo */
490 FETCHOP_STORE_OP(TO_AMO((u64)&amo->variable), FETCHOP_AND,
491 ~BIT(sn_partition_id));
492
493 /*
494 * We must always use the nofault function regardless of whether we
495 * are on a Shub 1.1 system or a Shub 1.2 slice 0xc processor. If we
496 * didn't, we'd never know that the other partition is down and would
497 * keep sending IRQs and amos to it until the heartbeat times out.
498 */
499 (void)xp_nofault_PIOR((u64 *)GLOBAL_MMR_ADDR(NASID_GET(&amo->
500 variable),
501 xp_nofault_PIOR_target));
502
503 local_irq_restore(irq_flags);
504
505 /*
506 * Send activate IRQ to get other side to see that we've cleared our
507 * bit in their engaged partitions amo.
508 */
509 xpc_send_activate_IRQ_sn2(part_sn2->remote_amos_page_pa,
510 cnodeid_to_nasid(0),
511 part_sn2->activate_IRQ_nasid,
512 part_sn2->activate_IRQ_phys_cpuid);
513}
514
515static void
516xpc_assume_partition_disengaged_sn2(short partid)
517{
518 struct amo *amo = xpc_vars_sn2->amos_page +
519 XPC_ENGAGED_PARTITIONS_AMO_SN2;
520
521 /* clear bit(s) based on partid mask in our partition's amo */
522 FETCHOP_STORE_OP(TO_AMO((u64)&amo->variable), FETCHOP_AND,
523 ~BIT(partid));
524}
525
526static int
527xpc_partition_engaged_sn2(short partid)
528{
529 struct amo *amo = xpc_vars_sn2->amos_page +
530 XPC_ENGAGED_PARTITIONS_AMO_SN2;
531
532 /* our partition's amo variable ANDed with partid mask */
533 return (FETCHOP_LOAD_OP(TO_AMO((u64)&amo->variable), FETCHOP_LOAD) &
534 BIT(partid)) != 0;
535}
536
537static int
538xpc_any_partition_engaged_sn2(void)
539{
540 struct amo *amo = xpc_vars_sn2->amos_page +
541 XPC_ENGAGED_PARTITIONS_AMO_SN2;
542
543 /* our partition's amo variable */
544 return FETCHOP_LOAD_OP(TO_AMO((u64)&amo->variable), FETCHOP_LOAD) != 0;
545}
546
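The engaged-partition tracking above is one amo word with one bit per partid: set with an atomic OR when engaging, cleared with an AND-NOT when disengaging, and tested with a plain load. The same bookkeeping with C11 atomics standing in for the FETCHOP operations (a sketch, not the SHUB-backed implementation):

#include <stdatomic.h>
#include <stdint.h>

static _Atomic uint64_t engaged_partitions;	/* bit N set == partid N engaged */

static void indicate_engaged(short partid)
{
	atomic_fetch_or(&engaged_partitions, UINT64_C(1) << partid);
}

static void indicate_disengaged(short partid)
{
	atomic_fetch_and(&engaged_partitions, ~(UINT64_C(1) << partid));
}

static int partition_engaged(short partid)
{
	return (atomic_load(&engaged_partitions) >> partid) & 1;
}

static int any_partition_engaged(void)
{
	return atomic_load(&engaged_partitions) != 0;
}
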
547/* original protection values for each node */
548static u64 xpc_prot_vec_sn2[MAX_NUMNODES];
549
550/*
551 * Change protections to allow amo operations on non-Shub 1.1 systems.
552 */
553static enum xp_retval
554xpc_allow_amo_ops_sn2(struct amo *amos_page)
555{
556 u64 nasid_array = 0;
557 int ret;
558
559 /*
560 * On SHUB 1.1, we cannot call sn_change_memprotect() since the BIST
561 * collides with memory operations. On those systems we call
562 * xpc_allow_amo_ops_shub_wars_1_1_sn2() instead.
563 */
564 if (!enable_shub_wars_1_1()) {
565 ret = sn_change_memprotect(ia64_tpa((u64)amos_page), PAGE_SIZE,
566 SN_MEMPROT_ACCESS_CLASS_1,
567 &nasid_array);
568 if (ret != 0)
569 return xpSalError;
570 }
571 return xpSuccess;
572}
573
574/*
575 * Change protections to allow amo operations on Shub 1.1 systems.
576 */
577static void
578xpc_allow_amo_ops_shub_wars_1_1_sn2(void)
579{
580 int node;
581 int nasid;
582
583 if (!enable_shub_wars_1_1())
584 return;
585
586 for_each_online_node(node) {
587 nasid = cnodeid_to_nasid(node);
588 /* save current protection values */
589 xpc_prot_vec_sn2[node] =
590 (u64)HUB_L((u64 *)GLOBAL_MMR_ADDR(nasid,
591 SH1_MD_DQLP_MMR_DIR_PRIVEC0));
592 /* open up everything */
593 HUB_S((u64 *)GLOBAL_MMR_ADDR(nasid,
594 SH1_MD_DQLP_MMR_DIR_PRIVEC0),
595 -1UL);
596 HUB_S((u64 *)GLOBAL_MMR_ADDR(nasid,
597 SH1_MD_DQRP_MMR_DIR_PRIVEC0),
598 -1UL);
599 }
600}
601
602static enum xp_retval
603xpc_get_partition_rsvd_page_pa_sn2(void *buf, u64 *cookie, unsigned long *rp_pa,
604 size_t *len)
605{
606 s64 status;
607 enum xp_retval ret;
608
609 status = sn_partition_reserved_page_pa((u64)buf, cookie, rp_pa, len);
610 if (status == SALRET_OK)
611 ret = xpSuccess;
612 else if (status == SALRET_MORE_PASSES)
613 ret = xpNeedMoreInfo;
614 else
615 ret = xpSalError;
616
617 return ret;
618}
619
620
621static int
622xpc_setup_rsvd_page_sn_sn2(struct xpc_rsvd_page *rp)
623{
624 struct amo *amos_page;
625 int i;
626 int ret;
627
628 xpc_vars_sn2 = XPC_RP_VARS(rp);
629
630 rp->sn.vars_pa = xp_pa(xpc_vars_sn2);
631
632 /* vars_part array follows immediately after vars */
633 xpc_vars_part_sn2 = (struct xpc_vars_part_sn2 *)((u8 *)XPC_RP_VARS(rp) +
634 XPC_RP_VARS_SIZE);
635
636 /*
637 * Before clearing xpc_vars_sn2, see if a page of amos had been
638	 * previously allocated. If not, we'll need to allocate one and set
639 * permissions so that cross-partition amos are allowed.
640 *
641 * The allocated amo page needs MCA reporting to remain disabled after
642 * XPC has unloaded. To make this work, we keep a copy of the pointer
643 * to this page (i.e., amos_page) in the struct xpc_vars_sn2 structure,
644 * which is pointed to by the reserved page, and re-use that saved copy
645 * on subsequent loads of XPC. This amo page is never freed, and its
646 * memory protections are never restricted.
647 */
648 amos_page = xpc_vars_sn2->amos_page;
649 if (amos_page == NULL) {
650 amos_page = (struct amo *)TO_AMO(uncached_alloc_page(0, 1));
651 if (amos_page == NULL) {
652 dev_err(xpc_part, "can't allocate page of amos\n");
653 return -ENOMEM;
654 }
655
656 /*
657 * Open up amo-R/W to cpu. This is done on Shub 1.1 systems
658 * when xpc_allow_amo_ops_shub_wars_1_1_sn2() is called.
659 */
660 ret = xpc_allow_amo_ops_sn2(amos_page);
661 if (ret != xpSuccess) {
662 dev_err(xpc_part, "can't allow amo operations\n");
663 uncached_free_page(__IA64_UNCACHED_OFFSET |
664 TO_PHYS((u64)amos_page), 1);
665 return -EPERM;
666 }
667 }
668
669 /* clear xpc_vars_sn2 */
670 memset(xpc_vars_sn2, 0, sizeof(struct xpc_vars_sn2));
671
672 xpc_vars_sn2->version = XPC_V_VERSION;
673 xpc_vars_sn2->activate_IRQ_nasid = cpuid_to_nasid(0);
674 xpc_vars_sn2->activate_IRQ_phys_cpuid = cpu_physical_id(0);
675 xpc_vars_sn2->vars_part_pa = xp_pa(xpc_vars_part_sn2);
676 xpc_vars_sn2->amos_page_pa = ia64_tpa((u64)amos_page);
677 xpc_vars_sn2->amos_page = amos_page; /* save for next load of XPC */
678
679 /* clear xpc_vars_part_sn2 */
680 memset((u64 *)xpc_vars_part_sn2, 0, sizeof(struct xpc_vars_part_sn2) *
681 XP_MAX_NPARTITIONS_SN2);
682
683 /* initialize the activate IRQ related amo variables */
684 for (i = 0; i < xpc_nasid_mask_nlongs; i++)
685 (void)xpc_init_IRQ_amo_sn2(XPC_ACTIVATE_IRQ_AMOS_SN2 + i);
686
687 /* initialize the engaged remote partitions related amo variables */
688 (void)xpc_init_IRQ_amo_sn2(XPC_ENGAGED_PARTITIONS_AMO_SN2);
689 (void)xpc_init_IRQ_amo_sn2(XPC_DEACTIVATE_REQUEST_AMO_SN2);
690
691 return 0;
692}
693
694static void
695xpc_increment_heartbeat_sn2(void)
696{
697 xpc_vars_sn2->heartbeat++;
698}
699
700static void
701xpc_offline_heartbeat_sn2(void)
702{
703 xpc_increment_heartbeat_sn2();
704 xpc_vars_sn2->heartbeat_offline = 1;
705}
706
707static void
708xpc_online_heartbeat_sn2(void)
709{
710 xpc_increment_heartbeat_sn2();
711 xpc_vars_sn2->heartbeat_offline = 0;
712}
713
714static void
715xpc_heartbeat_init_sn2(void)
716{
717 DBUG_ON(xpc_vars_sn2 == NULL);
718
719 bitmap_zero(xpc_vars_sn2->heartbeating_to_mask, XP_MAX_NPARTITIONS_SN2);
720 xpc_heartbeating_to_mask = &xpc_vars_sn2->heartbeating_to_mask[0];
721 xpc_online_heartbeat_sn2();
722}
723
724static void
725xpc_heartbeat_exit_sn2(void)
726{
727 xpc_offline_heartbeat_sn2();
728}
729
730static enum xp_retval
731xpc_get_remote_heartbeat_sn2(struct xpc_partition *part)
732{
733 struct xpc_vars_sn2 *remote_vars;
734 enum xp_retval ret;
735
736 remote_vars = (struct xpc_vars_sn2 *)xpc_remote_copy_buffer_sn2;
737
738 /* pull the remote vars structure that contains the heartbeat */
739 ret = xp_remote_memcpy(xp_pa(remote_vars),
740 part->sn.sn2.remote_vars_pa,
741 XPC_RP_VARS_SIZE);
742 if (ret != xpSuccess)
743 return ret;
744
745 dev_dbg(xpc_part, "partid=%d, heartbeat=%ld, last_heartbeat=%ld, "
746 "heartbeat_offline=%ld, HB_mask[0]=0x%lx\n", XPC_PARTID(part),
747 remote_vars->heartbeat, part->last_heartbeat,
748 remote_vars->heartbeat_offline,
749 remote_vars->heartbeating_to_mask[0]);
750
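	/*
	 * The remote heartbeat is considered lost if it hasn't advanced since
	 * we last looked and the partition hasn't marked itself offline, or if
	 * it is no longer heartbeating to us.
	 */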
751 if ((remote_vars->heartbeat == part->last_heartbeat &&
752 remote_vars->heartbeat_offline == 0) ||
753 !xpc_hb_allowed(sn_partition_id,
754 &remote_vars->heartbeating_to_mask)) {
755 ret = xpNoHeartbeat;
756 } else {
757 part->last_heartbeat = remote_vars->heartbeat;
758 }
759
760 return ret;
761}
762
763/*
764 * Get a copy of the remote partition's XPC variables from the reserved page.
765 *
766 * remote_vars points to a buffer that is cacheline aligned for BTE copies and
767 * assumed to be of size XPC_RP_VARS_SIZE.
768 */
769static enum xp_retval
770xpc_get_remote_vars_sn2(unsigned long remote_vars_pa,
771 struct xpc_vars_sn2 *remote_vars)
772{
773 enum xp_retval ret;
774
775 if (remote_vars_pa == 0)
776 return xpVarsNotSet;
777
778 /* pull over the cross partition variables */
779 ret = xp_remote_memcpy(xp_pa(remote_vars), remote_vars_pa,
780 XPC_RP_VARS_SIZE);
781 if (ret != xpSuccess)
782 return ret;
783
784 if (XPC_VERSION_MAJOR(remote_vars->version) !=
785 XPC_VERSION_MAJOR(XPC_V_VERSION)) {
786 return xpBadVersion;
787 }
788
789 return xpSuccess;
790}
791
792static void
793xpc_request_partition_activation_sn2(struct xpc_rsvd_page *remote_rp,
794 unsigned long remote_rp_pa, int nasid)
795{
796 xpc_send_local_activate_IRQ_sn2(nasid);
797}
798
799static void
800xpc_request_partition_reactivation_sn2(struct xpc_partition *part)
801{
802 xpc_send_local_activate_IRQ_sn2(part->sn.sn2.activate_IRQ_nasid);
803}
804
805static void
806xpc_request_partition_deactivation_sn2(struct xpc_partition *part)
807{
808 struct xpc_partition_sn2 *part_sn2 = &part->sn.sn2;
809 unsigned long irq_flags;
810 struct amo *amo = (struct amo *)__va(part_sn2->remote_amos_page_pa +
811 (XPC_DEACTIVATE_REQUEST_AMO_SN2 *
812 sizeof(struct amo)));
813
814 local_irq_save(irq_flags);
815
816 /* set bit corresponding to our partid in remote partition's amo */
817 FETCHOP_STORE_OP(TO_AMO((u64)&amo->variable), FETCHOP_OR,
818 BIT(sn_partition_id));
819
820 /*
821 * We must always use the nofault function regardless of whether we
822 * are on a Shub 1.1 system or a Shub 1.2 slice 0xc processor. If we
823 * didn't, we'd never know that the other partition is down and would
824 * keep sending IRQs and amos to it until the heartbeat times out.
825 */
826 (void)xp_nofault_PIOR((u64 *)GLOBAL_MMR_ADDR(NASID_GET(&amo->
827 variable),
828 xp_nofault_PIOR_target));
829
830 local_irq_restore(irq_flags);
831
832 /*
833 * Send activate IRQ to get other side to see that we've set our
834 * bit in their deactivate request amo.
835 */
836 xpc_send_activate_IRQ_sn2(part_sn2->remote_amos_page_pa,
837 cnodeid_to_nasid(0),
838 part_sn2->activate_IRQ_nasid,
839 part_sn2->activate_IRQ_phys_cpuid);
840}
841
842static void
843xpc_cancel_partition_deactivation_request_sn2(struct xpc_partition *part)
844{
845 unsigned long irq_flags;
846 struct amo *amo = (struct amo *)__va(part->sn.sn2.remote_amos_page_pa +
847 (XPC_DEACTIVATE_REQUEST_AMO_SN2 *
848 sizeof(struct amo)));
849
850 local_irq_save(irq_flags);
851
852 /* clear bit corresponding to our partid in remote partition's amo */
853 FETCHOP_STORE_OP(TO_AMO((u64)&amo->variable), FETCHOP_AND,
854 ~BIT(sn_partition_id));
855
856 /*
857 * We must always use the nofault function regardless of whether we
858 * are on a Shub 1.1 system or a Shub 1.2 slice 0xc processor. If we
859 * didn't, we'd never know that the other partition is down and would
860 * keep sending IRQs and amos to it until the heartbeat times out.
861 */
862 (void)xp_nofault_PIOR((u64 *)GLOBAL_MMR_ADDR(NASID_GET(&amo->
863 variable),
864 xp_nofault_PIOR_target));
865
866 local_irq_restore(irq_flags);
867}
868
869static int
870xpc_partition_deactivation_requested_sn2(short partid)
871{
872 struct amo *amo = xpc_vars_sn2->amos_page +
873 XPC_DEACTIVATE_REQUEST_AMO_SN2;
874
875 /* our partition's amo variable ANDed with partid mask */
876 return (FETCHOP_LOAD_OP(TO_AMO((u64)&amo->variable), FETCHOP_LOAD) &
877 BIT(partid)) != 0;
878}
879
880/*
881 * Update the remote partition's info.
882 */
883static void
884xpc_update_partition_info_sn2(struct xpc_partition *part, u8 remote_rp_version,
885 unsigned long *remote_rp_ts_jiffies,
886 unsigned long remote_rp_pa,
887 unsigned long remote_vars_pa,
888 struct xpc_vars_sn2 *remote_vars)
889{
890 struct xpc_partition_sn2 *part_sn2 = &part->sn.sn2;
891
892 part->remote_rp_version = remote_rp_version;
893 dev_dbg(xpc_part, " remote_rp_version = 0x%016x\n",
894 part->remote_rp_version);
895
896 part->remote_rp_ts_jiffies = *remote_rp_ts_jiffies;
897 dev_dbg(xpc_part, " remote_rp_ts_jiffies = 0x%016lx\n",
898 part->remote_rp_ts_jiffies);
899
900 part->remote_rp_pa = remote_rp_pa;
901 dev_dbg(xpc_part, " remote_rp_pa = 0x%016lx\n", part->remote_rp_pa);
902
903 part_sn2->remote_vars_pa = remote_vars_pa;
904 dev_dbg(xpc_part, " remote_vars_pa = 0x%016lx\n",
905 part_sn2->remote_vars_pa);
906
907 part->last_heartbeat = remote_vars->heartbeat;
908 dev_dbg(xpc_part, " last_heartbeat = 0x%016lx\n",
909 part->last_heartbeat);
910
911 part_sn2->remote_vars_part_pa = remote_vars->vars_part_pa;
912 dev_dbg(xpc_part, " remote_vars_part_pa = 0x%016lx\n",
913 part_sn2->remote_vars_part_pa);
914
915 part_sn2->activate_IRQ_nasid = remote_vars->activate_IRQ_nasid;
916 dev_dbg(xpc_part, " activate_IRQ_nasid = 0x%x\n",
917 part_sn2->activate_IRQ_nasid);
918
919 part_sn2->activate_IRQ_phys_cpuid =
920 remote_vars->activate_IRQ_phys_cpuid;
921 dev_dbg(xpc_part, " activate_IRQ_phys_cpuid = 0x%x\n",
922 part_sn2->activate_IRQ_phys_cpuid);
923
924 part_sn2->remote_amos_page_pa = remote_vars->amos_page_pa;
925 dev_dbg(xpc_part, " remote_amos_page_pa = 0x%lx\n",
926 part_sn2->remote_amos_page_pa);
927
928 part_sn2->remote_vars_version = remote_vars->version;
929 dev_dbg(xpc_part, " remote_vars_version = 0x%x\n",
930 part_sn2->remote_vars_version);
931}
932
933/*
934 * Prior code has determined the nasid that generated an activate IRQ.
935 * Inspect that nasid to determine if its partition needs to be activated
936 * or deactivated.
937 *
938 * A partition is considered "awaiting activation" if our partition
939 * flags indicate it is not active and it has a heartbeat. A
940 * partition is considered "awaiting deactivation" if our partition
941 * flags indicate it is active but it has no heartbeat or it is not
942 * sending its heartbeat to us.
943 *
944 * To determine the heartbeat, the remote nasid must have a properly
945 * initialized reserved page.
946 */
947static void
948xpc_identify_activate_IRQ_req_sn2(int nasid)
949{
950 struct xpc_rsvd_page *remote_rp;
951 struct xpc_vars_sn2 *remote_vars;
952 unsigned long remote_rp_pa;
953 unsigned long remote_vars_pa;
954 int remote_rp_version;
955 int reactivate = 0;
956 unsigned long remote_rp_ts_jiffies = 0;
957 short partid;
958 struct xpc_partition *part;
959 struct xpc_partition_sn2 *part_sn2;
960 enum xp_retval ret;
961
962 /* pull over the reserved page structure */
963
964 remote_rp = (struct xpc_rsvd_page *)xpc_remote_copy_buffer_sn2;
965
966 ret = xpc_get_remote_rp(nasid, NULL, remote_rp, &remote_rp_pa);
967 if (ret != xpSuccess) {
968 dev_warn(xpc_part, "unable to get reserved page from nasid %d, "
969 "which sent interrupt, reason=%d\n", nasid, ret);
970 return;
971 }
972
973 remote_vars_pa = remote_rp->sn.vars_pa;
974 remote_rp_version = remote_rp->version;
975 remote_rp_ts_jiffies = remote_rp->ts_jiffies;
976
977 partid = remote_rp->SAL_partid;
978 part = &xpc_partitions[partid];
979 part_sn2 = &part->sn.sn2;
980
981 /* pull over the cross partition variables */
982
983 remote_vars = (struct xpc_vars_sn2 *)xpc_remote_copy_buffer_sn2;
984
985 ret = xpc_get_remote_vars_sn2(remote_vars_pa, remote_vars);
986 if (ret != xpSuccess) {
987 dev_warn(xpc_part, "unable to get XPC variables from nasid %d, "
988 "which sent interrupt, reason=%d\n", nasid, ret);
989
990 XPC_DEACTIVATE_PARTITION(part, ret);
991 return;
992 }
993
994 part->activate_IRQ_rcvd++;
995
996 dev_dbg(xpc_part, "partid for nasid %d is %d; IRQs = %d; HB = "
997 "%ld:0x%lx\n", (int)nasid, (int)partid, part->activate_IRQ_rcvd,
998 remote_vars->heartbeat, remote_vars->heartbeating_to_mask[0]);
999
1000 if (xpc_partition_disengaged(part) &&
1001 part->act_state == XPC_P_AS_INACTIVE) {
1002
1003 xpc_update_partition_info_sn2(part, remote_rp_version,
1004 &remote_rp_ts_jiffies,
1005 remote_rp_pa, remote_vars_pa,
1006 remote_vars);
1007
1008 if (xpc_partition_deactivation_requested_sn2(partid)) {
1009 /*
1010 * Other side is waiting on us to deactivate even though
1011 * we already have.
1012 */
1013 return;
1014 }
1015
1016 xpc_activate_partition(part);
1017 return;
1018 }
1019
1020 DBUG_ON(part->remote_rp_version == 0);
1021 DBUG_ON(part_sn2->remote_vars_version == 0);
1022
1023 if (remote_rp_ts_jiffies != part->remote_rp_ts_jiffies) {
1024
1025 /* the other side rebooted */
1026
1027 DBUG_ON(xpc_partition_engaged_sn2(partid));
1028 DBUG_ON(xpc_partition_deactivation_requested_sn2(partid));
1029
1030 xpc_update_partition_info_sn2(part, remote_rp_version,
1031 &remote_rp_ts_jiffies,
1032 remote_rp_pa, remote_vars_pa,
1033 remote_vars);
1034 reactivate = 1;
1035 }
1036
1037 if (part->disengage_timeout > 0 && !xpc_partition_disengaged(part)) {
1038 /* still waiting on other side to disengage from us */
1039 return;
1040 }
1041
1042 if (reactivate)
1043 XPC_DEACTIVATE_PARTITION(part, xpReactivating);
1044 else if (xpc_partition_deactivation_requested_sn2(partid))
1045 XPC_DEACTIVATE_PARTITION(part, xpOtherGoingDown);
1046}
1047
1048/*
1049 * Loop through the activation amo variables and process any bits
1050 * which are set. Each bit indicates a nasid sending a partition
1051 * activation or deactivation request.
1052 *
1053 * Return the number of IRQs detected.
1054 */
1055int
1056xpc_identify_activate_IRQ_sender_sn2(void)
1057{
1058 int l;
1059 int b;
1060 unsigned long nasid_mask_long;
1061 u64 nasid; /* remote nasid */
1062 int n_IRQs_detected = 0;
1063 struct amo *act_amos;
1064
1065 act_amos = xpc_vars_sn2->amos_page + XPC_ACTIVATE_IRQ_AMOS_SN2;
1066
1067 /* scan through activate amo variables looking for non-zero entries */
1068 for (l = 0; l < xpc_nasid_mask_nlongs; l++) {
1069
1070 if (xpc_exiting)
1071 break;
1072
1073 nasid_mask_long = xpc_receive_IRQ_amo_sn2(&act_amos[l]);
1074
1075 b = find_first_bit(&nasid_mask_long, BITS_PER_LONG);
1076 if (b >= BITS_PER_LONG) {
1077 /* no IRQs from nasids in this amo variable */
1078 continue;
1079 }
1080
1081 dev_dbg(xpc_part, "amo[%d] gave back 0x%lx\n", l,
1082 nasid_mask_long);
1083
1084 /*
1085 * If this nasid has been added to the machine since
1086 * our partition was reset, this will retain the
1087			 * remote nasid in our reserved page's machine mask.
1088 * This is used in the event of module reload.
1089 */
1090 xpc_mach_nasids[l] |= nasid_mask_long;
1091
1092 /* locate the nasid(s) which sent interrupts */
1093
1094 do {
1095 n_IRQs_detected++;
1096 nasid = (l * BITS_PER_LONG + b) * 2;
1097 dev_dbg(xpc_part, "interrupt from nasid %ld\n", nasid);
1098 xpc_identify_activate_IRQ_req_sn2(nasid);
1099
1100 b = find_next_bit(&nasid_mask_long, BITS_PER_LONG,
1101 b + 1);
1102 } while (b < BITS_PER_LONG);
1103 }
1104 return n_IRQs_detected;
1105}
1106
1107static void
1108xpc_process_activate_IRQ_rcvd_sn2(void)
1109{
1110 unsigned long irq_flags;
1111 int n_IRQs_expected;
1112 int n_IRQs_detected;
1113
1114 DBUG_ON(xpc_activate_IRQ_rcvd == 0);
1115
1116 spin_lock_irqsave(&xpc_activate_IRQ_rcvd_lock, irq_flags);
1117 n_IRQs_expected = xpc_activate_IRQ_rcvd;
1118 xpc_activate_IRQ_rcvd = 0;
1119 spin_unlock_irqrestore(&xpc_activate_IRQ_rcvd_lock, irq_flags);
1120
1121 n_IRQs_detected = xpc_identify_activate_IRQ_sender_sn2();
1122 if (n_IRQs_detected < n_IRQs_expected) {
1123 /* retry once to help avoid missing amo */
1124 (void)xpc_identify_activate_IRQ_sender_sn2();
1125 }
1126}
1127
1128/*
1129 * Set up the channel structures that are sn2 specific.
1130 */
1131static enum xp_retval
1132xpc_setup_ch_structures_sn_sn2(struct xpc_partition *part)
1133{
1134 struct xpc_partition_sn2 *part_sn2 = &part->sn.sn2;
1135 struct xpc_channel_sn2 *ch_sn2;
1136 enum xp_retval retval;
1137 int ret;
1138 int cpuid;
1139 int ch_number;
1140 struct timer_list *timer;
1141 short partid = XPC_PARTID(part);
1142
1143 /* allocate all the required GET/PUT values */
1144
1145 part_sn2->local_GPs =
1146 xpc_kzalloc_cacheline_aligned(XPC_GP_SIZE, GFP_KERNEL,
1147 &part_sn2->local_GPs_base);
1148 if (part_sn2->local_GPs == NULL) {
1149 dev_err(xpc_chan, "can't get memory for local get/put "
1150 "values\n");
1151 return xpNoMemory;
1152 }
1153
1154 part_sn2->remote_GPs =
1155 xpc_kzalloc_cacheline_aligned(XPC_GP_SIZE, GFP_KERNEL,
1156 &part_sn2->remote_GPs_base);
1157 if (part_sn2->remote_GPs == NULL) {
1158 dev_err(xpc_chan, "can't get memory for remote get/put "
1159 "values\n");
1160 retval = xpNoMemory;
1161 goto out_1;
1162 }
1163
1164 part_sn2->remote_GPs_pa = 0;
1165
1166 /* allocate all the required open and close args */
1167
1168 part_sn2->local_openclose_args =
1169 xpc_kzalloc_cacheline_aligned(XPC_OPENCLOSE_ARGS_SIZE,
1170 GFP_KERNEL, &part_sn2->
1171 local_openclose_args_base);
1172 if (part_sn2->local_openclose_args == NULL) {
1173 dev_err(xpc_chan, "can't get memory for local connect args\n");
1174 retval = xpNoMemory;
1175 goto out_2;
1176 }
1177
1178 part_sn2->remote_openclose_args_pa = 0;
1179
1180 part_sn2->local_chctl_amo_va = xpc_init_IRQ_amo_sn2(partid);
1181
1182 part_sn2->notify_IRQ_nasid = 0;
1183 part_sn2->notify_IRQ_phys_cpuid = 0;
1184 part_sn2->remote_chctl_amo_va = NULL;
1185
1186 sprintf(part_sn2->notify_IRQ_owner, "xpc%02d", partid);
1187 ret = request_irq(SGI_XPC_NOTIFY, xpc_handle_notify_IRQ_sn2,
1188 IRQF_SHARED, part_sn2->notify_IRQ_owner,
1189 (void *)(u64)partid);
1190 if (ret != 0) {
1191 dev_err(xpc_chan, "can't register NOTIFY IRQ handler, "
1192 "errno=%d\n", -ret);
1193 retval = xpLackOfResources;
1194 goto out_3;
1195 }
1196
1197	/* Set up a timer to check for dropped notify IRQs */
1198 timer = &part_sn2->dropped_notify_IRQ_timer;
1199 init_timer(timer);
1200 timer->function =
1201 (void (*)(unsigned long))xpc_check_for_dropped_notify_IRQ_sn2;
1202 timer->data = (unsigned long)part;
1203 timer->expires = jiffies + XPC_DROPPED_NOTIFY_IRQ_WAIT_INTERVAL;
1204 add_timer(timer);
1205
1206 for (ch_number = 0; ch_number < part->nchannels; ch_number++) {
1207 ch_sn2 = &part->channels[ch_number].sn.sn2;
1208
1209 ch_sn2->local_GP = &part_sn2->local_GPs[ch_number];
1210 ch_sn2->local_openclose_args =
1211 &part_sn2->local_openclose_args[ch_number];
1212
1213 mutex_init(&ch_sn2->msg_to_pull_mutex);
1214 }
1215
1216 /*
1217	 * Set up the per partition specific variables required by the
1218 * remote partition to establish channel connections with us.
1219 *
1220 * The setting of the magic # indicates that these per partition
1221 * specific variables are ready to be used.
1222 */
1223 xpc_vars_part_sn2[partid].GPs_pa = xp_pa(part_sn2->local_GPs);
1224 xpc_vars_part_sn2[partid].openclose_args_pa =
1225 xp_pa(part_sn2->local_openclose_args);
1226 xpc_vars_part_sn2[partid].chctl_amo_pa =
1227 xp_pa(part_sn2->local_chctl_amo_va);
1228 cpuid = raw_smp_processor_id(); /* any CPU in this partition will do */
1229 xpc_vars_part_sn2[partid].notify_IRQ_nasid = cpuid_to_nasid(cpuid);
1230 xpc_vars_part_sn2[partid].notify_IRQ_phys_cpuid =
1231 cpu_physical_id(cpuid);
1232 xpc_vars_part_sn2[partid].nchannels = part->nchannels;
1233 xpc_vars_part_sn2[partid].magic = XPC_VP_MAGIC1_SN2;
1234
1235 return xpSuccess;
1236
1237 /* setup of ch structures failed */
1238out_3:
1239 kfree(part_sn2->local_openclose_args_base);
1240 part_sn2->local_openclose_args = NULL;
1241out_2:
1242 kfree(part_sn2->remote_GPs_base);
1243 part_sn2->remote_GPs = NULL;
1244out_1:
1245 kfree(part_sn2->local_GPs_base);
1246 part_sn2->local_GPs = NULL;
1247 return retval;
1248}
1249
1250/*
1251 * Tear down the channel structures that are sn2 specific.
1252 */
1253static void
1254xpc_teardown_ch_structures_sn_sn2(struct xpc_partition *part)
1255{
1256 struct xpc_partition_sn2 *part_sn2 = &part->sn.sn2;
1257 short partid = XPC_PARTID(part);
1258
1259 /*
1260 * Indicate that the variables specific to the remote partition are no
1261 * longer available for its use.
1262 */
1263 xpc_vars_part_sn2[partid].magic = 0;
1264
1265 /* in case we've still got outstanding timers registered... */
1266 del_timer_sync(&part_sn2->dropped_notify_IRQ_timer);
1267 free_irq(SGI_XPC_NOTIFY, (void *)(u64)partid);
1268
1269 kfree(part_sn2->local_openclose_args_base);
1270 part_sn2->local_openclose_args = NULL;
1271 kfree(part_sn2->remote_GPs_base);
1272 part_sn2->remote_GPs = NULL;
1273 kfree(part_sn2->local_GPs_base);
1274 part_sn2->local_GPs = NULL;
1275 part_sn2->local_chctl_amo_va = NULL;
1276}
1277
1278/*
1279 * Create a wrapper that hides the underlying mechanism for pulling a cacheline
1280 * (or multiple cachelines) from a remote partition.
1281 *
1282 * src_pa must be a cacheline aligned physical address on the remote partition.
1283 * dst must be a cacheline aligned virtual address on this partition.
1284 * cnt must be a multiple of the cacheline size.
1285 */
1286/* ??? Replace this function by call to xp_remote_memcpy() or bte_copy()? */
1287static enum xp_retval
1288xpc_pull_remote_cachelines_sn2(struct xpc_partition *part, void *dst,
1289 const unsigned long src_pa, size_t cnt)
1290{
1291 enum xp_retval ret;
1292
1293 DBUG_ON(src_pa != L1_CACHE_ALIGN(src_pa));
1294 DBUG_ON((unsigned long)dst != L1_CACHE_ALIGN((unsigned long)dst));
1295 DBUG_ON(cnt != L1_CACHE_ALIGN(cnt));
1296
1297 if (part->act_state == XPC_P_AS_DEACTIVATING)
1298 return part->reason;
1299
1300 ret = xp_remote_memcpy(xp_pa(dst), src_pa, cnt);
1301 if (ret != xpSuccess) {
1302 dev_dbg(xpc_chan, "xp_remote_memcpy() from partition %d failed,"
1303 " ret=%d\n", XPC_PARTID(part), ret);
1304 }
1305 return ret;
1306}
1307
1308/*
1309 * Pull the remote per partition specific variables from the specified
1310 * partition.
1311 */
1312static enum xp_retval
1313xpc_pull_remote_vars_part_sn2(struct xpc_partition *part)
1314{
1315 struct xpc_partition_sn2 *part_sn2 = &part->sn.sn2;
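	/*
	 * buffer is twice the cacheline size so that a cacheline aligned
	 * entry can be carved out of it below, regardless of how the stack
	 * itself happens to be aligned
	 */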
1316 u8 buffer[L1_CACHE_BYTES * 2];
1317 struct xpc_vars_part_sn2 *pulled_entry_cacheline =
1318 (struct xpc_vars_part_sn2 *)L1_CACHE_ALIGN((u64)buffer);
1319 struct xpc_vars_part_sn2 *pulled_entry;
1320 unsigned long remote_entry_cacheline_pa;
1321 unsigned long remote_entry_pa;
1322 short partid = XPC_PARTID(part);
1323 enum xp_retval ret;
1324
1325 /* pull the cacheline that contains the variables we're interested in */
1326
1327 DBUG_ON(part_sn2->remote_vars_part_pa !=
1328 L1_CACHE_ALIGN(part_sn2->remote_vars_part_pa));
1329 DBUG_ON(sizeof(struct xpc_vars_part_sn2) != L1_CACHE_BYTES / 2);
1330
1331 remote_entry_pa = part_sn2->remote_vars_part_pa +
1332 sn_partition_id * sizeof(struct xpc_vars_part_sn2);
1333
1334 remote_entry_cacheline_pa = (remote_entry_pa & ~(L1_CACHE_BYTES - 1));
1335
1336 pulled_entry = (struct xpc_vars_part_sn2 *)((u64)pulled_entry_cacheline
1337 + (remote_entry_pa &
1338 (L1_CACHE_BYTES - 1)));
1339
1340 ret = xpc_pull_remote_cachelines_sn2(part, pulled_entry_cacheline,
1341 remote_entry_cacheline_pa,
1342 L1_CACHE_BYTES);
1343 if (ret != xpSuccess) {
1344 dev_dbg(xpc_chan, "failed to pull XPC vars_part from "
1345 "partition %d, ret=%d\n", partid, ret);
1346 return ret;
1347 }
1348
1349 /* see if they've been set up yet */
1350
1351 if (pulled_entry->magic != XPC_VP_MAGIC1_SN2 &&
1352 pulled_entry->magic != XPC_VP_MAGIC2_SN2) {
1353
1354 if (pulled_entry->magic != 0) {
1355 dev_dbg(xpc_chan, "partition %d's XPC vars_part for "
1356 "partition %d has bad magic value (=0x%lx)\n",
1357 partid, sn_partition_id, pulled_entry->magic);
1358 return xpBadMagic;
1359 }
1360
1361 /* they've not been initialized yet */
1362 return xpRetry;
1363 }
1364
1365 if (xpc_vars_part_sn2[partid].magic == XPC_VP_MAGIC1_SN2) {
1366
1367 /* validate the variables */
1368
1369 if (pulled_entry->GPs_pa == 0 ||
1370 pulled_entry->openclose_args_pa == 0 ||
1371 pulled_entry->chctl_amo_pa == 0) {
1372
1373 dev_err(xpc_chan, "partition %d's XPC vars_part for "
1374 "partition %d are not valid\n", partid,
1375 sn_partition_id);
1376 return xpInvalidAddress;
1377 }
1378
1379 /* the variables we imported look to be valid */
1380
1381 part_sn2->remote_GPs_pa = pulled_entry->GPs_pa;
1382 part_sn2->remote_openclose_args_pa =
1383 pulled_entry->openclose_args_pa;
1384 part_sn2->remote_chctl_amo_va =
1385 (struct amo *)__va(pulled_entry->chctl_amo_pa);
1386 part_sn2->notify_IRQ_nasid = pulled_entry->notify_IRQ_nasid;
1387 part_sn2->notify_IRQ_phys_cpuid =
1388 pulled_entry->notify_IRQ_phys_cpuid;
1389
1390 if (part->nchannels > pulled_entry->nchannels)
1391 part->nchannels = pulled_entry->nchannels;
1392
1393 /* let the other side know that we've pulled their variables */
1394
1395 xpc_vars_part_sn2[partid].magic = XPC_VP_MAGIC2_SN2;
1396 }
1397
1398 if (pulled_entry->magic == XPC_VP_MAGIC1_SN2)
1399 return xpRetry;
1400
1401 return xpSuccess;
1402}
1403
1404/*
1405 * Establish first contact with the remote partition. This involves pulling
1406 * the XPC per partition variables from the remote partition and waiting for
1407 * the remote partition to pull ours.
1408 */
1409static enum xp_retval
1410xpc_make_first_contact_sn2(struct xpc_partition *part)
1411{
1412 struct xpc_partition_sn2 *part_sn2 = &part->sn.sn2;
1413 enum xp_retval ret;
1414
1415 /*
1416 * Register the remote partition's amos with SAL so it can handle
1417 * and cleanup errors within that address range should the remote
1418 * partition go down. We don't unregister this range because it is
1419 * difficult to tell when outstanding writes to the remote partition
1420 * are finished and thus when it is safe to unregister. This should
1421 * not result in wasted space in the SAL xp_addr_region table because
1422 * we should get the same page for remote_amos_page_pa after module
1423 * reloads and system reboots.
1424 */
1425 if (sn_register_xp_addr_region(part_sn2->remote_amos_page_pa,
1426 PAGE_SIZE, 1) < 0) {
1427 dev_warn(xpc_part, "xpc_activating(%d) failed to register "
1428 "xp_addr region\n", XPC_PARTID(part));
1429
1430 ret = xpPhysAddrRegFailed;
1431 XPC_DEACTIVATE_PARTITION(part, ret);
1432 return ret;
1433 }
1434
1435 /*
1436 * Send activate IRQ to get other side to activate if they've not
1437 * already begun to do so.
1438 */
1439 xpc_send_activate_IRQ_sn2(part_sn2->remote_amos_page_pa,
1440 cnodeid_to_nasid(0),
1441 part_sn2->activate_IRQ_nasid,
1442 part_sn2->activate_IRQ_phys_cpuid);
1443
1444 while ((ret = xpc_pull_remote_vars_part_sn2(part)) != xpSuccess) {
1445 if (ret != xpRetry) {
1446 XPC_DEACTIVATE_PARTITION(part, ret);
1447 return ret;
1448 }
1449
1450 dev_dbg(xpc_part, "waiting to make first contact with "
1451 "partition %d\n", XPC_PARTID(part));
1452
1453 /* wait a 1/4 of a second or so */
1454 (void)msleep_interruptible(250);
1455
1456 if (part->act_state == XPC_P_AS_DEACTIVATING)
1457 return part->reason;
1458 }
1459
1460 return xpSuccess;
1461}
1462
1463/*
1464 * Get the chctl flags and pull the openclose args and/or remote GPs as needed.
1465 */
1466static u64
1467xpc_get_chctl_all_flags_sn2(struct xpc_partition *part)
1468{
1469 struct xpc_partition_sn2 *part_sn2 = &part->sn.sn2;
1470 unsigned long irq_flags;
1471 union xpc_channel_ctl_flags chctl;
1472 enum xp_retval ret;
1473
1474 /*
1475 * See if there are any chctl flags to be handled.
1476 */
1477
1478 spin_lock_irqsave(&part->chctl_lock, irq_flags);
1479 chctl = part->chctl;
1480 if (chctl.all_flags != 0)
1481 part->chctl.all_flags = 0;
1482
1483 spin_unlock_irqrestore(&part->chctl_lock, irq_flags);
1484
1485 if (xpc_any_openclose_chctl_flags_set(&chctl)) {
1486 ret = xpc_pull_remote_cachelines_sn2(part, part->
1487 remote_openclose_args,
1488 part_sn2->
1489 remote_openclose_args_pa,
1490 XPC_OPENCLOSE_ARGS_SIZE);
1491 if (ret != xpSuccess) {
1492 XPC_DEACTIVATE_PARTITION(part, ret);
1493
1494 dev_dbg(xpc_chan, "failed to pull openclose args from "
1495 "partition %d, ret=%d\n", XPC_PARTID(part),
1496 ret);
1497
1498 /* don't bother processing chctl flags anymore */
1499 chctl.all_flags = 0;
1500 }
1501 }
1502
1503 if (xpc_any_msg_chctl_flags_set(&chctl)) {
1504 ret = xpc_pull_remote_cachelines_sn2(part, part_sn2->remote_GPs,
1505 part_sn2->remote_GPs_pa,
1506 XPC_GP_SIZE);
1507 if (ret != xpSuccess) {
1508 XPC_DEACTIVATE_PARTITION(part, ret);
1509
1510 dev_dbg(xpc_chan, "failed to pull GPs from partition "
1511 "%d, ret=%d\n", XPC_PARTID(part), ret);
1512
1513 /* don't bother processing chctl flags anymore */
1514 chctl.all_flags = 0;
1515 }
1516 }
1517
1518 return chctl.all_flags;
1519}
1520
1521/*
1522 * Allocate the local message queue and the notify queue.
1523 */
1524static enum xp_retval
1525xpc_allocate_local_msgqueue_sn2(struct xpc_channel *ch)
1526{
1527 struct xpc_channel_sn2 *ch_sn2 = &ch->sn.sn2;
1528 unsigned long irq_flags;
1529 int nentries;
1530 size_t nbytes;
1531
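	/*
	 * Attempt the allocation; if memory is tight, retry with progressively
	 * fewer entries rather than failing outright.
	 */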
1532 for (nentries = ch->local_nentries; nentries > 0; nentries--) {
1533
1534 nbytes = nentries * ch->entry_size;
1535 ch_sn2->local_msgqueue =
1536 xpc_kzalloc_cacheline_aligned(nbytes, GFP_KERNEL,
1537 &ch_sn2->local_msgqueue_base);
1538 if (ch_sn2->local_msgqueue == NULL)
1539 continue;
1540
1541 nbytes = nentries * sizeof(struct xpc_notify_sn2);
1542 ch_sn2->notify_queue = kzalloc(nbytes, GFP_KERNEL);
1543 if (ch_sn2->notify_queue == NULL) {
1544 kfree(ch_sn2->local_msgqueue_base);
1545 ch_sn2->local_msgqueue = NULL;
1546 continue;
1547 }
1548
1549 spin_lock_irqsave(&ch->lock, irq_flags);
1550 if (nentries < ch->local_nentries) {
1551 dev_dbg(xpc_chan, "nentries=%d local_nentries=%d, "
1552 "partid=%d, channel=%d\n", nentries,
1553 ch->local_nentries, ch->partid, ch->number);
1554
1555 ch->local_nentries = nentries;
1556 }
1557 spin_unlock_irqrestore(&ch->lock, irq_flags);
1558 return xpSuccess;
1559 }
1560
1561 dev_dbg(xpc_chan, "can't get memory for local message queue and notify "
1562 "queue, partid=%d, channel=%d\n", ch->partid, ch->number);
1563 return xpNoMemory;
1564}
1565
1566/*
1567 * Allocate the cached remote message queue.
1568 */
1569static enum xp_retval
1570xpc_allocate_remote_msgqueue_sn2(struct xpc_channel *ch)
1571{
1572 struct xpc_channel_sn2 *ch_sn2 = &ch->sn.sn2;
1573 unsigned long irq_flags;
1574 int nentries;
1575 size_t nbytes;
1576
1577 DBUG_ON(ch->remote_nentries <= 0);
1578
1579 for (nentries = ch->remote_nentries; nentries > 0; nentries--) {
1580
1581 nbytes = nentries * ch->entry_size;
1582 ch_sn2->remote_msgqueue =
1583 xpc_kzalloc_cacheline_aligned(nbytes, GFP_KERNEL, &ch_sn2->
1584 remote_msgqueue_base);
1585 if (ch_sn2->remote_msgqueue == NULL)
1586 continue;
1587
1588 spin_lock_irqsave(&ch->lock, irq_flags);
1589 if (nentries < ch->remote_nentries) {
1590 dev_dbg(xpc_chan, "nentries=%d remote_nentries=%d, "
1591 "partid=%d, channel=%d\n", nentries,
1592 ch->remote_nentries, ch->partid, ch->number);
1593
1594 ch->remote_nentries = nentries;
1595 }
1596 spin_unlock_irqrestore(&ch->lock, irq_flags);
1597 return xpSuccess;
1598 }
1599
1600 dev_dbg(xpc_chan, "can't get memory for cached remote message queue, "
1601 "partid=%d, channel=%d\n", ch->partid, ch->number);
1602 return xpNoMemory;
1603}
1604
1605/*
1606 * Allocate message queues and other stuff associated with a channel.
1607 *
1608 * Note: Assumes all of the channel sizes are filled in.
1609 */
1610static enum xp_retval
1611xpc_setup_msg_structures_sn2(struct xpc_channel *ch)
1612{
1613 struct xpc_channel_sn2 *ch_sn2 = &ch->sn.sn2;
1614 enum xp_retval ret;
1615
1616 DBUG_ON(ch->flags & XPC_C_SETUP);
1617
1618 ret = xpc_allocate_local_msgqueue_sn2(ch);
1619 if (ret == xpSuccess) {
1620
1621 ret = xpc_allocate_remote_msgqueue_sn2(ch);
1622 if (ret != xpSuccess) {
1623 kfree(ch_sn2->local_msgqueue_base);
1624 ch_sn2->local_msgqueue = NULL;
1625 kfree(ch_sn2->notify_queue);
1626 ch_sn2->notify_queue = NULL;
1627 }
1628 }
1629 return ret;
1630}
1631
1632/*
1633 * Free up message queues and other stuff that were allocated for the specified
1634 * channel.
1635 */
1636static void
1637xpc_teardown_msg_structures_sn2(struct xpc_channel *ch)
1638{
1639 struct xpc_channel_sn2 *ch_sn2 = &ch->sn.sn2;
1640
1641 DBUG_ON(!spin_is_locked(&ch->lock));
1642
1643 ch_sn2->remote_msgqueue_pa = 0;
1644
1645 ch_sn2->local_GP->get = 0;
1646 ch_sn2->local_GP->put = 0;
1647 ch_sn2->remote_GP.get = 0;
1648 ch_sn2->remote_GP.put = 0;
1649 ch_sn2->w_local_GP.get = 0;
1650 ch_sn2->w_local_GP.put = 0;
1651 ch_sn2->w_remote_GP.get = 0;
1652 ch_sn2->w_remote_GP.put = 0;
1653 ch_sn2->next_msg_to_pull = 0;
1654
1655 if (ch->flags & XPC_C_SETUP) {
1656 dev_dbg(xpc_chan, "ch->flags=0x%x, partid=%d, channel=%d\n",
1657 ch->flags, ch->partid, ch->number);
1658
1659 kfree(ch_sn2->local_msgqueue_base);
1660 ch_sn2->local_msgqueue = NULL;
1661 kfree(ch_sn2->remote_msgqueue_base);
1662 ch_sn2->remote_msgqueue = NULL;
1663 kfree(ch_sn2->notify_queue);
1664 ch_sn2->notify_queue = NULL;
1665 }
1666}
1667
1668/*
1669 * Notify those who wanted to be notified upon delivery of their message.
1670 */
1671static void
1672xpc_notify_senders_sn2(struct xpc_channel *ch, enum xp_retval reason, s64 put)
1673{
1674 struct xpc_notify_sn2 *notify;
1675 u8 notify_type;
1676 s64 get = ch->sn.sn2.w_remote_GP.get - 1;
1677
1678 while (++get < put && atomic_read(&ch->n_to_notify) > 0) {
1679
1680 notify = &ch->sn.sn2.notify_queue[get % ch->local_nentries];
1681
1682 /*
1683 * See if the notify entry indicates it was associated with
1684		 * a message whose sender wants to be notified. It is possible
1685 * that it is, but someone else is doing or has done the
1686 * notification.
1687 */
1688 notify_type = notify->type;
1689 if (notify_type == 0 ||
1690 cmpxchg(&notify->type, notify_type, 0) != notify_type) {
1691 continue;
1692 }
1693
1694 DBUG_ON(notify_type != XPC_N_CALL);
1695
1696 atomic_dec(&ch->n_to_notify);
1697
1698 if (notify->func != NULL) {
1699 dev_dbg(xpc_chan, "notify->func() called, notify=0x%p "
1700 "msg_number=%ld partid=%d channel=%d\n",
1701 (void *)notify, get, ch->partid, ch->number);
1702
1703 notify->func(reason, ch->partid, ch->number,
1704 notify->key);
1705
1706 dev_dbg(xpc_chan, "notify->func() returned, notify=0x%p"
1707 " msg_number=%ld partid=%d channel=%d\n",
1708 (void *)notify, get, ch->partid, ch->number);
1709 }
1710 }
1711}
1712
1713static void
1714xpc_notify_senders_of_disconnect_sn2(struct xpc_channel *ch)
1715{
1716 xpc_notify_senders_sn2(ch, ch->reason, ch->sn.sn2.w_local_GP.put);
1717}
1718
1719/*
1720 * Clear the msg flags of the local message queue entries that the other
 * side has now received (from the cached w_remote_GP.get up to the newly
 * pulled remote_GP.get), so those slots can be reused by xpc_allocate_msg_sn2().
1721 */
1722static inline void
1723xpc_clear_local_msgqueue_flags_sn2(struct xpc_channel *ch)
1724{
1725 struct xpc_channel_sn2 *ch_sn2 = &ch->sn.sn2;
1726 struct xpc_msg_sn2 *msg;
1727 s64 get;
1728
1729 get = ch_sn2->w_remote_GP.get;
1730 do {
1731 msg = (struct xpc_msg_sn2 *)((u64)ch_sn2->local_msgqueue +
1732 (get % ch->local_nentries) *
1733 ch->entry_size);
1734 msg->flags = 0;
1735 } while (++get < ch_sn2->remote_GP.get);
1736}
1737
1738/*
1739 * Clear the msg flags of the cached remote message queue entries covering
 * the messages newly sent by the other side (from the cached w_remote_GP.put
 * up to the newly pulled remote_GP.put).
1740 */
1741static inline void
1742xpc_clear_remote_msgqueue_flags_sn2(struct xpc_channel *ch)
1743{
1744 struct xpc_channel_sn2 *ch_sn2 = &ch->sn.sn2;
1745 struct xpc_msg_sn2 *msg;
1746 s64 put;
1747
1748 put = ch_sn2->w_remote_GP.put;
1749 do {
1750 msg = (struct xpc_msg_sn2 *)((u64)ch_sn2->remote_msgqueue +
1751 (put % ch->remote_nentries) *
1752 ch->entry_size);
1753 msg->flags = 0;
1754 } while (++put < ch_sn2->remote_GP.put);
1755}
1756
1757static int
1758xpc_n_of_deliverable_payloads_sn2(struct xpc_channel *ch)
1759{
1760 return ch->sn.sn2.w_remote_GP.put - ch->sn.sn2.w_local_GP.get;
1761}
1762
1763static void
1764xpc_process_msg_chctl_flags_sn2(struct xpc_partition *part, int ch_number)
1765{
1766 struct xpc_channel *ch = &part->channels[ch_number];
1767 struct xpc_channel_sn2 *ch_sn2 = &ch->sn.sn2;
1768 int npayloads_sent;
1769
1770 ch_sn2->remote_GP = part->sn.sn2.remote_GPs[ch_number];
1771
1772 /* See what, if anything, has changed for each connected channel */
1773
1774 xpc_msgqueue_ref(ch);
1775
1776 if (ch_sn2->w_remote_GP.get == ch_sn2->remote_GP.get &&
1777 ch_sn2->w_remote_GP.put == ch_sn2->remote_GP.put) {
1778 /* nothing changed since GPs were last pulled */
1779 xpc_msgqueue_deref(ch);
1780 return;
1781 }
1782
1783 if (!(ch->flags & XPC_C_CONNECTED)) {
1784 xpc_msgqueue_deref(ch);
1785 return;
1786 }
1787
1788 /*
1789 * First check to see if messages recently sent by us have been
1790 * received by the other side. (The remote GET value will have
1791 * changed since we last looked at it.)
1792 */
1793
1794 if (ch_sn2->w_remote_GP.get != ch_sn2->remote_GP.get) {
1795
1796 /*
1797 * We need to notify any senders that want to be notified
1798 * that their sent messages have been received by their
1799 * intended recipients. We need to do this before updating
1800 * w_remote_GP.get so that we don't allocate the same message
1801 * queue entries prematurely (see xpc_allocate_msg()).
1802 */
1803 if (atomic_read(&ch->n_to_notify) > 0) {
1804 /*
1805 * Notify senders that messages sent have been
1806 * received and delivered by the other side.
1807 */
1808 xpc_notify_senders_sn2(ch, xpMsgDelivered,
1809 ch_sn2->remote_GP.get);
1810 }
1811
1812 /*
1813 * Clear msg->flags in previously sent messages, so that
1814 * they're ready for xpc_allocate_msg().
1815 */
1816 xpc_clear_local_msgqueue_flags_sn2(ch);
1817
1818 ch_sn2->w_remote_GP.get = ch_sn2->remote_GP.get;
1819
1820 dev_dbg(xpc_chan, "w_remote_GP.get changed to %ld, partid=%d, "
1821 "channel=%d\n", ch_sn2->w_remote_GP.get, ch->partid,
1822 ch->number);
1823
1824 /*
1825 * If anyone was waiting for message queue entries to become
1826 * available, wake them up.
1827 */
1828 if (atomic_read(&ch->n_on_msg_allocate_wq) > 0)
1829 wake_up(&ch->msg_allocate_wq);
1830 }
1831
1832 /*
1833 * Now check for newly sent messages by the other side. (The remote
1834 * PUT value will have changed since we last looked at it.)
1835 */
1836
1837 if (ch_sn2->w_remote_GP.put != ch_sn2->remote_GP.put) {
1838 /*
1839 * Clear msg->flags in previously received messages, so that
1840 * they're ready for xpc_get_deliverable_payload_sn2().
1841 */
1842 xpc_clear_remote_msgqueue_flags_sn2(ch);
1843
1844 ch_sn2->w_remote_GP.put = ch_sn2->remote_GP.put;
1845
1846 dev_dbg(xpc_chan, "w_remote_GP.put changed to %ld, partid=%d, "
1847 "channel=%d\n", ch_sn2->w_remote_GP.put, ch->partid,
1848 ch->number);
1849
1850 npayloads_sent = xpc_n_of_deliverable_payloads_sn2(ch);
1851 if (npayloads_sent > 0) {
1852 dev_dbg(xpc_chan, "msgs waiting to be copied and "
1853 "delivered=%d, partid=%d, channel=%d\n",
1854 npayloads_sent, ch->partid, ch->number);
1855
1856 if (ch->flags & XPC_C_CONNECTEDCALLOUT_MADE)
1857 xpc_activate_kthreads(ch, npayloads_sent);
1858 }
1859 }
1860
1861 xpc_msgqueue_deref(ch);
1862}
1863
1864static struct xpc_msg_sn2 *
1865xpc_pull_remote_msg_sn2(struct xpc_channel *ch, s64 get)
1866{
1867 struct xpc_partition *part = &xpc_partitions[ch->partid];
1868 struct xpc_channel_sn2 *ch_sn2 = &ch->sn.sn2;
1869 unsigned long remote_msg_pa;
1870 struct xpc_msg_sn2 *msg;
1871 u32 msg_index;
1872 u32 nmsgs;
1873 u64 msg_offset;
1874 enum xp_retval ret;
1875
1876 if (mutex_lock_interruptible(&ch_sn2->msg_to_pull_mutex) != 0) {
1877 /* we were interrupted by a signal */
1878 return NULL;
1879 }
1880
1881 while (get >= ch_sn2->next_msg_to_pull) {
1882
1883 /* pull as many messages as are ready and able to be pulled */
1884
1885 msg_index = ch_sn2->next_msg_to_pull % ch->remote_nentries;
1886
1887 DBUG_ON(ch_sn2->next_msg_to_pull >= ch_sn2->w_remote_GP.put);
1888 nmsgs = ch_sn2->w_remote_GP.put - ch_sn2->next_msg_to_pull;
1889 if (msg_index + nmsgs > ch->remote_nentries) {
1890 /* ignore the ones that wrap the msg queue for now */
1891 nmsgs = ch->remote_nentries - msg_index;
1892 }
1893
1894 msg_offset = msg_index * ch->entry_size;
1895 msg = (struct xpc_msg_sn2 *)((u64)ch_sn2->remote_msgqueue +
1896 msg_offset);
1897 remote_msg_pa = ch_sn2->remote_msgqueue_pa + msg_offset;
1898
1899 ret = xpc_pull_remote_cachelines_sn2(part, msg, remote_msg_pa,
1900 nmsgs * ch->entry_size);
1901 if (ret != xpSuccess) {
1902
1903 dev_dbg(xpc_chan, "failed to pull %d msgs starting with"
1904 " msg %ld from partition %d, channel=%d, "
1905 "ret=%d\n", nmsgs, ch_sn2->next_msg_to_pull,
1906 ch->partid, ch->number, ret);
1907
1908 XPC_DEACTIVATE_PARTITION(part, ret);
1909
1910 mutex_unlock(&ch_sn2->msg_to_pull_mutex);
1911 return NULL;
1912 }
1913
1914 ch_sn2->next_msg_to_pull += nmsgs;
1915 }
1916
1917 mutex_unlock(&ch_sn2->msg_to_pull_mutex);
1918
1919 /* return the message we were looking for */
1920 msg_offset = (get % ch->remote_nentries) * ch->entry_size;
1921 msg = (struct xpc_msg_sn2 *)((u64)ch_sn2->remote_msgqueue + msg_offset);
1922
1923 return msg;
1924}
1925
1926/*
1927 * Get the next deliverable message's payload.
1928 */
1929static void *
1930xpc_get_deliverable_payload_sn2(struct xpc_channel *ch)
1931{
1932 struct xpc_channel_sn2 *ch_sn2 = &ch->sn.sn2;
1933 struct xpc_msg_sn2 *msg;
1934 void *payload = NULL;
1935 s64 get;
1936
1937 do {
1938 if (ch->flags & XPC_C_DISCONNECTING)
1939 break;
1940
1941 get = ch_sn2->w_local_GP.get;
1942 rmb(); /* guarantee that .get loads before .put */
1943 if (get == ch_sn2->w_remote_GP.put)
1944 break;
1945
1946 /* There are messages waiting to be pulled and delivered.
1947 * We need to try to secure one for ourselves. We'll do this
1948		 * by trying to increment w_local_GP.get and hoping that no one
1949		 * else beats us to it. If they do, we'll simply have
1950 * to try again for the next one.
1951 */
1952
1953 if (cmpxchg(&ch_sn2->w_local_GP.get, get, get + 1) == get) {
1954 /* we got the entry referenced by get */
1955
1956 dev_dbg(xpc_chan, "w_local_GP.get changed to %ld, "
1957 "partid=%d, channel=%d\n", get + 1,
1958 ch->partid, ch->number);
1959
1960 /* pull the message from the remote partition */
1961
1962 msg = xpc_pull_remote_msg_sn2(ch, get);
1963
1964 DBUG_ON(msg != NULL && msg->number != get);
1965 DBUG_ON(msg != NULL && (msg->flags & XPC_M_SN2_DONE));
1966 DBUG_ON(msg != NULL && !(msg->flags & XPC_M_SN2_READY));
1967
1968 payload = &msg->payload;
1969 break;
1970 }
1971
1972 } while (1);
1973
1974 return payload;
1975}
1976
1977/*
1978 * Now we actually send the messages that are ready to be sent by advancing
1979 * the local message queue's Put value and then sending a chctl msgrequest to the
1980 * recipient partition.
1981 */
1982static void
1983xpc_send_msgs_sn2(struct xpc_channel *ch, s64 initial_put)
1984{
1985 struct xpc_channel_sn2 *ch_sn2 = &ch->sn.sn2;
1986 struct xpc_msg_sn2 *msg;
1987 s64 put = initial_put + 1;
1988 int send_msgrequest = 0;
1989
1990 while (1) {
1991
1992 while (1) {
1993 if (put == ch_sn2->w_local_GP.put)
1994 break;
1995
1996 msg = (struct xpc_msg_sn2 *)((u64)ch_sn2->
1997 local_msgqueue + (put %
1998 ch->local_nentries) *
1999 ch->entry_size);
2000
2001 if (!(msg->flags & XPC_M_SN2_READY))
2002 break;
2003
2004 put++;
2005 }
2006
2007 if (put == initial_put) {
2008 /* nothing's changed */
2009 break;
2010 }
2011
2012 if (cmpxchg_rel(&ch_sn2->local_GP->put, initial_put, put) !=
2013 initial_put) {
2014 /* someone else beat us to it */
2015 DBUG_ON(ch_sn2->local_GP->put < initial_put);
2016 break;
2017 }
2018
2019 /* we just set the new value of local_GP->put */
2020
2021 dev_dbg(xpc_chan, "local_GP->put changed to %ld, partid=%d, "
2022 "channel=%d\n", put, ch->partid, ch->number);
2023
2024 send_msgrequest = 1;
2025
2026 /*
2027 * We need to ensure that the message referenced by
2028 * local_GP->put is not XPC_M_SN2_READY or that local_GP->put
2029 * equals w_local_GP.put, so we'll go have a look.
2030 */
2031 initial_put = put;
2032 }
2033
2034 if (send_msgrequest)
2035 xpc_send_chctl_msgrequest_sn2(ch);
2036}
2037
2038/*
2039 * Allocate an entry for a message from the message queue associated with the
2040 * specified channel.
2041 */
2042static enum xp_retval
2043xpc_allocate_msg_sn2(struct xpc_channel *ch, u32 flags,
2044 struct xpc_msg_sn2 **address_of_msg)
2045{
2046 struct xpc_channel_sn2 *ch_sn2 = &ch->sn.sn2;
2047 struct xpc_msg_sn2 *msg;
2048 enum xp_retval ret;
2049 s64 put;
2050
2051 /*
2052 * Get the next available message entry from the local message queue.
2053 * If none are available, we'll make sure that we grab the latest
2054 * GP values.
2055 */
2056 ret = xpTimeout;
2057
2058 while (1) {
2059
2060 put = ch_sn2->w_local_GP.put;
2061 rmb(); /* guarantee that .put loads before .get */
2062 if (put - ch_sn2->w_remote_GP.get < ch->local_nentries) {
2063
2064 /* There are available message entries. We need to try
2065 * to secure one for ourselves. We'll do this by trying
2066 * to increment w_local_GP.put as long as someone else
2067 * doesn't beat us to it. If they do, we'll have to
2068 * try again.
2069 */
2070 if (cmpxchg(&ch_sn2->w_local_GP.put, put, put + 1) ==
2071 put) {
2072 /* we got the entry referenced by put */
2073 break;
2074 }
2075 continue; /* try again */
2076 }
2077
2078 /*
2079 * There aren't any available msg entries at this time.
2080 *
2081 * In waiting for a message entry to become available,
2082 * we set a timeout in case the other side is not sending
2083 * completion interrupts. This lets us fake a notify IRQ
2084 * that will cause the notify IRQ handler to fetch the latest
2085 * GP values as if an interrupt was sent by the other side.
2086 */
2087 if (ret == xpTimeout)
2088 xpc_send_chctl_local_msgrequest_sn2(ch);
2089
2090 if (flags & XPC_NOWAIT)
2091 return xpNoWait;
2092
2093 ret = xpc_allocate_msg_wait(ch);
2094 if (ret != xpInterrupted && ret != xpTimeout)
2095 return ret;
2096 }
2097
2098 /* get the message's address and initialize it */
2099 msg = (struct xpc_msg_sn2 *)((u64)ch_sn2->local_msgqueue +
2100 (put % ch->local_nentries) *
2101 ch->entry_size);
2102
2103 DBUG_ON(msg->flags != 0);
2104 msg->number = put;
2105
2106 dev_dbg(xpc_chan, "w_local_GP.put changed to %ld; msg=0x%p, "
2107 "msg_number=%ld, partid=%d, channel=%d\n", put + 1,
2108 (void *)msg, msg->number, ch->partid, ch->number);
2109
2110 *address_of_msg = msg;
2111 return xpSuccess;
2112}
2113
2114/*
2115 * Common code that does the actual sending of the message by advancing the
2116 * local message queue's Put value and sending a chctl msgrequest to the
2117 * partition the message is being sent to.
2118 */
2119static enum xp_retval
2120xpc_send_payload_sn2(struct xpc_channel *ch, u32 flags, void *payload,
2121 u16 payload_size, u8 notify_type, xpc_notify_func func,
2122 void *key)
2123{
2124 enum xp_retval ret = xpSuccess;
2125 struct xpc_channel_sn2 *ch_sn2 = &ch->sn.sn2;
2126	struct xpc_msg_sn2 *msg = msg;		/* self-assignment quiets a spurious "uninitialized" warning */
2127	struct xpc_notify_sn2 *notify = notify;	/* self-assignment quiets a spurious "uninitialized" warning */
2128 s64 msg_number;
2129 s64 put;
2130
2131 DBUG_ON(notify_type == XPC_N_CALL && func == NULL);
2132
2133 if (XPC_MSG_SIZE(payload_size) > ch->entry_size)
2134 return xpPayloadTooBig;
2135
2136 xpc_msgqueue_ref(ch);
2137
2138 if (ch->flags & XPC_C_DISCONNECTING) {
2139 ret = ch->reason;
2140 goto out_1;
2141 }
2142 if (!(ch->flags & XPC_C_CONNECTED)) {
2143 ret = xpNotConnected;
2144 goto out_1;
2145 }
2146
2147 ret = xpc_allocate_msg_sn2(ch, flags, &msg);
2148 if (ret != xpSuccess)
2149 goto out_1;
2150
2151 msg_number = msg->number;
2152
2153 if (notify_type != 0) {
2154 /*
2155 * Tell the remote side to send an ACK interrupt when the
2156 * message has been delivered.
2157 */
2158 msg->flags |= XPC_M_SN2_INTERRUPT;
2159
2160 atomic_inc(&ch->n_to_notify);
2161
2162 notify = &ch_sn2->notify_queue[msg_number % ch->local_nentries];
2163 notify->func = func;
2164 notify->key = key;
2165 notify->type = notify_type;
2166
2167 /* ??? Is a mb() needed here? */
2168
2169 if (ch->flags & XPC_C_DISCONNECTING) {
2170 /*
2171 * An error occurred between our last error check and
2172 * this one. We will try to clear the type field from
2173 * the notify entry. If we succeed then
2174 * xpc_disconnect_channel() didn't already process
2175 * the notify entry.
2176 */
2177 if (cmpxchg(&notify->type, notify_type, 0) ==
2178 notify_type) {
2179 atomic_dec(&ch->n_to_notify);
2180 ret = ch->reason;
2181 }
2182 goto out_1;
2183 }
2184 }
2185
2186 memcpy(&msg->payload, payload, payload_size);
2187
2188 msg->flags |= XPC_M_SN2_READY;
2189
2190 /*
2191 * The preceding store of msg->flags must occur before the following
2192 * load of local_GP->put.
2193 */
2194 mb();
2195
2196 /* see if the message is next in line to be sent, if so send it */
2197
2198 put = ch_sn2->local_GP->put;
2199 if (put == msg_number)
2200 xpc_send_msgs_sn2(ch, put);
2201
2202out_1:
2203 xpc_msgqueue_deref(ch);
2204 return ret;
2205}
2206
2207/*
2208 * Now we actually acknowledge the messages that have been delivered and ack'd
2209 * by advancing the cached remote message queue's Get value and, if requested,
2210 * sending a chctl msgrequest to the message sender's partition.
2211 *
2212 * If a message has XPC_M_SN2_INTERRUPT set, send an interrupt to the partition
2213 * that sent the message.
2214 */
2215static void
2216xpc_acknowledge_msgs_sn2(struct xpc_channel *ch, s64 initial_get, u8 msg_flags)
2217{
2218 struct xpc_channel_sn2 *ch_sn2 = &ch->sn.sn2;
2219 struct xpc_msg_sn2 *msg;
2220 s64 get = initial_get + 1;
2221 int send_msgrequest = 0;
2222
2223 while (1) {
2224
2225 while (1) {
2226 if (get == ch_sn2->w_local_GP.get)
2227 break;
2228
2229 msg = (struct xpc_msg_sn2 *)((u64)ch_sn2->
2230 remote_msgqueue + (get %
2231 ch->remote_nentries) *
2232 ch->entry_size);
2233
2234 if (!(msg->flags & XPC_M_SN2_DONE))
2235 break;
2236
2237 msg_flags |= msg->flags;
2238 get++;
2239 }
2240
2241 if (get == initial_get) {
2242 /* nothing's changed */
2243 break;
2244 }
2245
2246 if (cmpxchg_rel(&ch_sn2->local_GP->get, initial_get, get) !=
2247 initial_get) {
2248 /* someone else beat us to it */
2249 DBUG_ON(ch_sn2->local_GP->get <= initial_get);
2250 break;
2251 }
2252
2253 /* we just set the new value of local_GP->get */
2254
2255 dev_dbg(xpc_chan, "local_GP->get changed to %ld, partid=%d, "
2256 "channel=%d\n", get, ch->partid, ch->number);
2257
2258 send_msgrequest = (msg_flags & XPC_M_SN2_INTERRUPT);
2259
2260 /*
2261 * We need to ensure that the message referenced by
2262 * local_GP->get is not XPC_M_SN2_DONE or that local_GP->get
2263 * equals w_local_GP.get, so we'll go have a look.
2264 */
2265 initial_get = get;
2266 }
2267
2268 if (send_msgrequest)
2269 xpc_send_chctl_msgrequest_sn2(ch);
2270}
2271
2272static void
2273xpc_received_payload_sn2(struct xpc_channel *ch, void *payload)
2274{
2275 struct xpc_msg_sn2 *msg;
2276 s64 msg_number;
2277 s64 get;
2278
2279 msg = container_of(payload, struct xpc_msg_sn2, payload);
2280 msg_number = msg->number;
2281
2282 dev_dbg(xpc_chan, "msg=0x%p, msg_number=%ld, partid=%d, channel=%d\n",
2283 (void *)msg, msg_number, ch->partid, ch->number);
2284
2285 DBUG_ON((((u64)msg - (u64)ch->remote_msgqueue) / ch->entry_size) !=
2286 msg_number % ch->remote_nentries);
2287 DBUG_ON(msg->flags & XPC_M_SN2_DONE);
2288
2289 msg->flags |= XPC_M_SN2_DONE;
2290
2291 /*
2292 * The preceding store of msg->flags must occur before the following
2293 * load of local_GP->get.
2294 */
2295 mb();
2296
2297 /*
2298 * See if this message is next in line to be acknowledged as having
2299 * been delivered.
2300 */
2301 get = ch->sn.sn2.local_GP->get;
2302 if (get == msg_number)
2303 xpc_acknowledge_msgs_sn2(ch, get, msg->flags);
2304}
2305
2306int
2307xpc_init_sn2(void)
2308{
2309 int ret;
2310 size_t buf_size;
2311
2312 xpc_setup_partitions_sn = xpc_setup_partitions_sn_sn2;
2313 xpc_get_partition_rsvd_page_pa = xpc_get_partition_rsvd_page_pa_sn2;
2314 xpc_setup_rsvd_page_sn = xpc_setup_rsvd_page_sn_sn2;
2315 xpc_increment_heartbeat = xpc_increment_heartbeat_sn2;
2316 xpc_offline_heartbeat = xpc_offline_heartbeat_sn2;
2317 xpc_online_heartbeat = xpc_online_heartbeat_sn2;
2318 xpc_heartbeat_init = xpc_heartbeat_init_sn2;
2319 xpc_heartbeat_exit = xpc_heartbeat_exit_sn2;
2320 xpc_get_remote_heartbeat = xpc_get_remote_heartbeat_sn2;
2321
2322 xpc_request_partition_activation = xpc_request_partition_activation_sn2;
2323 xpc_request_partition_reactivation =
2324 xpc_request_partition_reactivation_sn2;
2325 xpc_request_partition_deactivation =
2326 xpc_request_partition_deactivation_sn2;
2327 xpc_cancel_partition_deactivation_request =
2328 xpc_cancel_partition_deactivation_request_sn2;
2329
2330 xpc_process_activate_IRQ_rcvd = xpc_process_activate_IRQ_rcvd_sn2;
2331 xpc_setup_ch_structures_sn = xpc_setup_ch_structures_sn_sn2;
2332 xpc_teardown_ch_structures_sn = xpc_teardown_ch_structures_sn_sn2;
2333 xpc_make_first_contact = xpc_make_first_contact_sn2;
2334
2335 xpc_get_chctl_all_flags = xpc_get_chctl_all_flags_sn2;
2336 xpc_send_chctl_closerequest = xpc_send_chctl_closerequest_sn2;
2337 xpc_send_chctl_closereply = xpc_send_chctl_closereply_sn2;
2338 xpc_send_chctl_openrequest = xpc_send_chctl_openrequest_sn2;
2339 xpc_send_chctl_openreply = xpc_send_chctl_openreply_sn2;
2340
2341 xpc_save_remote_msgqueue_pa = xpc_save_remote_msgqueue_pa_sn2;
2342
2343 xpc_setup_msg_structures = xpc_setup_msg_structures_sn2;
2344 xpc_teardown_msg_structures = xpc_teardown_msg_structures_sn2;
2345
2346 xpc_notify_senders_of_disconnect = xpc_notify_senders_of_disconnect_sn2;
2347 xpc_process_msg_chctl_flags = xpc_process_msg_chctl_flags_sn2;
2348 xpc_n_of_deliverable_payloads = xpc_n_of_deliverable_payloads_sn2;
2349 xpc_get_deliverable_payload = xpc_get_deliverable_payload_sn2;
2350
2351 xpc_indicate_partition_engaged = xpc_indicate_partition_engaged_sn2;
2352 xpc_indicate_partition_disengaged =
2353 xpc_indicate_partition_disengaged_sn2;
2354 xpc_partition_engaged = xpc_partition_engaged_sn2;
2355 xpc_any_partition_engaged = xpc_any_partition_engaged_sn2;
2356 xpc_assume_partition_disengaged = xpc_assume_partition_disengaged_sn2;
2357
2358 xpc_send_payload = xpc_send_payload_sn2;
2359 xpc_received_payload = xpc_received_payload_sn2;
2360
2361 if (offsetof(struct xpc_msg_sn2, payload) > XPC_MSG_HDR_MAX_SIZE) {
2362 dev_err(xpc_part, "header portion of struct xpc_msg_sn2 is "
2363 "larger than %d\n", XPC_MSG_HDR_MAX_SIZE);
2364 return -E2BIG;
2365 }
2366
2367 buf_size = max(XPC_RP_VARS_SIZE,
2368 XPC_RP_HEADER_SIZE + XP_NASID_MASK_BYTES_SN2);
2369 xpc_remote_copy_buffer_sn2 = xpc_kmalloc_cacheline_aligned(buf_size,
2370 GFP_KERNEL,
2371 &xpc_remote_copy_buffer_base_sn2);
2372 if (xpc_remote_copy_buffer_sn2 == NULL) {
2373 dev_err(xpc_part, "can't get memory for remote copy buffer\n");
2374 return -ENOMEM;
2375 }
2376
2377 /* open up protections for IPI and [potentially] amo operations */
2378 xpc_allow_IPI_ops_sn2();
2379 xpc_allow_amo_ops_shub_wars_1_1_sn2();
2380
2381 /*
2382 * This is safe to do before the xpc_hb_checker thread has started
2383 * because the handler releases a wait queue. If an interrupt is
2384 * received before the thread is waiting, it will not go to sleep,
2385 * but rather immediately process the interrupt.
2386 */
2387 ret = request_irq(SGI_XPC_ACTIVATE, xpc_handle_activate_IRQ_sn2, 0,
2388 "xpc hb", NULL);
2389 if (ret != 0) {
2390 dev_err(xpc_part, "can't register ACTIVATE IRQ handler, "
2391 "errno=%d\n", -ret);
2392 xpc_disallow_IPI_ops_sn2();
2393 kfree(xpc_remote_copy_buffer_base_sn2);
2394 }
2395 return ret;
2396}
2397
2398void
2399xpc_exit_sn2(void)
2400{
2401 free_irq(SGI_XPC_ACTIVATE, NULL);
2402 xpc_disallow_IPI_ops_sn2();
2403 kfree(xpc_remote_copy_buffer_base_sn2);
2404}
diff --git a/drivers/misc/sgi-xp/xpc_uv.c b/drivers/misc/sgi-xp/xpc_uv.c
new file mode 100644
index 000000000000..1ac694c01623
--- /dev/null
+++ b/drivers/misc/sgi-xp/xpc_uv.c
@@ -0,0 +1,1443 @@
1/*
2 * This file is subject to the terms and conditions of the GNU General Public
3 * License. See the file "COPYING" in the main directory of this archive
4 * for more details.
5 *
6 * Copyright (c) 2008 Silicon Graphics, Inc. All Rights Reserved.
7 */
8
9/*
10 * Cross Partition Communication (XPC) uv-based functions.
11 *
12 * Architecture specific implementation of common functions.
13 *
14 */
15
16#include <linux/kernel.h>
17#include <linux/mm.h>
18#include <linux/interrupt.h>
19#include <linux/delay.h>
20#include <linux/device.h>
21#include <asm/uv/uv_hub.h>
22#include "../sgi-gru/gru.h"
23#include "../sgi-gru/grukservices.h"
24#include "xpc.h"
25
26static atomic64_t xpc_heartbeat_uv;
27static DECLARE_BITMAP(xpc_heartbeating_to_mask_uv, XP_MAX_NPARTITIONS_UV);
28
29#define XPC_ACTIVATE_MSG_SIZE_UV (1 * GRU_CACHE_LINE_BYTES)
30#define XPC_NOTIFY_MSG_SIZE_UV (2 * GRU_CACHE_LINE_BYTES)
31
32#define XPC_ACTIVATE_MQ_SIZE_UV (4 * XP_MAX_NPARTITIONS_UV * \
33 XPC_ACTIVATE_MSG_SIZE_UV)
34#define XPC_NOTIFY_MQ_SIZE_UV (4 * XP_MAX_NPARTITIONS_UV * \
35 XPC_NOTIFY_MSG_SIZE_UV)
36
37static void *xpc_activate_mq_uv;
38static void *xpc_notify_mq_uv;
39
40static int
41xpc_setup_partitions_sn_uv(void)
42{
43 short partid;
44 struct xpc_partition_uv *part_uv;
45
46 for (partid = 0; partid < XP_MAX_NPARTITIONS_UV; partid++) {
47 part_uv = &xpc_partitions[partid].sn.uv;
48
49 spin_lock_init(&part_uv->flags_lock);
50 part_uv->remote_act_state = XPC_P_AS_INACTIVE;
51 }
52 return 0;
53}
54
55static void *
56xpc_create_gru_mq_uv(unsigned int mq_size, int cpuid, unsigned int irq,
57 irq_handler_t irq_handler)
58{
59 int ret;
60 int nid;
61 int mq_order;
62 struct page *page;
63 void *mq;
64
65 nid = cpu_to_node(cpuid);
66 mq_order = get_order(mq_size);
67 page = alloc_pages_node(nid, GFP_KERNEL | __GFP_ZERO | GFP_THISNODE,
68 mq_order);
69 if (page == NULL) {
70 dev_err(xpc_part, "xpc_create_gru_mq_uv() failed to alloc %d "
71 "bytes of memory on nid=%d for GRU mq\n", mq_size, nid);
72 return NULL;
73 }
74
75 mq = page_address(page);
76 ret = gru_create_message_queue(mq, mq_size);
77 if (ret != 0) {
78 dev_err(xpc_part, "gru_create_message_queue() returned "
79 "error=%d\n", ret);
80 free_pages((unsigned long)mq, mq_order);
81 return NULL;
82 }
83
84 /* !!! Need to do some other things to set up IRQ */
85
86 ret = request_irq(irq, irq_handler, 0, "xpc", NULL);
87 if (ret != 0) {
88 dev_err(xpc_part, "request_irq(irq=%d) returned error=%d\n",
89 irq, ret);
90 free_pages((unsigned long)mq, mq_order);
91 return NULL;
92 }
93
94 /* !!! enable generation of irq when GRU mq op occurs to this mq */
95
96 /* ??? allow other partitions to access GRU mq? */
97
98 return mq;
99}
100
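
xpc_create_gru_mq_uv() above rounds the requested queue size up to a whole power-of-two number of pages via get_order() before calling alloc_pages_node(). A minimal userspace sketch of that rounding, assuming 4 KiB pages for illustration:

/*
 * Illustrative userspace sketch, not part of the patch: how a byte
 * count maps to the power-of-two page order that get_order() produces
 * for the GRU mq allocation above.  4 KiB pages are assumed.
 */
#include <stdio.h>

#define SKETCH_PAGE_SIZE 4096UL

static int mq_page_order(unsigned long size)
{
	unsigned long pages = (size + SKETCH_PAGE_SIZE - 1) / SKETCH_PAGE_SIZE;
	int order = 0;

	while ((1UL << order) < pages)
		order++;
	return order;		/* allocation covers 2^order contiguous pages */
}

int main(void)
{
	printf("order for 16384 bytes: %d\n", mq_page_order(16384));	/* 2 */
	printf("order for 4097 bytes:  %d\n", mq_page_order(4097));	/* 1 */
	return 0;
}
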
101static void
102xpc_destroy_gru_mq_uv(void *mq, unsigned int mq_size, unsigned int irq)
103{
104 /* ??? disallow other partitions to access GRU mq? */
105
106 /* !!! disable generation of irq when GRU mq op occurs to this mq */
107
108 free_irq(irq, NULL);
109
110 free_pages((unsigned long)mq, get_order(mq_size));
111}
112
113static enum xp_retval
114xpc_send_gru_msg(unsigned long mq_gpa, void *msg, size_t msg_size)
115{
116 enum xp_retval xp_ret;
117 int ret;
118
119 while (1) {
120 ret = gru_send_message_gpa(mq_gpa, msg, msg_size);
121 if (ret == MQE_OK) {
122 xp_ret = xpSuccess;
123 break;
124 }
125
126 if (ret == MQE_QUEUE_FULL) {
127 dev_dbg(xpc_chan, "gru_send_message_gpa() returned "
128 "error=MQE_QUEUE_FULL\n");
129 /* !!! handle QLimit reached; delay & try again */
130 /* ??? Do we add a limit to the number of retries? */
131 (void)msleep_interruptible(10);
132 } else if (ret == MQE_CONGESTION) {
133 dev_dbg(xpc_chan, "gru_send_message_gpa() returned "
134 "error=MQE_CONGESTION\n");
135 /* !!! handle LB Overflow; simply try again */
136 /* ??? Do we add a limit to the number of retries? */
137 } else {
138 /* !!! Currently this is MQE_UNEXPECTED_CB_ERR */
139 dev_err(xpc_chan, "gru_send_message_gpa() returned "
140 "error=%d\n", ret);
141 xp_ret = xpGruSendMqError;
142 break;
143 }
144 }
145 return xp_ret;
146}
147
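
xpc_send_gru_msg() above keeps resending while the GRU reports a full or congested queue and fails out only on an unexpected error. A minimal userspace sketch of that retry shape, with a stub standing in for gru_send_message_gpa() and an illustrative 10 ms delay:

/*
 * Illustrative userspace sketch, not part of the patch: retry while the
 * queue is "full", back off briefly, and give up only on a hard error.
 * The send stub and the delay value are invented for the example.
 */
#include <stdio.h>
#include <unistd.h>

enum send_status { SEND_OK, SEND_QUEUE_FULL, SEND_HARD_ERROR };

/* stand-in for the real send call; reports "full" a few times first */
static enum send_status fake_send(void)
{
	static int full_left = 3;
	return (full_left-- > 0) ? SEND_QUEUE_FULL : SEND_OK;
}

static int send_with_retry(void)
{
	for (;;) {
		enum send_status s = fake_send();

		if (s == SEND_OK)
			return 0;
		if (s == SEND_QUEUE_FULL) {
			usleep(10 * 1000);	/* delay, then try again */
			continue;
		}
		return -1;			/* hard error: stop retrying */
	}
}

int main(void)
{
	printf("send_with_retry() = %d\n", send_with_retry());
	return 0;
}
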
148static void
149xpc_process_activate_IRQ_rcvd_uv(void)
150{
151 unsigned long irq_flags;
152 short partid;
153 struct xpc_partition *part;
154 u8 act_state_req;
155
156 DBUG_ON(xpc_activate_IRQ_rcvd == 0);
157
158 spin_lock_irqsave(&xpc_activate_IRQ_rcvd_lock, irq_flags);
159 for (partid = 0; partid < XP_MAX_NPARTITIONS_UV; partid++) {
160 part = &xpc_partitions[partid];
161
162 if (part->sn.uv.act_state_req == 0)
163 continue;
164
165 xpc_activate_IRQ_rcvd--;
166 BUG_ON(xpc_activate_IRQ_rcvd < 0);
167
168 act_state_req = part->sn.uv.act_state_req;
169 part->sn.uv.act_state_req = 0;
170 spin_unlock_irqrestore(&xpc_activate_IRQ_rcvd_lock, irq_flags);
171
172 if (act_state_req == XPC_P_ASR_ACTIVATE_UV) {
173 if (part->act_state == XPC_P_AS_INACTIVE)
174 xpc_activate_partition(part);
175 else if (part->act_state == XPC_P_AS_DEACTIVATING)
176 XPC_DEACTIVATE_PARTITION(part, xpReactivating);
177
178 } else if (act_state_req == XPC_P_ASR_REACTIVATE_UV) {
179 if (part->act_state == XPC_P_AS_INACTIVE)
180 xpc_activate_partition(part);
181 else
182 XPC_DEACTIVATE_PARTITION(part, xpReactivating);
183
184 } else if (act_state_req == XPC_P_ASR_DEACTIVATE_UV) {
185 XPC_DEACTIVATE_PARTITION(part, part->sn.uv.reason);
186
187 } else {
188 BUG();
189 }
190
191 spin_lock_irqsave(&xpc_activate_IRQ_rcvd_lock, irq_flags);
192 if (xpc_activate_IRQ_rcvd == 0)
193 break;
194 }
195 spin_unlock_irqrestore(&xpc_activate_IRQ_rcvd_lock, irq_flags);
196
197}
198
199static void
200xpc_handle_activate_mq_msg_uv(struct xpc_partition *part,
201 struct xpc_activate_mq_msghdr_uv *msg_hdr,
202 int *wakeup_hb_checker)
203{
204 unsigned long irq_flags;
205 struct xpc_partition_uv *part_uv = &part->sn.uv;
206 struct xpc_openclose_args *args;
207
208 part_uv->remote_act_state = msg_hdr->act_state;
209
210 switch (msg_hdr->type) {
211 case XPC_ACTIVATE_MQ_MSG_SYNC_ACT_STATE_UV:
212 /* syncing of remote_act_state was just done above */
213 break;
214
215 case XPC_ACTIVATE_MQ_MSG_INC_HEARTBEAT_UV: {
216 struct xpc_activate_mq_msg_heartbeat_req_uv *msg;
217
218 msg = container_of(msg_hdr,
219 struct xpc_activate_mq_msg_heartbeat_req_uv,
220 hdr);
221 part_uv->heartbeat = msg->heartbeat;
222 break;
223 }
224 case XPC_ACTIVATE_MQ_MSG_OFFLINE_HEARTBEAT_UV: {
225 struct xpc_activate_mq_msg_heartbeat_req_uv *msg;
226
227 msg = container_of(msg_hdr,
228 struct xpc_activate_mq_msg_heartbeat_req_uv,
229 hdr);
230 part_uv->heartbeat = msg->heartbeat;
231
232 spin_lock_irqsave(&part_uv->flags_lock, irq_flags);
233 part_uv->flags |= XPC_P_HEARTBEAT_OFFLINE_UV;
234 spin_unlock_irqrestore(&part_uv->flags_lock, irq_flags);
235 break;
236 }
237 case XPC_ACTIVATE_MQ_MSG_ONLINE_HEARTBEAT_UV: {
238 struct xpc_activate_mq_msg_heartbeat_req_uv *msg;
239
240 msg = container_of(msg_hdr,
241 struct xpc_activate_mq_msg_heartbeat_req_uv,
242 hdr);
243 part_uv->heartbeat = msg->heartbeat;
244
245 spin_lock_irqsave(&part_uv->flags_lock, irq_flags);
246 part_uv->flags &= ~XPC_P_HEARTBEAT_OFFLINE_UV;
247 spin_unlock_irqrestore(&part_uv->flags_lock, irq_flags);
248 break;
249 }
250 case XPC_ACTIVATE_MQ_MSG_ACTIVATE_REQ_UV: {
251 struct xpc_activate_mq_msg_activate_req_uv *msg;
252
253 /*
254 * ??? Do we deal here with ts_jiffies being different
255 * ??? if act_state != XPC_P_AS_INACTIVE instead of
256 * ??? below?
257 */
258 msg = container_of(msg_hdr, struct
259 xpc_activate_mq_msg_activate_req_uv, hdr);
260
261 spin_lock_irqsave(&xpc_activate_IRQ_rcvd_lock, irq_flags);
262 if (part_uv->act_state_req == 0)
263 xpc_activate_IRQ_rcvd++;
264 part_uv->act_state_req = XPC_P_ASR_ACTIVATE_UV;
265 part->remote_rp_pa = msg->rp_gpa; /* !!! _pa is _gpa */
266 part->remote_rp_ts_jiffies = msg_hdr->rp_ts_jiffies;
267 part_uv->remote_activate_mq_gpa = msg->activate_mq_gpa;
268 spin_unlock_irqrestore(&xpc_activate_IRQ_rcvd_lock, irq_flags);
269
270 (*wakeup_hb_checker)++;
271 break;
272 }
273 case XPC_ACTIVATE_MQ_MSG_DEACTIVATE_REQ_UV: {
274 struct xpc_activate_mq_msg_deactivate_req_uv *msg;
275
276 msg = container_of(msg_hdr, struct
277 xpc_activate_mq_msg_deactivate_req_uv, hdr);
278
279 spin_lock_irqsave(&xpc_activate_IRQ_rcvd_lock, irq_flags);
280 if (part_uv->act_state_req == 0)
281 xpc_activate_IRQ_rcvd++;
282 part_uv->act_state_req = XPC_P_ASR_DEACTIVATE_UV;
283 part_uv->reason = msg->reason;
284 spin_unlock_irqrestore(&xpc_activate_IRQ_rcvd_lock, irq_flags);
285
286 (*wakeup_hb_checker)++;
287 return;
288 }
289 case XPC_ACTIVATE_MQ_MSG_CHCTL_CLOSEREQUEST_UV: {
290 struct xpc_activate_mq_msg_chctl_closerequest_uv *msg;
291
292 msg = container_of(msg_hdr, struct
293 xpc_activate_mq_msg_chctl_closerequest_uv,
294 hdr);
295 args = &part->remote_openclose_args[msg->ch_number];
296 args->reason = msg->reason;
297
298 spin_lock_irqsave(&part->chctl_lock, irq_flags);
299 part->chctl.flags[msg->ch_number] |= XPC_CHCTL_CLOSEREQUEST;
300 spin_unlock_irqrestore(&part->chctl_lock, irq_flags);
301
302 xpc_wakeup_channel_mgr(part);
303 break;
304 }
305 case XPC_ACTIVATE_MQ_MSG_CHCTL_CLOSEREPLY_UV: {
306 struct xpc_activate_mq_msg_chctl_closereply_uv *msg;
307
308 msg = container_of(msg_hdr, struct
309 xpc_activate_mq_msg_chctl_closereply_uv,
310 hdr);
311
312 spin_lock_irqsave(&part->chctl_lock, irq_flags);
313 part->chctl.flags[msg->ch_number] |= XPC_CHCTL_CLOSEREPLY;
314 spin_unlock_irqrestore(&part->chctl_lock, irq_flags);
315
316 xpc_wakeup_channel_mgr(part);
317 break;
318 }
319 case XPC_ACTIVATE_MQ_MSG_CHCTL_OPENREQUEST_UV: {
320 struct xpc_activate_mq_msg_chctl_openrequest_uv *msg;
321
322 msg = container_of(msg_hdr, struct
323 xpc_activate_mq_msg_chctl_openrequest_uv,
324 hdr);
325 args = &part->remote_openclose_args[msg->ch_number];
326 args->entry_size = msg->entry_size;
327 args->local_nentries = msg->local_nentries;
328
329 spin_lock_irqsave(&part->chctl_lock, irq_flags);
330 part->chctl.flags[msg->ch_number] |= XPC_CHCTL_OPENREQUEST;
331 spin_unlock_irqrestore(&part->chctl_lock, irq_flags);
332
333 xpc_wakeup_channel_mgr(part);
334 break;
335 }
336 case XPC_ACTIVATE_MQ_MSG_CHCTL_OPENREPLY_UV: {
337 struct xpc_activate_mq_msg_chctl_openreply_uv *msg;
338
339 msg = container_of(msg_hdr, struct
340 xpc_activate_mq_msg_chctl_openreply_uv, hdr);
341 args = &part->remote_openclose_args[msg->ch_number];
342 args->remote_nentries = msg->remote_nentries;
343 args->local_nentries = msg->local_nentries;
344 args->local_msgqueue_pa = msg->local_notify_mq_gpa;
345
346 spin_lock_irqsave(&part->chctl_lock, irq_flags);
347 part->chctl.flags[msg->ch_number] |= XPC_CHCTL_OPENREPLY;
348 spin_unlock_irqrestore(&part->chctl_lock, irq_flags);
349
350 xpc_wakeup_channel_mgr(part);
351 break;
352 }
353 case XPC_ACTIVATE_MQ_MSG_MARK_ENGAGED_UV:
354 spin_lock_irqsave(&part_uv->flags_lock, irq_flags);
355 part_uv->flags |= XPC_P_ENGAGED_UV;
356 spin_unlock_irqrestore(&part_uv->flags_lock, irq_flags);
357 break;
358
359 case XPC_ACTIVATE_MQ_MSG_MARK_DISENGAGED_UV:
360 spin_lock_irqsave(&part_uv->flags_lock, irq_flags);
361 part_uv->flags &= ~XPC_P_ENGAGED_UV;
362 spin_unlock_irqrestore(&part_uv->flags_lock, irq_flags);
363 break;
364
365 default:
366 dev_err(xpc_part, "received unknown activate_mq msg type=%d "
367 "from partition=%d\n", msg_hdr->type, XPC_PARTID(part));
368
369 /* get hb checker to deactivate from the remote partition */
370 spin_lock_irqsave(&xpc_activate_IRQ_rcvd_lock, irq_flags);
371 if (part_uv->act_state_req == 0)
372 xpc_activate_IRQ_rcvd++;
373 part_uv->act_state_req = XPC_P_ASR_DEACTIVATE_UV;
374 part_uv->reason = xpBadMsgType;
375 spin_unlock_irqrestore(&xpc_activate_IRQ_rcvd_lock, irq_flags);
376
377 (*wakeup_hb_checker)++;
378 return;
379 }
380
381 if (msg_hdr->rp_ts_jiffies != part->remote_rp_ts_jiffies &&
382 part->remote_rp_ts_jiffies != 0) {
383 /*
384 * ??? Does what we do here need to be sensitive to
385 * ??? act_state or remote_act_state?
386 */
387 spin_lock_irqsave(&xpc_activate_IRQ_rcvd_lock, irq_flags);
388 if (part_uv->act_state_req == 0)
389 xpc_activate_IRQ_rcvd++;
390 part_uv->act_state_req = XPC_P_ASR_REACTIVATE_UV;
391 spin_unlock_irqrestore(&xpc_activate_IRQ_rcvd_lock, irq_flags);
392
393 (*wakeup_hb_checker)++;
394 }
395}
396
397static irqreturn_t
398xpc_handle_activate_IRQ_uv(int irq, void *dev_id)
399{
400 struct xpc_activate_mq_msghdr_uv *msg_hdr;
401 short partid;
402 struct xpc_partition *part;
403 int wakeup_hb_checker = 0;
404
405 while ((msg_hdr = gru_get_next_message(xpc_activate_mq_uv)) != NULL) {
406
407 partid = msg_hdr->partid;
408 if (partid < 0 || partid >= XP_MAX_NPARTITIONS_UV) {
409 dev_err(xpc_part, "xpc_handle_activate_IRQ_uv() "
410 "received invalid partid=0x%x in message\n",
411 partid);
412 } else {
413 part = &xpc_partitions[partid];
414 if (xpc_part_ref(part)) {
415 xpc_handle_activate_mq_msg_uv(part, msg_hdr,
416 &wakeup_hb_checker);
417 xpc_part_deref(part);
418 }
419 }
420
421 gru_free_message(xpc_activate_mq_uv, msg_hdr);
422 }
423
424 if (wakeup_hb_checker)
425 wake_up_interruptible(&xpc_activate_IRQ_wq);
426
427 return IRQ_HANDLED;
428}
429
430static enum xp_retval
431xpc_send_activate_IRQ_uv(struct xpc_partition *part, void *msg, size_t msg_size,
432 int msg_type)
433{
434 struct xpc_activate_mq_msghdr_uv *msg_hdr = msg;
435
436 DBUG_ON(msg_size > XPC_ACTIVATE_MSG_SIZE_UV);
437
438 msg_hdr->type = msg_type;
439 msg_hdr->partid = XPC_PARTID(part);
440 msg_hdr->act_state = part->act_state;
441 msg_hdr->rp_ts_jiffies = xpc_rsvd_page->ts_jiffies;
442
443 /* ??? Is holding a spin_lock (ch->lock) during this call a bad idea? */
444 return xpc_send_gru_msg(part->sn.uv.remote_activate_mq_gpa, msg,
445 msg_size);
446}
447
448static void
449xpc_send_activate_IRQ_part_uv(struct xpc_partition *part, void *msg,
450 size_t msg_size, int msg_type)
451{
452 enum xp_retval ret;
453
454 ret = xpc_send_activate_IRQ_uv(part, msg, msg_size, msg_type);
455 if (unlikely(ret != xpSuccess))
456 XPC_DEACTIVATE_PARTITION(part, ret);
457}
458
459static void
460xpc_send_activate_IRQ_ch_uv(struct xpc_channel *ch, unsigned long *irq_flags,
461 void *msg, size_t msg_size, int msg_type)
462{
463 struct xpc_partition *part = &xpc_partitions[ch->partid];
464 enum xp_retval ret;
465
466 ret = xpc_send_activate_IRQ_uv(part, msg, msg_size, msg_type);
467 if (unlikely(ret != xpSuccess)) {
468 if (irq_flags != NULL)
469 spin_unlock_irqrestore(&ch->lock, *irq_flags);
470
471 XPC_DEACTIVATE_PARTITION(part, ret);
472
473 if (irq_flags != NULL)
474 spin_lock_irqsave(&ch->lock, *irq_flags);
475 }
476}
477
478static void
479xpc_send_local_activate_IRQ_uv(struct xpc_partition *part, int act_state_req)
480{
481 unsigned long irq_flags;
482 struct xpc_partition_uv *part_uv = &part->sn.uv;
483
484 /*
485 * !!! Make our side think that the remote partition sent an activate
486 * !!! message our way by doing what the activate IRQ handler would
487 * !!! do had one really been sent.
488 */
489
490 spin_lock_irqsave(&xpc_activate_IRQ_rcvd_lock, irq_flags);
491 if (part_uv->act_state_req == 0)
492 xpc_activate_IRQ_rcvd++;
493 part_uv->act_state_req = act_state_req;
494 spin_unlock_irqrestore(&xpc_activate_IRQ_rcvd_lock, irq_flags);
495
496 wake_up_interruptible(&xpc_activate_IRQ_wq);
497}
498
499static enum xp_retval
500xpc_get_partition_rsvd_page_pa_uv(void *buf, u64 *cookie, unsigned long *rp_pa,
501 size_t *len)
502{
503 /* !!! call the UV version of sn_partition_reserved_page_pa() */
504 return xpUnsupported;
505}
506
507static int
508xpc_setup_rsvd_page_sn_uv(struct xpc_rsvd_page *rp)
509{
510 rp->sn.activate_mq_gpa = uv_gpa(xpc_activate_mq_uv);
511 return 0;
512}
513
514static void
515xpc_send_heartbeat_uv(int msg_type)
516{
517 short partid;
518 struct xpc_partition *part;
519 struct xpc_activate_mq_msg_heartbeat_req_uv msg;
520
521 /*
522 * !!! On uv we're broadcasting a heartbeat message every 5 seconds.
523 * !!! Whereas on sn2 we're bte_copy'ng the heartbeat info every 20
524 * !!! seconds. This is an increase in numalink traffic.
525 * ??? Is this good?
526 */
527
528 msg.heartbeat = atomic64_inc_return(&xpc_heartbeat_uv);
529
530 partid = find_first_bit(xpc_heartbeating_to_mask_uv,
531 XP_MAX_NPARTITIONS_UV);
532
533 while (partid < XP_MAX_NPARTITIONS_UV) {
534 part = &xpc_partitions[partid];
535
536 xpc_send_activate_IRQ_part_uv(part, &msg, sizeof(msg),
537 msg_type);
538
539 partid = find_next_bit(xpc_heartbeating_to_mask_uv,
540 XP_MAX_NPARTITIONS_UV, partid + 1);
541 }
542}
543
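
xpc_send_heartbeat_uv() walks the xpc_heartbeating_to_mask_uv bitmap with find_first_bit()/find_next_bit() and sends one activate-mq message per set bit. A self-contained userspace sketch of that bitmap walk, with the bit helpers written out by hand and illustrative sizes:

/*
 * Illustrative userspace sketch, not part of the patch: the bitmap walk
 * used to pick which partitions receive a heartbeat message.  Sizes and
 * partition numbers are invented.
 */
#include <stdio.h>

#define SKETCH_NBITS 64

static int next_set_bit(const unsigned char *map, int nbits, int start)
{
	for (int i = start; i < nbits; i++)
		if (map[i / 8] & (1u << (i % 8)))
			return i;
	return nbits;			/* no further bits are set */
}

int main(void)
{
	unsigned char heartbeating[SKETCH_NBITS / 8] = { 0 };

	heartbeating[3 / 8] |= 1u << (3 % 8);	/* partitions 3 and 17 */
	heartbeating[17 / 8] |= 1u << (17 % 8);

	for (int partid = next_set_bit(heartbeating, SKETCH_NBITS, 0);
	     partid < SKETCH_NBITS;
	     partid = next_set_bit(heartbeating, SKETCH_NBITS, partid + 1))
		printf("would send a heartbeat message to partition %d\n",
		       partid);

	return 0;
}
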
544static void
545xpc_increment_heartbeat_uv(void)
546{
547 xpc_send_heartbeat_uv(XPC_ACTIVATE_MQ_MSG_INC_HEARTBEAT_UV);
548}
549
550static void
551xpc_offline_heartbeat_uv(void)
552{
553 xpc_send_heartbeat_uv(XPC_ACTIVATE_MQ_MSG_OFFLINE_HEARTBEAT_UV);
554}
555
556static void
557xpc_online_heartbeat_uv(void)
558{
559 xpc_send_heartbeat_uv(XPC_ACTIVATE_MQ_MSG_ONLINE_HEARTBEAT_UV);
560}
561
562static void
563xpc_heartbeat_init_uv(void)
564{
565 atomic64_set(&xpc_heartbeat_uv, 0);
566 bitmap_zero(xpc_heartbeating_to_mask_uv, XP_MAX_NPARTITIONS_UV);
567 xpc_heartbeating_to_mask = &xpc_heartbeating_to_mask_uv[0];
568}
569
570static void
571xpc_heartbeat_exit_uv(void)
572{
573 xpc_send_heartbeat_uv(XPC_ACTIVATE_MQ_MSG_OFFLINE_HEARTBEAT_UV);
574}
575
576static enum xp_retval
577xpc_get_remote_heartbeat_uv(struct xpc_partition *part)
578{
579 struct xpc_partition_uv *part_uv = &part->sn.uv;
580 enum xp_retval ret = xpNoHeartbeat;
581
582 if (part_uv->remote_act_state != XPC_P_AS_INACTIVE &&
583 part_uv->remote_act_state != XPC_P_AS_DEACTIVATING) {
584
585 if (part_uv->heartbeat != part->last_heartbeat ||
586 (part_uv->flags & XPC_P_HEARTBEAT_OFFLINE_UV)) {
587
588 part->last_heartbeat = part_uv->heartbeat;
589 ret = xpSuccess;
590 }
591 }
592 return ret;
593}
594
595static void
596xpc_request_partition_activation_uv(struct xpc_rsvd_page *remote_rp,
597 unsigned long remote_rp_gpa, int nasid)
598{
599 short partid = remote_rp->SAL_partid;
600 struct xpc_partition *part = &xpc_partitions[partid];
601 struct xpc_activate_mq_msg_activate_req_uv msg;
602
603 part->remote_rp_pa = remote_rp_gpa; /* !!! _pa here is really _gpa */
604 part->remote_rp_ts_jiffies = remote_rp->ts_jiffies;
605 part->sn.uv.remote_activate_mq_gpa = remote_rp->sn.activate_mq_gpa;
606
607 /*
608 * ??? Is it a good idea to make this conditional on what is
609 * ??? potentially stale state information?
610 */
611 if (part->sn.uv.remote_act_state == XPC_P_AS_INACTIVE) {
612 msg.rp_gpa = uv_gpa(xpc_rsvd_page);
613 msg.activate_mq_gpa = xpc_rsvd_page->sn.activate_mq_gpa;
614 xpc_send_activate_IRQ_part_uv(part, &msg, sizeof(msg),
615 XPC_ACTIVATE_MQ_MSG_ACTIVATE_REQ_UV);
616 }
617
618 if (part->act_state == XPC_P_AS_INACTIVE)
619 xpc_send_local_activate_IRQ_uv(part, XPC_P_ASR_ACTIVATE_UV);
620}
621
622static void
623xpc_request_partition_reactivation_uv(struct xpc_partition *part)
624{
625 xpc_send_local_activate_IRQ_uv(part, XPC_P_ASR_ACTIVATE_UV);
626}
627
628static void
629xpc_request_partition_deactivation_uv(struct xpc_partition *part)
630{
631 struct xpc_activate_mq_msg_deactivate_req_uv msg;
632
633 /*
634 * ??? Is it a good idea to make this conditional on what is
635 * ??? potentially stale state information?
636 */
637 if (part->sn.uv.remote_act_state != XPC_P_AS_DEACTIVATING &&
638 part->sn.uv.remote_act_state != XPC_P_AS_INACTIVE) {
639
640 msg.reason = part->reason;
641 xpc_send_activate_IRQ_part_uv(part, &msg, sizeof(msg),
642 XPC_ACTIVATE_MQ_MSG_DEACTIVATE_REQ_UV);
643 }
644}
645
646static void
647xpc_cancel_partition_deactivation_request_uv(struct xpc_partition *part)
648{
649 /* nothing needs to be done */
650 return;
651}
652
653static void
654xpc_init_fifo_uv(struct xpc_fifo_head_uv *head)
655{
656 head->first = NULL;
657 head->last = NULL;
658 spin_lock_init(&head->lock);
659 head->n_entries = 0;
660}
661
662static void *
663xpc_get_fifo_entry_uv(struct xpc_fifo_head_uv *head)
664{
665 unsigned long irq_flags;
666 struct xpc_fifo_entry_uv *first;
667
668 spin_lock_irqsave(&head->lock, irq_flags);
669 first = head->first;
670 if (head->first != NULL) {
671 head->first = first->next;
672 if (head->first == NULL)
673 head->last = NULL;
674 head->n_entries--;
675 first->next = NULL;
676 }
677 spin_unlock_irqrestore(&head->lock, irq_flags);
678 return first;
679}
680
681static void
682xpc_put_fifo_entry_uv(struct xpc_fifo_head_uv *head,
683 struct xpc_fifo_entry_uv *last)
684{
685 unsigned long irq_flags;
686
687 last->next = NULL;
688 spin_lock_irqsave(&head->lock, irq_flags);
689 if (head->last != NULL)
690 head->last->next = last;
691 else
692 head->first = last;
693 head->last = last;
694 head->n_entries++;
695 BUG_ON(head->n_entries < 0);
696 spin_unlock_irqrestore(&head->lock, irq_flags);
697}
698
699static int
700xpc_n_of_fifo_entries_uv(struct xpc_fifo_head_uv *head)
701{
702 return head->n_entries;
703}
704
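
The four helpers above form a simple singly linked FIFO guarded by one spinlock: put appends at the tail, get removes from the head (returning NULL when the list is empty), and n_entries tracks how many entries are currently queued. A userspace sketch of the same semantics, with a pthread mutex standing in for the spinlock and made-up names:

/*
 * Illustrative userspace sketch, not part of the patch: the same
 * head/tail FIFO semantics with a pthread mutex instead of a spinlock.
 * fifo_get() returns NULL on an empty list; fifo_put() appends.
 */
#include <stdio.h>
#include <stddef.h>
#include <pthread.h>

struct fifo_entry { struct fifo_entry *next; };

struct fifo_head {
	struct fifo_entry *first;
	struct fifo_entry *last;
	pthread_mutex_t lock;
	int n_entries;
};

static void fifo_init(struct fifo_head *head)
{
	head->first = head->last = NULL;
	pthread_mutex_init(&head->lock, NULL);
	head->n_entries = 0;
}

static struct fifo_entry *fifo_get(struct fifo_head *head)
{
	pthread_mutex_lock(&head->lock);
	struct fifo_entry *first = head->first;
	if (first != NULL) {
		head->first = first->next;
		if (head->first == NULL)
			head->last = NULL;
		head->n_entries--;
		first->next = NULL;
	}
	pthread_mutex_unlock(&head->lock);
	return first;
}

static void fifo_put(struct fifo_head *head, struct fifo_entry *last)
{
	last->next = NULL;
	pthread_mutex_lock(&head->lock);
	if (head->last != NULL)
		head->last->next = last;
	else
		head->first = last;
	head->last = last;
	head->n_entries++;
	pthread_mutex_unlock(&head->lock);
}

int main(void)
{
	struct fifo_head head;
	struct fifo_entry a, b;

	fifo_init(&head);
	fifo_put(&head, &a);
	fifo_put(&head, &b);
	printf("entries queued: %d\n", head.n_entries);	/* 2 */

	struct fifo_entry *first = fifo_get(&head);
	struct fifo_entry *second = fifo_get(&head);

	printf("FIFO order ok: %d, empty afterwards: %d\n",
	       first == &a && second == &b, fifo_get(&head) == NULL);
	return 0;
}
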
705/*
706 * Set up the channel structures that are uv specific.
707 */
708static enum xp_retval
709xpc_setup_ch_structures_sn_uv(struct xpc_partition *part)
710{
711 struct xpc_channel_uv *ch_uv;
712 int ch_number;
713
714 for (ch_number = 0; ch_number < part->nchannels; ch_number++) {
715 ch_uv = &part->channels[ch_number].sn.uv;
716
717 xpc_init_fifo_uv(&ch_uv->msg_slot_free_list);
718 xpc_init_fifo_uv(&ch_uv->recv_msg_list);
719 }
720
721 return xpSuccess;
722}
723
724/*
725 * Tear down the channel structures that are uv specific.
726 */
727static void
728xpc_teardown_ch_structures_sn_uv(struct xpc_partition *part)
729{
730 /* nothing needs to be done */
731 return;
732}
733
734static enum xp_retval
735xpc_make_first_contact_uv(struct xpc_partition *part)
736{
737 struct xpc_activate_mq_msg_uv msg;
738
739 /*
740 * We send a sync msg to get the remote partition's remote_act_state
741 * updated to our current act_state which at this point should
742 * be XPC_P_AS_ACTIVATING.
743 */
744 xpc_send_activate_IRQ_part_uv(part, &msg, sizeof(msg),
745 XPC_ACTIVATE_MQ_MSG_SYNC_ACT_STATE_UV);
746
747 while (part->sn.uv.remote_act_state != XPC_P_AS_ACTIVATING) {
748
749 dev_dbg(xpc_part, "waiting to make first contact with "
750 "partition %d\n", XPC_PARTID(part));
751
752 /* wait a 1/4 of a second or so */
753 (void)msleep_interruptible(250);
754
755 if (part->act_state == XPC_P_AS_DEACTIVATING)
756 return part->reason;
757 }
758
759 return xpSuccess;
760}
761
762static u64
763xpc_get_chctl_all_flags_uv(struct xpc_partition *part)
764{
765 unsigned long irq_flags;
766 union xpc_channel_ctl_flags chctl;
767
768 spin_lock_irqsave(&part->chctl_lock, irq_flags);
769 chctl = part->chctl;
770 if (chctl.all_flags != 0)
771 part->chctl.all_flags = 0;
772
773 spin_unlock_irqrestore(&part->chctl_lock, irq_flags);
774 return chctl.all_flags;
775}
776
777static enum xp_retval
778xpc_allocate_send_msg_slot_uv(struct xpc_channel *ch)
779{
780 struct xpc_channel_uv *ch_uv = &ch->sn.uv;
781 struct xpc_send_msg_slot_uv *msg_slot;
782 unsigned long irq_flags;
783 int nentries;
784 int entry;
785 size_t nbytes;
786
787 for (nentries = ch->local_nentries; nentries > 0; nentries--) {
788 nbytes = nentries * sizeof(struct xpc_send_msg_slot_uv);
789 ch_uv->send_msg_slots = kzalloc(nbytes, GFP_KERNEL);
790 if (ch_uv->send_msg_slots == NULL)
791 continue;
792
793 for (entry = 0; entry < nentries; entry++) {
794 msg_slot = &ch_uv->send_msg_slots[entry];
795
796 msg_slot->msg_slot_number = entry;
797 xpc_put_fifo_entry_uv(&ch_uv->msg_slot_free_list,
798 &msg_slot->next);
799 }
800
801 spin_lock_irqsave(&ch->lock, irq_flags);
802 if (nentries < ch->local_nentries)
803 ch->local_nentries = nentries;
804 spin_unlock_irqrestore(&ch->lock, irq_flags);
805 return xpSuccess;
806 }
807
808 return xpNoMemory;
809}
810
811static enum xp_retval
812xpc_allocate_recv_msg_slot_uv(struct xpc_channel *ch)
813{
814 struct xpc_channel_uv *ch_uv = &ch->sn.uv;
815 struct xpc_notify_mq_msg_uv *msg_slot;
816 unsigned long irq_flags;
817 int nentries;
818 int entry;
819 size_t nbytes;
820
821 for (nentries = ch->remote_nentries; nentries > 0; nentries--) {
822 nbytes = nentries * ch->entry_size;
823 ch_uv->recv_msg_slots = kzalloc(nbytes, GFP_KERNEL);
824 if (ch_uv->recv_msg_slots == NULL)
825 continue;
826
827 for (entry = 0; entry < nentries; entry++) {
828 msg_slot = ch_uv->recv_msg_slots + entry *
829 ch->entry_size;
830
831 msg_slot->hdr.msg_slot_number = entry;
832 }
833
834 spin_lock_irqsave(&ch->lock, irq_flags);
835 if (nentries < ch->remote_nentries)
836 ch->remote_nentries = nentries;
837 spin_unlock_irqrestore(&ch->lock, irq_flags);
838 return xpSuccess;
839 }
840
841 return xpNoMemory;
842}
843
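
Both msg-slot allocators above retry with progressively fewer entries until kzalloc() succeeds, then lower the channel's nentries to whatever actually fit. A self-contained userspace sketch of that shrink-until-it-fits pattern, with a fixed byte budget standing in for allocation failure:

/*
 * Illustrative userspace sketch, not part of the patch: keep reducing
 * the entry count until the allocation fits, and report how many
 * entries the caller actually gets.  The budget and sizes are invented.
 */
#include <stdio.h>
#include <stdlib.h>

#define ENTRY_SIZE	128
#define FAKE_BUDGET	(5 * ENTRY_SIZE)	/* pretend memory is tight */

static void *budgeted_alloc(size_t nbytes)
{
	return (nbytes <= FAKE_BUDGET) ? calloc(1, nbytes) : NULL;
}

static int allocate_slots(int wanted_nentries, void **slots_out)
{
	for (int nentries = wanted_nentries; nentries > 0; nentries--) {
		void *slots = budgeted_alloc((size_t)nentries * ENTRY_SIZE);
		if (slots == NULL)
			continue;	/* too big, try one entry fewer */
		*slots_out = slots;
		return nentries;	/* caller lowers its nentries to this */
	}
	return 0;			/* nothing fit: out of memory */
}

int main(void)
{
	void *slots = NULL;
	int got = allocate_slots(8, &slots);

	printf("asked for 8 entries, got %d\n", got);	/* prints 5 */
	free(slots);
	return 0;
}
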
844/*
845 * Allocate msg_slots associated with the channel.
846 */
847static enum xp_retval
848xpc_setup_msg_structures_uv(struct xpc_channel *ch)
849{
850 enum xp_retval ret;
851 struct xpc_channel_uv *ch_uv = &ch->sn.uv;
852
853 DBUG_ON(ch->flags & XPC_C_SETUP);
854
855 ret = xpc_allocate_send_msg_slot_uv(ch);
856 if (ret == xpSuccess) {
857
858 ret = xpc_allocate_recv_msg_slot_uv(ch);
859 if (ret != xpSuccess) {
860 kfree(ch_uv->send_msg_slots);
861 xpc_init_fifo_uv(&ch_uv->msg_slot_free_list);
862 }
863 }
864 return ret;
865}
866
867/*
868 * Free up msg_slots and clear other stuff that was set up for the specified
869 * channel.
870 */
871static void
872xpc_teardown_msg_structures_uv(struct xpc_channel *ch)
873{
874 struct xpc_channel_uv *ch_uv = &ch->sn.uv;
875
876 DBUG_ON(!spin_is_locked(&ch->lock));
877
878 ch_uv->remote_notify_mq_gpa = 0;
879
880 if (ch->flags & XPC_C_SETUP) {
881 xpc_init_fifo_uv(&ch_uv->msg_slot_free_list);
882 kfree(ch_uv->send_msg_slots);
883 xpc_init_fifo_uv(&ch_uv->recv_msg_list);
884 kfree(ch_uv->recv_msg_slots);
885 }
886}
887
888static void
889xpc_send_chctl_closerequest_uv(struct xpc_channel *ch, unsigned long *irq_flags)
890{
891 struct xpc_activate_mq_msg_chctl_closerequest_uv msg;
892
893 msg.ch_number = ch->number;
894 msg.reason = ch->reason;
895 xpc_send_activate_IRQ_ch_uv(ch, irq_flags, &msg, sizeof(msg),
896 XPC_ACTIVATE_MQ_MSG_CHCTL_CLOSEREQUEST_UV);
897}
898
899static void
900xpc_send_chctl_closereply_uv(struct xpc_channel *ch, unsigned long *irq_flags)
901{
902 struct xpc_activate_mq_msg_chctl_closereply_uv msg;
903
904 msg.ch_number = ch->number;
905 xpc_send_activate_IRQ_ch_uv(ch, irq_flags, &msg, sizeof(msg),
906 XPC_ACTIVATE_MQ_MSG_CHCTL_CLOSEREPLY_UV);
907}
908
909static void
910xpc_send_chctl_openrequest_uv(struct xpc_channel *ch, unsigned long *irq_flags)
911{
912 struct xpc_activate_mq_msg_chctl_openrequest_uv msg;
913
914 msg.ch_number = ch->number;
915 msg.entry_size = ch->entry_size;
916 msg.local_nentries = ch->local_nentries;
917 xpc_send_activate_IRQ_ch_uv(ch, irq_flags, &msg, sizeof(msg),
918 XPC_ACTIVATE_MQ_MSG_CHCTL_OPENREQUEST_UV);
919}
920
921static void
922xpc_send_chctl_openreply_uv(struct xpc_channel *ch, unsigned long *irq_flags)
923{
924 struct xpc_activate_mq_msg_chctl_openreply_uv msg;
925
926 msg.ch_number = ch->number;
927 msg.local_nentries = ch->local_nentries;
928 msg.remote_nentries = ch->remote_nentries;
929 msg.local_notify_mq_gpa = uv_gpa(xpc_notify_mq_uv);
930 xpc_send_activate_IRQ_ch_uv(ch, irq_flags, &msg, sizeof(msg),
931 XPC_ACTIVATE_MQ_MSG_CHCTL_OPENREPLY_UV);
932}
933
934static void
935xpc_send_chctl_local_msgrequest_uv(struct xpc_partition *part, int ch_number)
936{
937 unsigned long irq_flags;
938
939 spin_lock_irqsave(&part->chctl_lock, irq_flags);
940 part->chctl.flags[ch_number] |= XPC_CHCTL_MSGREQUEST;
941 spin_unlock_irqrestore(&part->chctl_lock, irq_flags);
942
943 xpc_wakeup_channel_mgr(part);
944}
945
946static void
947xpc_save_remote_msgqueue_pa_uv(struct xpc_channel *ch,
948 unsigned long msgqueue_pa)
949{
950 ch->sn.uv.remote_notify_mq_gpa = msgqueue_pa;
951}
952
953static void
954xpc_indicate_partition_engaged_uv(struct xpc_partition *part)
955{
956 struct xpc_activate_mq_msg_uv msg;
957
958 xpc_send_activate_IRQ_part_uv(part, &msg, sizeof(msg),
959 XPC_ACTIVATE_MQ_MSG_MARK_ENGAGED_UV);
960}
961
962static void
963xpc_indicate_partition_disengaged_uv(struct xpc_partition *part)
964{
965 struct xpc_activate_mq_msg_uv msg;
966
967 xpc_send_activate_IRQ_part_uv(part, &msg, sizeof(msg),
968 XPC_ACTIVATE_MQ_MSG_MARK_DISENGAGED_UV);
969}
970
971static void
972xpc_assume_partition_disengaged_uv(short partid)
973{
974 struct xpc_partition_uv *part_uv = &xpc_partitions[partid].sn.uv;
975 unsigned long irq_flags;
976
977 spin_lock_irqsave(&part_uv->flags_lock, irq_flags);
978 part_uv->flags &= ~XPC_P_ENGAGED_UV;
979 spin_unlock_irqrestore(&part_uv->flags_lock, irq_flags);
980}
981
982static int
983xpc_partition_engaged_uv(short partid)
984{
985 return (xpc_partitions[partid].sn.uv.flags & XPC_P_ENGAGED_UV) != 0;
986}
987
988static int
989xpc_any_partition_engaged_uv(void)
990{
991 struct xpc_partition_uv *part_uv;
992 short partid;
993
994 for (partid = 0; partid < XP_MAX_NPARTITIONS_UV; partid++) {
995 part_uv = &xpc_partitions[partid].sn.uv;
996 if ((part_uv->flags & XPC_P_ENGAGED_UV) != 0)
997 return 1;
998 }
999 return 0;
1000}
1001
1002static enum xp_retval
1003xpc_allocate_msg_slot_uv(struct xpc_channel *ch, u32 flags,
1004 struct xpc_send_msg_slot_uv **address_of_msg_slot)
1005{
1006 enum xp_retval ret;
1007 struct xpc_send_msg_slot_uv *msg_slot;
1008 struct xpc_fifo_entry_uv *entry;
1009
1010 while (1) {
1011 entry = xpc_get_fifo_entry_uv(&ch->sn.uv.msg_slot_free_list);
1012 if (entry != NULL)
1013 break;
1014
1015 if (flags & XPC_NOWAIT)
1016 return xpNoWait;
1017
1018 ret = xpc_allocate_msg_wait(ch);
1019 if (ret != xpInterrupted && ret != xpTimeout)
1020 return ret;
1021 }
1022
1023 msg_slot = container_of(entry, struct xpc_send_msg_slot_uv, next);
1024 *address_of_msg_slot = msg_slot;
1025 return xpSuccess;
1026}
1027
1028static void
1029xpc_free_msg_slot_uv(struct xpc_channel *ch,
1030 struct xpc_send_msg_slot_uv *msg_slot)
1031{
1032 xpc_put_fifo_entry_uv(&ch->sn.uv.msg_slot_free_list, &msg_slot->next);
1033
1034 /* wakeup anyone waiting for a free msg slot */
1035 if (atomic_read(&ch->n_on_msg_allocate_wq) > 0)
1036 wake_up(&ch->msg_allocate_wq);
1037}
1038
1039static void
1040xpc_notify_sender_uv(struct xpc_channel *ch,
1041 struct xpc_send_msg_slot_uv *msg_slot,
1042 enum xp_retval reason)
1043{
1044 xpc_notify_func func = msg_slot->func;
1045
1046 if (func != NULL && cmpxchg(&msg_slot->func, func, NULL) == func) {
1047
1048 atomic_dec(&ch->n_to_notify);
1049
1050 dev_dbg(xpc_chan, "msg_slot->func() called, msg_slot=0x%p "
1051 "msg_slot_number=%d partid=%d channel=%d\n", msg_slot,
1052 msg_slot->msg_slot_number, ch->partid, ch->number);
1053
1054 func(reason, ch->partid, ch->number, msg_slot->key);
1055
1056 dev_dbg(xpc_chan, "msg_slot->func() returned, msg_slot=0x%p "
1057 "msg_slot_number=%d partid=%d channel=%d\n", msg_slot,
1058 msg_slot->msg_slot_number, ch->partid, ch->number);
1059 }
1060}
1061
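
xpc_notify_sender_uv() above uses cmpxchg() so that exactly one path, either the delivery ACK or the disconnect cleanup, claims and runs the sender's notify callback. A userspace sketch of that claim-once pattern using C11 atomics; the names are made up:

/*
 * Illustrative userspace sketch, not part of the patch: whichever path
 * swaps the callback pointer to NULL first gets to run it; the loser
 * does nothing, so the callout happens exactly once.
 */
#include <stdio.h>
#include <stdatomic.h>

typedef void (*notify_func)(const char *who);

static void notify(const char *who) { printf("notified by %s\n", who); }

static _Atomic(notify_func) slot_func = notify;

static void try_notify(const char *who)
{
	notify_func expected = atomic_load(&slot_func);

	if (expected != NULL &&
	    atomic_compare_exchange_strong(&slot_func, &expected, NULL))
		expected(who);	/* we won the race; run the callout once */
}

int main(void)
{
	try_notify("delivery ack path");	/* runs the callback */
	try_notify("disconnect path");		/* already claimed: no-op */
	return 0;
}
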
1062static void
1063xpc_handle_notify_mq_ack_uv(struct xpc_channel *ch,
1064 struct xpc_notify_mq_msg_uv *msg)
1065{
1066 struct xpc_send_msg_slot_uv *msg_slot;
1067 int entry = msg->hdr.msg_slot_number % ch->local_nentries;
1068
1069 msg_slot = &ch->sn.uv.send_msg_slots[entry];
1070
1071 BUG_ON(msg_slot->msg_slot_number != msg->hdr.msg_slot_number);
1072 msg_slot->msg_slot_number += ch->local_nentries;
1073
1074 if (msg_slot->func != NULL)
1075 xpc_notify_sender_uv(ch, msg_slot, xpMsgDelivered);
1076
1077 xpc_free_msg_slot_uv(ch, msg_slot);
1078}
1079
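
The ACK handler above maps a message's msg_slot_number onto a send slot with a modulo by local_nentries, checks that the slot still expects exactly that number, and then advances the slot's number by local_nentries so it matches the slot's next use. A small standalone sketch of that arithmetic:

/*
 * Illustrative userspace sketch, not part of the patch: message numbers
 * increase monotonically, the slot index is number % nentries, and each
 * ACK bumps the slot's expected number by nentries for its next reuse.
 */
#include <stdio.h>

#define NENTRIES 4

int main(void)
{
	int slot_expected[NENTRIES];
	int mismatches = 0;

	for (int i = 0; i < NENTRIES; i++)
		slot_expected[i] = i;		/* initial msg_slot_number */

	for (int msg_number = 0; msg_number < 10; msg_number++) {
		int entry = msg_number % NENTRIES;

		/* the ACK handler checks this invariant with BUG_ON() */
		if (slot_expected[entry] != msg_number)
			mismatches++;

		slot_expected[entry] += NENTRIES;	/* ready for reuse */
	}
	printf("mismatched acks: %d\n", mismatches);	/* prints 0 */
	return 0;
}
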
1080static void
1081xpc_handle_notify_mq_msg_uv(struct xpc_partition *part,
1082 struct xpc_notify_mq_msg_uv *msg)
1083{
1084 struct xpc_partition_uv *part_uv = &part->sn.uv;
1085 struct xpc_channel *ch;
1086 struct xpc_channel_uv *ch_uv;
1087 struct xpc_notify_mq_msg_uv *msg_slot;
1088 unsigned long irq_flags;
1089 int ch_number = msg->hdr.ch_number;
1090
1091 if (unlikely(ch_number >= part->nchannels)) {
1092 dev_err(xpc_part, "xpc_handle_notify_IRQ_uv() received invalid "
1093 "channel number=0x%x in message from partid=%d\n",
1094 ch_number, XPC_PARTID(part));
1095
1096 /* get hb checker to deactivate from the remote partition */
1097 spin_lock_irqsave(&xpc_activate_IRQ_rcvd_lock, irq_flags);
1098 if (part_uv->act_state_req == 0)
1099 xpc_activate_IRQ_rcvd++;
1100 part_uv->act_state_req = XPC_P_ASR_DEACTIVATE_UV;
1101 part_uv->reason = xpBadChannelNumber;
1102 spin_unlock_irqrestore(&xpc_activate_IRQ_rcvd_lock, irq_flags);
1103
1104 wake_up_interruptible(&xpc_activate_IRQ_wq);
1105 return;
1106 }
1107
1108 ch = &part->channels[ch_number];
1109 xpc_msgqueue_ref(ch);
1110
1111 if (!(ch->flags & XPC_C_CONNECTED)) {
1112 xpc_msgqueue_deref(ch);
1113 return;
1114 }
1115
1116 /* see if we're really dealing with an ACK for a previously sent msg */
1117 if (msg->hdr.size == 0) {
1118 xpc_handle_notify_mq_ack_uv(ch, msg);
1119 xpc_msgqueue_deref(ch);
1120 return;
1121 }
1122
1123 /* we're dealing with a normal message sent via the notify_mq */
1124 ch_uv = &ch->sn.uv;
1125
1126 msg_slot = (struct xpc_notify_mq_msg_uv *)((u64)ch_uv->recv_msg_slots +
1127 (msg->hdr.msg_slot_number % ch->remote_nentries) *
1128 ch->entry_size);
1129
1130 BUG_ON(msg->hdr.msg_slot_number != msg_slot->hdr.msg_slot_number);
1131 BUG_ON(msg_slot->hdr.size != 0);
1132
1133 memcpy(msg_slot, msg, msg->hdr.size);
1134
1135 xpc_put_fifo_entry_uv(&ch_uv->recv_msg_list, &msg_slot->hdr.u.next);
1136
1137 if (ch->flags & XPC_C_CONNECTEDCALLOUT_MADE) {
1138 /*
1139 * If there is an existing idle kthread get it to deliver
1140 * the payload, otherwise we'll have to get the channel mgr
1141 * for this partition to create a kthread to do the delivery.
1142 */
1143 if (atomic_read(&ch->kthreads_idle) > 0)
1144 wake_up_nr(&ch->idle_wq, 1);
1145 else
1146 xpc_send_chctl_local_msgrequest_uv(part, ch->number);
1147 }
1148 xpc_msgqueue_deref(ch);
1149}
1150
1151static irqreturn_t
1152xpc_handle_notify_IRQ_uv(int irq, void *dev_id)
1153{
1154 struct xpc_notify_mq_msg_uv *msg;
1155 short partid;
1156 struct xpc_partition *part;
1157
1158 while ((msg = gru_get_next_message(xpc_notify_mq_uv)) != NULL) {
1159
1160 partid = msg->hdr.partid;
1161 if (partid < 0 || partid >= XP_MAX_NPARTITIONS_UV) {
1162 dev_err(xpc_part, "xpc_handle_notify_IRQ_uv() received "
1163 "invalid partid=0x%x in message\n", partid);
1164 } else {
1165 part = &xpc_partitions[partid];
1166
1167 if (xpc_part_ref(part)) {
1168 xpc_handle_notify_mq_msg_uv(part, msg);
1169 xpc_part_deref(part);
1170 }
1171 }
1172
1173 gru_free_message(xpc_notify_mq_uv, msg);
1174 }
1175
1176 return IRQ_HANDLED;
1177}
1178
1179static int
1180xpc_n_of_deliverable_payloads_uv(struct xpc_channel *ch)
1181{
1182 return xpc_n_of_fifo_entries_uv(&ch->sn.uv.recv_msg_list);
1183}
1184
1185static void
1186xpc_process_msg_chctl_flags_uv(struct xpc_partition *part, int ch_number)
1187{
1188 struct xpc_channel *ch = &part->channels[ch_number];
1189 int ndeliverable_payloads;
1190
1191 xpc_msgqueue_ref(ch);
1192
1193 ndeliverable_payloads = xpc_n_of_deliverable_payloads_uv(ch);
1194
1195 if (ndeliverable_payloads > 0 &&
1196 (ch->flags & XPC_C_CONNECTED) &&
1197 (ch->flags & XPC_C_CONNECTEDCALLOUT_MADE)) {
1198
1199 xpc_activate_kthreads(ch, ndeliverable_payloads);
1200 }
1201
1202 xpc_msgqueue_deref(ch);
1203}
1204
1205static enum xp_retval
1206xpc_send_payload_uv(struct xpc_channel *ch, u32 flags, void *payload,
1207 u16 payload_size, u8 notify_type, xpc_notify_func func,
1208 void *key)
1209{
1210 enum xp_retval ret = xpSuccess;
1211 struct xpc_send_msg_slot_uv *msg_slot = NULL;
1212 struct xpc_notify_mq_msg_uv *msg;
1213 u8 msg_buffer[XPC_NOTIFY_MSG_SIZE_UV];
1214 size_t msg_size;
1215
1216 DBUG_ON(notify_type != XPC_N_CALL);
1217
1218 msg_size = sizeof(struct xpc_notify_mq_msghdr_uv) + payload_size;
1219 if (msg_size > ch->entry_size)
1220 return xpPayloadTooBig;
1221
1222 xpc_msgqueue_ref(ch);
1223
1224 if (ch->flags & XPC_C_DISCONNECTING) {
1225 ret = ch->reason;
1226 goto out_1;
1227 }
1228 if (!(ch->flags & XPC_C_CONNECTED)) {
1229 ret = xpNotConnected;
1230 goto out_1;
1231 }
1232
1233 ret = xpc_allocate_msg_slot_uv(ch, flags, &msg_slot);
1234 if (ret != xpSuccess)
1235 goto out_1;
1236
1237 if (func != NULL) {
1238 atomic_inc(&ch->n_to_notify);
1239
1240 msg_slot->key = key;
1241 wmb(); /* a non-NULL func must hit memory after the key */
1242 msg_slot->func = func;
1243
1244 if (ch->flags & XPC_C_DISCONNECTING) {
1245 ret = ch->reason;
1246 goto out_2;
1247 }
1248 }
1249
1250 msg = (struct xpc_notify_mq_msg_uv *)&msg_buffer;
1251 msg->hdr.partid = xp_partition_id;
1252 msg->hdr.ch_number = ch->number;
1253 msg->hdr.size = msg_size;
1254 msg->hdr.msg_slot_number = msg_slot->msg_slot_number;
1255 memcpy(&msg->payload, payload, payload_size);
1256
1257 ret = xpc_send_gru_msg(ch->sn.uv.remote_notify_mq_gpa, msg, msg_size);
1258 if (ret == xpSuccess)
1259 goto out_1;
1260
1261 XPC_DEACTIVATE_PARTITION(&xpc_partitions[ch->partid], ret);
1262out_2:
1263 if (func != NULL) {
1264 /*
1265 * Try to NULL the msg_slot's func field. If we fail, then
1266 * xpc_notify_senders_of_disconnect_uv() beat us to it, in which
1267 * case we need to pretend that we succeeded in sending the message
1268 * since the user will get a callout for the disconnect error
1269 * from xpc_notify_senders_of_disconnect_uv(), and also getting an
1270 * error returned here would confuse them. Additionally, since
1271 * in this case the channel is being disconnected we don't need
1272 * to put the msg_slot back on the free list.
1273 */
1274 if (cmpxchg(&msg_slot->func, func, NULL) != func) {
1275 ret = xpSuccess;
1276 goto out_1;
1277 }
1278
1279 msg_slot->key = NULL;
1280 atomic_dec(&ch->n_to_notify);
1281 }
1282 xpc_free_msg_slot_uv(ch, msg_slot);
1283out_1:
1284 xpc_msgqueue_deref(ch);
1285 return ret;
1286}
1287
1288/*
1289 * Tell the callers of xpc_send_notify() that the status of their payloads
1290 * is unknown because the channel is now disconnecting.
1291 *
1292 * We don't worry about putting these msg_slots on the free list since the
1293 * msg_slots themselves are about to be kfree'd.
1294 */
1295static void
1296xpc_notify_senders_of_disconnect_uv(struct xpc_channel *ch)
1297{
1298 struct xpc_send_msg_slot_uv *msg_slot;
1299 int entry;
1300
1301 DBUG_ON(!(ch->flags & XPC_C_DISCONNECTING));
1302
1303 for (entry = 0; entry < ch->local_nentries; entry++) {
1304
1305 if (atomic_read(&ch->n_to_notify) == 0)
1306 break;
1307
1308 msg_slot = &ch->sn.uv.send_msg_slots[entry];
1309 if (msg_slot->func != NULL)
1310 xpc_notify_sender_uv(ch, msg_slot, ch->reason);
1311 }
1312}
1313
1314/*
1315 * Get the next deliverable message's payload.
1316 */
1317static void *
1318xpc_get_deliverable_payload_uv(struct xpc_channel *ch)
1319{
1320 struct xpc_fifo_entry_uv *entry;
1321 struct xpc_notify_mq_msg_uv *msg;
1322 void *payload = NULL;
1323
1324 if (!(ch->flags & XPC_C_DISCONNECTING)) {
1325 entry = xpc_get_fifo_entry_uv(&ch->sn.uv.recv_msg_list);
1326 if (entry != NULL) {
1327 msg = container_of(entry, struct xpc_notify_mq_msg_uv,
1328 hdr.u.next);
1329 payload = &msg->payload;
1330 }
1331 }
1332 return payload;
1333}
1334
1335static void
1336xpc_received_payload_uv(struct xpc_channel *ch, void *payload)
1337{
1338 struct xpc_notify_mq_msg_uv *msg;
1339 enum xp_retval ret;
1340
1341 msg = container_of(payload, struct xpc_notify_mq_msg_uv, payload);
1342
1343 /* return an ACK to the sender of this message */
1344
1345 msg->hdr.partid = xp_partition_id;
1346 msg->hdr.size = 0; /* size of zero indicates this is an ACK */
1347
1348 ret = xpc_send_gru_msg(ch->sn.uv.remote_notify_mq_gpa, msg,
1349 sizeof(struct xpc_notify_mq_msghdr_uv));
1350 if (ret != xpSuccess)
1351 XPC_DEACTIVATE_PARTITION(&xpc_partitions[ch->partid], ret);
1352
1353 msg->hdr.msg_slot_number += ch->remote_nentries;
1354}
1355
1356int
1357xpc_init_uv(void)
1358{
1359 xpc_setup_partitions_sn = xpc_setup_partitions_sn_uv;
1360 xpc_process_activate_IRQ_rcvd = xpc_process_activate_IRQ_rcvd_uv;
1361 xpc_get_partition_rsvd_page_pa = xpc_get_partition_rsvd_page_pa_uv;
1362 xpc_setup_rsvd_page_sn = xpc_setup_rsvd_page_sn_uv;
1363 xpc_increment_heartbeat = xpc_increment_heartbeat_uv;
1364 xpc_offline_heartbeat = xpc_offline_heartbeat_uv;
1365 xpc_online_heartbeat = xpc_online_heartbeat_uv;
1366 xpc_heartbeat_init = xpc_heartbeat_init_uv;
1367 xpc_heartbeat_exit = xpc_heartbeat_exit_uv;
1368 xpc_get_remote_heartbeat = xpc_get_remote_heartbeat_uv;
1369
1370 xpc_request_partition_activation = xpc_request_partition_activation_uv;
1371 xpc_request_partition_reactivation =
1372 xpc_request_partition_reactivation_uv;
1373 xpc_request_partition_deactivation =
1374 xpc_request_partition_deactivation_uv;
1375 xpc_cancel_partition_deactivation_request =
1376 xpc_cancel_partition_deactivation_request_uv;
1377
1378 xpc_setup_ch_structures_sn = xpc_setup_ch_structures_sn_uv;
1379 xpc_teardown_ch_structures_sn = xpc_teardown_ch_structures_sn_uv;
1380
1381 xpc_make_first_contact = xpc_make_first_contact_uv;
1382
1383 xpc_get_chctl_all_flags = xpc_get_chctl_all_flags_uv;
1384 xpc_send_chctl_closerequest = xpc_send_chctl_closerequest_uv;
1385 xpc_send_chctl_closereply = xpc_send_chctl_closereply_uv;
1386 xpc_send_chctl_openrequest = xpc_send_chctl_openrequest_uv;
1387 xpc_send_chctl_openreply = xpc_send_chctl_openreply_uv;
1388
1389 xpc_save_remote_msgqueue_pa = xpc_save_remote_msgqueue_pa_uv;
1390
1391 xpc_setup_msg_structures = xpc_setup_msg_structures_uv;
1392 xpc_teardown_msg_structures = xpc_teardown_msg_structures_uv;
1393
1394 xpc_indicate_partition_engaged = xpc_indicate_partition_engaged_uv;
1395 xpc_indicate_partition_disengaged =
1396 xpc_indicate_partition_disengaged_uv;
1397 xpc_assume_partition_disengaged = xpc_assume_partition_disengaged_uv;
1398 xpc_partition_engaged = xpc_partition_engaged_uv;
1399 xpc_any_partition_engaged = xpc_any_partition_engaged_uv;
1400
1401 xpc_n_of_deliverable_payloads = xpc_n_of_deliverable_payloads_uv;
1402 xpc_process_msg_chctl_flags = xpc_process_msg_chctl_flags_uv;
1403 xpc_send_payload = xpc_send_payload_uv;
1404 xpc_notify_senders_of_disconnect = xpc_notify_senders_of_disconnect_uv;
1405 xpc_get_deliverable_payload = xpc_get_deliverable_payload_uv;
1406 xpc_received_payload = xpc_received_payload_uv;
1407
1408 if (sizeof(struct xpc_notify_mq_msghdr_uv) > XPC_MSG_HDR_MAX_SIZE) {
1409 dev_err(xpc_part, "xpc_notify_mq_msghdr_uv is larger than %d\n",
1410 XPC_MSG_HDR_MAX_SIZE);
1411 return -E2BIG;
1412 }
1413
1414 /* ??? The cpuid argument's value is 0, is that what we want? */
1415 /* !!! The irq argument's value isn't correct. */
1416 xpc_activate_mq_uv = xpc_create_gru_mq_uv(XPC_ACTIVATE_MQ_SIZE_UV, 0, 0,
1417 xpc_handle_activate_IRQ_uv);
1418 if (xpc_activate_mq_uv == NULL)
1419 return -ENOMEM;
1420
1421 /* ??? The cpuid argument's value is 0, is that what we want? */
1422 /* !!! The irq argument's value isn't correct. */
1423 xpc_notify_mq_uv = xpc_create_gru_mq_uv(XPC_NOTIFY_MQ_SIZE_UV, 0, 0,
1424 xpc_handle_notify_IRQ_uv);
1425 if (xpc_notify_mq_uv == NULL) {
1426 /* !!! The irq argument's value isn't correct. */
1427 xpc_destroy_gru_mq_uv(xpc_activate_mq_uv,
1428 XPC_ACTIVATE_MQ_SIZE_UV, 0);
1429 return -ENOMEM;
1430 }
1431
1432 return 0;
1433}
1434
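
xpc_init_uv(), like the sn2 init routine earlier in this patch, selects the architecture-specific implementation at run time by filling in the global xpc_* function pointers that the common code then calls. A tiny userspace sketch of that dispatch idea, with made-up names:

/*
 * Illustrative userspace sketch, not part of the patch: an init routine
 * for one backend fills in function pointers that the common code calls
 * without knowing which backend is active.
 */
#include <stdio.h>

static void heartbeat_a(void) { printf("backend A heartbeat\n"); }
static void heartbeat_b(void) { printf("backend B heartbeat\n"); }

/* pointer the "common" code calls without knowing the backend */
static void (*do_heartbeat)(void);

static int init_backend_a(void) { do_heartbeat = heartbeat_a; return 0; }
static int init_backend_b(void) { do_heartbeat = heartbeat_b; return 0; }

int main(void)
{
	/* pick the backend at run time, as the xpc init routines do */
	int use_b = 1;

	if (use_b ? init_backend_b() : init_backend_a())
		return 1;

	do_heartbeat();		/* common code path, backend-specific body */
	return 0;
}
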
1435void
1436xpc_exit_uv(void)
1437{
1438 /* !!! The irq argument's value isn't correct. */
1439 xpc_destroy_gru_mq_uv(xpc_notify_mq_uv, XPC_NOTIFY_MQ_SIZE_UV, 0);
1440
1441 /* !!! The irq argument's value isn't correct. */
1442 xpc_destroy_gru_mq_uv(xpc_activate_mq_uv, XPC_ACTIVATE_MQ_SIZE_UV, 0);
1443}
diff --git a/drivers/misc/sgi-xp/xpnet.c b/drivers/misc/sgi-xp/xpnet.c
index 822dc8e8d7f0..71513b3af708 100644
--- a/drivers/misc/sgi-xp/xpnet.c
+++ b/drivers/misc/sgi-xp/xpnet.c
@@ -21,21 +21,8 @@
21 */ 21 */
22 22
23#include <linux/module.h> 23#include <linux/module.h>
24#include <linux/types.h>
25#include <linux/kernel.h>
26#include <linux/init.h>
27#include <linux/ioport.h>
28#include <linux/netdevice.h> 24#include <linux/netdevice.h>
29#include <linux/etherdevice.h> 25#include <linux/etherdevice.h>
30#include <linux/delay.h>
31#include <linux/ethtool.h>
32#include <linux/mii.h>
33#include <linux/smp.h>
34#include <linux/string.h>
35#include <asm/sn/bte.h>
36#include <asm/sn/io.h>
37#include <asm/sn/sn_sal.h>
38#include <asm/atomic.h>
39#include "xp.h" 26#include "xp.h"
40 27
41/* 28/*
@@ -57,7 +44,7 @@ struct xpnet_message {
57 u16 version; /* Version for this message */ 44 u16 version; /* Version for this message */
58 u16 embedded_bytes; /* #of bytes embedded in XPC message */ 45 u16 embedded_bytes; /* #of bytes embedded in XPC message */
59 u32 magic; /* Special number indicating this is xpnet */ 46 u32 magic; /* Special number indicating this is xpnet */
60 u64 buf_pa; /* phys address of buffer to retrieve */ 47 unsigned long buf_pa; /* phys address of buffer to retrieve */
61 u32 size; /* #of bytes in buffer */ 48 u32 size; /* #of bytes in buffer */
62 u8 leadin_ignore; /* #of bytes to ignore at the beginning */ 49 u8 leadin_ignore; /* #of bytes to ignore at the beginning */
63 u8 tailout_ignore; /* #of bytes to ignore at the end */ 50 u8 tailout_ignore; /* #of bytes to ignore at the end */
@@ -70,11 +57,10 @@ struct xpnet_message {
70 * 57 *
71 * XPC expects each message to exist in an individual cacheline. 58 * XPC expects each message to exist in an individual cacheline.
72 */ 59 */
73#define XPNET_MSG_SIZE (L1_CACHE_BYTES - XPC_MSG_PAYLOAD_OFFSET) 60#define XPNET_MSG_SIZE XPC_MSG_PAYLOAD_MAX_SIZE
74#define XPNET_MSG_DATA_MAX \ 61#define XPNET_MSG_DATA_MAX \
75 (XPNET_MSG_SIZE - (u64)(&((struct xpnet_message *)0)->data)) 62 (XPNET_MSG_SIZE - offsetof(struct xpnet_message, data))
76#define XPNET_MSG_ALIGNED_SIZE (L1_CACHE_ALIGN(XPNET_MSG_SIZE)) 63#define XPNET_MSG_NENTRIES (PAGE_SIZE / XPC_MSG_MAX_SIZE)
77#define XPNET_MSG_NENTRIES (PAGE_SIZE / XPNET_MSG_ALIGNED_SIZE)
78 64
79#define XPNET_MAX_KTHREADS (XPNET_MSG_NENTRIES + 1) 65#define XPNET_MAX_KTHREADS (XPNET_MSG_NENTRIES + 1)
80#define XPNET_MAX_IDLE_KTHREADS (XPNET_MSG_NENTRIES + 1) 66#define XPNET_MAX_IDLE_KTHREADS (XPNET_MSG_NENTRIES + 1)
@@ -105,7 +91,6 @@ struct xpnet_message {
105 * then be released. 91 * then be released.
106 */ 92 */
107struct xpnet_pending_msg { 93struct xpnet_pending_msg {
108 struct list_head free_list;
109 struct sk_buff *skb; 94 struct sk_buff *skb;
110 atomic_t use_count; 95 atomic_t use_count;
111}; 96};
@@ -121,7 +106,7 @@ struct net_device *xpnet_device;
121 * When we are notified of other partitions activating, we add them to 106 * When we are notified of other partitions activating, we add them to
122 * our bitmask of partitions to which we broadcast. 107 * our bitmask of partitions to which we broadcast.
123 */ 108 */
124static u64 xpnet_broadcast_partitions; 109static unsigned long *xpnet_broadcast_partitions;
125/* protect above */ 110/* protect above */
126static DEFINE_SPINLOCK(xpnet_broadcast_lock); 111static DEFINE_SPINLOCK(xpnet_broadcast_lock);
127 112
@@ -141,16 +126,13 @@ static DEFINE_SPINLOCK(xpnet_broadcast_lock);
141#define XPNET_DEF_MTU (0x8000UL) 126#define XPNET_DEF_MTU (0x8000UL)
142 127
143/* 128/*
144 * The partition id is encapsulated in the MAC address. The following 129 * The partid is encapsulated in the MAC address beginning in the following
145 * define locates the octet the partid is in. 130 * octet and it consists of two octets.
146 */ 131 */
147#define XPNET_PARTID_OCTET 1 132#define XPNET_PARTID_OCTET 2
148#define XPNET_LICENSE_OCTET 2 133
134/* Define the XPNET debug device structures to be used with dev_dbg() et al */
149 135
150/*
151 * Define the XPNET debug device structure that is to be used with dev_dbg(),
152 * dev_err(), dev_warn(), and dev_info().
153 */
154struct device_driver xpnet_dbg_name = { 136struct device_driver xpnet_dbg_name = {
155 .name = "xpnet" 137 .name = "xpnet"
156}; 138};
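
With this change the destination partid occupies two MAC octets starting at XPNET_PARTID_OCTET (2), high byte first, matching how the transmit path below reassembles it. A self-contained sketch of packing and unpacking that field; the MAC values here are illustrative:

/*
 * Illustrative userspace sketch, not part of the patch: pack a 16-bit
 * partid into MAC octets 2 and 3 (high byte first) and pull it back out
 * the way the xmit path does.
 */
#include <stdio.h>

#define XPNET_PARTID_OCTET 2

static void mac_set_partid(unsigned char mac[6], short partid)
{
	mac[XPNET_PARTID_OCTET + 0] = (unsigned char)(partid >> 8);
	mac[XPNET_PARTID_OCTET + 1] = (unsigned char)(partid & 0xff);
}

static short mac_get_partid(const unsigned char mac[6])
{
	short partid = (short)mac[XPNET_PARTID_OCTET + 1];

	partid |= (short)mac[XPNET_PARTID_OCTET + 0] << 8;
	return partid;
}

int main(void)
{
	unsigned char mac[6] = { 0x02, 0x00, 0x00, 0x00, 0x00, 0x00 };

	mac_set_partid(mac, 300);	/* a partid that needs both octets */
	printf("octets: %02x %02x, partid back: %d\n",
	       mac[2], mac[3], mac_get_partid(mac));
	return 0;
}
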
@@ -169,7 +151,8 @@ static void
169xpnet_receive(short partid, int channel, struct xpnet_message *msg) 151xpnet_receive(short partid, int channel, struct xpnet_message *msg)
170{ 152{
171 struct sk_buff *skb; 153 struct sk_buff *skb;
172 bte_result_t bret; 154 void *dst;
155 enum xp_retval ret;
173 struct xpnet_dev_private *priv = 156 struct xpnet_dev_private *priv =
174 (struct xpnet_dev_private *)xpnet_device->priv; 157 (struct xpnet_dev_private *)xpnet_device->priv;
175 158
@@ -201,7 +184,7 @@ xpnet_receive(short partid, int channel, struct xpnet_message *msg)
201 184
202 /* 185 /*
203 * The allocated skb has some reserved space. 186 * The allocated skb has some reserved space.
204 * In order to use bte_copy, we need to get the 187 * In order to use xp_remote_memcpy(), we need to get the
205 * skb->data pointer moved forward. 188 * skb->data pointer moved forward.
206 */ 189 */
207 skb_reserve(skb, (L1_CACHE_BYTES - ((u64)skb->data & 190 skb_reserve(skb, (L1_CACHE_BYTES - ((u64)skb->data &
@@ -226,26 +209,21 @@ xpnet_receive(short partid, int channel, struct xpnet_message *msg)
226 skb_copy_to_linear_data(skb, &msg->data, 209 skb_copy_to_linear_data(skb, &msg->data,
227 (size_t)msg->embedded_bytes); 210 (size_t)msg->embedded_bytes);
228 } else { 211 } else {
212 dst = (void *)((u64)skb->data & ~(L1_CACHE_BYTES - 1));
229 dev_dbg(xpnet, "transferring buffer to the skb->data area;\n\t" 213 dev_dbg(xpnet, "transferring buffer to the skb->data area;\n\t"
230 "bte_copy(0x%p, 0x%p, %hu)\n", (void *)msg->buf_pa, 214 "xp_remote_memcpy(0x%p, 0x%p, %hu)\n", dst,
231 (void *)__pa((u64)skb->data & ~(L1_CACHE_BYTES - 1)), 215 (void *)msg->buf_pa, msg->size);
232 msg->size);
233
234 bret = bte_copy(msg->buf_pa,
235 __pa((u64)skb->data & ~(L1_CACHE_BYTES - 1)),
236 msg->size, (BTE_NOTIFY | BTE_WACQUIRE), NULL);
237 216
238 if (bret != BTE_SUCCESS) { 217 ret = xp_remote_memcpy(xp_pa(dst), msg->buf_pa, msg->size);
218 if (ret != xpSuccess) {
239 /* 219 /*
240 * >>> Need better way of cleaning skb. Currently skb 220 * !!! Need better way of cleaning skb. Currently skb
241 * >>> appears in_use and we can't just call 221 * !!! appears in_use and we can't just call
242 * >>> dev_kfree_skb. 222 * !!! dev_kfree_skb.
243 */ 223 */
244 dev_err(xpnet, "bte_copy(0x%p, 0x%p, 0x%hx) returned " 224 dev_err(xpnet, "xp_remote_memcpy(0x%p, 0x%p, 0x%hx) "
245 "error=0x%x\n", (void *)msg->buf_pa, 225 "returned error=0x%x\n", dst,
246 (void *)__pa((u64)skb->data & 226 (void *)msg->buf_pa, msg->size, ret);
247 ~(L1_CACHE_BYTES - 1)),
248 msg->size, bret);
249 227
250 xpc_received(partid, channel, (void *)msg); 228 xpc_received(partid, channel, (void *)msg);
251 229
@@ -285,9 +263,7 @@ static void
285xpnet_connection_activity(enum xp_retval reason, short partid, int channel, 263xpnet_connection_activity(enum xp_retval reason, short partid, int channel,
286 void *data, void *key) 264 void *data, void *key)
287{ 265{
288 long bp; 266 DBUG_ON(partid < 0 || partid >= xp_max_npartitions);
289
290 DBUG_ON(partid <= 0 || partid >= XP_MAX_PARTITIONS);
291 DBUG_ON(channel != XPC_NET_CHANNEL); 267 DBUG_ON(channel != XPC_NET_CHANNEL);
292 268
293 switch (reason) { 269 switch (reason) {
@@ -299,31 +275,28 @@ xpnet_connection_activity(enum xp_retval reason, short partid, int channel,
299 275
300 case xpConnected: /* connection completed to a partition */ 276 case xpConnected: /* connection completed to a partition */
301 spin_lock_bh(&xpnet_broadcast_lock); 277 spin_lock_bh(&xpnet_broadcast_lock);
302 xpnet_broadcast_partitions |= 1UL << (partid - 1); 278 __set_bit(partid, xpnet_broadcast_partitions);
303 bp = xpnet_broadcast_partitions;
304 spin_unlock_bh(&xpnet_broadcast_lock); 279 spin_unlock_bh(&xpnet_broadcast_lock);
305 280
306 netif_carrier_on(xpnet_device); 281 netif_carrier_on(xpnet_device);
307 282
308 dev_dbg(xpnet, "%s connection created to partition %d; " 283 dev_dbg(xpnet, "%s connected to partition %d\n",
309 "xpnet_broadcast_partitions=0x%lx\n", 284 xpnet_device->name, partid);
310 xpnet_device->name, partid, bp);
311 break; 285 break;
312 286
313 default: 287 default:
314 spin_lock_bh(&xpnet_broadcast_lock); 288 spin_lock_bh(&xpnet_broadcast_lock);
315 xpnet_broadcast_partitions &= ~(1UL << (partid - 1)); 289 __clear_bit(partid, xpnet_broadcast_partitions);
316 bp = xpnet_broadcast_partitions;
317 spin_unlock_bh(&xpnet_broadcast_lock); 290 spin_unlock_bh(&xpnet_broadcast_lock);
318 291
319 if (bp == 0) 292 if (bitmap_empty((unsigned long *)xpnet_broadcast_partitions,
293 xp_max_npartitions)) {
320 netif_carrier_off(xpnet_device); 294 netif_carrier_off(xpnet_device);
295 }
321 296
322 dev_dbg(xpnet, "%s disconnected from partition %d; " 297 dev_dbg(xpnet, "%s disconnected from partition %d\n",
323 "xpnet_broadcast_partitions=0x%lx\n", 298 xpnet_device->name, partid);
324 xpnet_device->name, partid, bp);
325 break; 299 break;
326
327 } 300 }
328} 301}
329 302
@@ -334,8 +307,10 @@ xpnet_dev_open(struct net_device *dev)
334 307
335 dev_dbg(xpnet, "calling xpc_connect(%d, 0x%p, NULL, %ld, %ld, %ld, " 308 dev_dbg(xpnet, "calling xpc_connect(%d, 0x%p, NULL, %ld, %ld, %ld, "
336 "%ld)\n", XPC_NET_CHANNEL, xpnet_connection_activity, 309 "%ld)\n", XPC_NET_CHANNEL, xpnet_connection_activity,
337 XPNET_MSG_SIZE, XPNET_MSG_NENTRIES, XPNET_MAX_KTHREADS, 310 (unsigned long)XPNET_MSG_SIZE,
338 XPNET_MAX_IDLE_KTHREADS); 311 (unsigned long)XPNET_MSG_NENTRIES,
312 (unsigned long)XPNET_MAX_KTHREADS,
313 (unsigned long)XPNET_MAX_IDLE_KTHREADS);
339 314
340 ret = xpc_connect(XPC_NET_CHANNEL, xpnet_connection_activity, NULL, 315 ret = xpc_connect(XPC_NET_CHANNEL, xpnet_connection_activity, NULL,
341 XPNET_MSG_SIZE, XPNET_MSG_NENTRIES, 316 XPNET_MSG_SIZE, XPNET_MSG_NENTRIES,
@@ -426,35 +401,74 @@ xpnet_send_completed(enum xp_retval reason, short partid, int channel,
426 } 401 }
427} 402}
428 403
404static void
405xpnet_send(struct sk_buff *skb, struct xpnet_pending_msg *queued_msg,
406 u64 start_addr, u64 end_addr, u16 embedded_bytes, int dest_partid)
407{
408 u8 msg_buffer[XPNET_MSG_SIZE];
409 struct xpnet_message *msg = (struct xpnet_message *)&msg_buffer;
410 u16 msg_size = sizeof(struct xpnet_message);
411 enum xp_retval ret;
412
413 msg->embedded_bytes = embedded_bytes;
414 if (unlikely(embedded_bytes != 0)) {
415 msg->version = XPNET_VERSION_EMBED;
416 dev_dbg(xpnet, "calling memcpy(0x%p, 0x%p, 0x%lx)\n",
417 &msg->data, skb->data, (size_t)embedded_bytes);
418 skb_copy_from_linear_data(skb, &msg->data,
419 (size_t)embedded_bytes);
420 msg_size += embedded_bytes - 1;
421 } else {
422 msg->version = XPNET_VERSION;
423 }
424 msg->magic = XPNET_MAGIC;
425 msg->size = end_addr - start_addr;
426 msg->leadin_ignore = (u64)skb->data - start_addr;
427 msg->tailout_ignore = end_addr - (u64)skb_tail_pointer(skb);
428 msg->buf_pa = xp_pa((void *)start_addr);
429
430 dev_dbg(xpnet, "sending XPC message to %d:%d\n"
431 KERN_DEBUG "msg->buf_pa=0x%lx, msg->size=%u, "
432 "msg->leadin_ignore=%u, msg->tailout_ignore=%u\n",
433 dest_partid, XPC_NET_CHANNEL, msg->buf_pa, msg->size,
434 msg->leadin_ignore, msg->tailout_ignore);
435
436 atomic_inc(&queued_msg->use_count);
437
438 ret = xpc_send_notify(dest_partid, XPC_NET_CHANNEL, XPC_NOWAIT, msg,
439 msg_size, xpnet_send_completed, queued_msg);
440 if (unlikely(ret != xpSuccess))
441 atomic_dec(&queued_msg->use_count);
442}
443
429/* 444/*
430 * Network layer has formatted a packet (skb) and is ready to place it 445 * Network layer has formatted a packet (skb) and is ready to place it
431 * "on the wire". Prepare and send an xpnet_message to all partitions 446 * "on the wire". Prepare and send an xpnet_message to all partitions
432 * which have connected with us and are targets of this packet. 447 * which have connected with us and are targets of this packet.
433 * 448 *
434 * MAC-NOTE: For the XPNET driver, the MAC address contains the 449 * MAC-NOTE: For the XPNET driver, the MAC address contains the
435 * destination partition_id. If the destination partition id word 450 * destination partid. If the destination partid octets are 0xffff,
436 * is 0xff, this packet is to broadcast to all partitions. 451 * this packet is to be broadcast to all connected partitions.
437 */ 452 */
438static int 453static int
439xpnet_dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev) 454xpnet_dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
440{ 455{
441 struct xpnet_pending_msg *queued_msg; 456 struct xpnet_pending_msg *queued_msg;
442 enum xp_retval ret;
443 struct xpnet_message *msg;
444 u64 start_addr, end_addr; 457 u64 start_addr, end_addr;
445 long dp;
446 u8 second_mac_octet;
447 short dest_partid; 458 short dest_partid;
448 struct xpnet_dev_private *priv; 459 struct xpnet_dev_private *priv = (struct xpnet_dev_private *)dev->priv;
449 u16 embedded_bytes; 460 u16 embedded_bytes = 0;
450
451 priv = (struct xpnet_dev_private *)dev->priv;
452 461
453 dev_dbg(xpnet, ">skb->head=0x%p skb->data=0x%p skb->tail=0x%p " 462 dev_dbg(xpnet, ">skb->head=0x%p skb->data=0x%p skb->tail=0x%p "
454 "skb->end=0x%p skb->len=%d\n", (void *)skb->head, 463 "skb->end=0x%p skb->len=%d\n", (void *)skb->head,
455 (void *)skb->data, skb_tail_pointer(skb), skb_end_pointer(skb), 464 (void *)skb->data, skb_tail_pointer(skb), skb_end_pointer(skb),
456 skb->len); 465 skb->len);
457 466
467 if (skb->data[0] == 0x33) {
468 dev_kfree_skb(skb);
469 return 0; /* nothing needed to be done */
470 }
471
458 /* 472 /*
459 * The xpnet_pending_msg tracks how many outstanding 473 * The xpnet_pending_msg tracks how many outstanding
460 * xpc_send_notifies are relying on this skb. When none 474 * xpc_send_notifies are relying on this skb. When none
@@ -466,7 +480,6 @@ xpnet_dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
466 "packet\n", sizeof(struct xpnet_pending_msg)); 480 "packet\n", sizeof(struct xpnet_pending_msg));
467 481
468 priv->stats.tx_errors++; 482 priv->stats.tx_errors++;
469
470 return -ENOMEM; 483 return -ENOMEM;
471 } 484 }
472 485
@@ -475,7 +488,6 @@ xpnet_dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
475 end_addr = L1_CACHE_ALIGN((u64)skb_tail_pointer(skb)); 488 end_addr = L1_CACHE_ALIGN((u64)skb_tail_pointer(skb));
476 489
477 /* calculate how many bytes to embed in the XPC message */ 490 /* calculate how many bytes to embed in the XPC message */
478 embedded_bytes = 0;
479 if (unlikely(skb->len <= XPNET_MSG_DATA_MAX)) { 491 if (unlikely(skb->len <= XPNET_MSG_DATA_MAX)) {
480 /* skb->data does fit so embed */ 492 /* skb->data does fit so embed */
481 embedded_bytes = skb->len; 493 embedded_bytes = skb->len;
@@ -491,82 +503,28 @@ xpnet_dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
491 atomic_set(&queued_msg->use_count, 1); 503 atomic_set(&queued_msg->use_count, 1);
492 queued_msg->skb = skb; 504 queued_msg->skb = skb;
493 505
494 second_mac_octet = skb->data[XPNET_PARTID_OCTET]; 506 if (skb->data[0] == 0xff) {
495 if (second_mac_octet == 0xff) {
496 /* we are being asked to broadcast to all partitions */ 507 /* we are being asked to broadcast to all partitions */
497 dp = xpnet_broadcast_partitions; 508 for_each_bit(dest_partid, xpnet_broadcast_partitions,
498 } else if (second_mac_octet != 0) { 509 xp_max_npartitions) {
499 dp = xpnet_broadcast_partitions &
500 (1UL << (second_mac_octet - 1));
501 } else {
502 /* 0 is an invalid partid. Ignore */
503 dp = 0;
504 }
505 dev_dbg(xpnet, "destination Partitions mask (dp) = 0x%lx\n", dp);
506
507 /*
508 * If we wanted to allow promiscuous mode to work like an
509 * unswitched network, this would be a good point to OR in a
510 * mask of partitions which should be receiving all packets.
511 */
512
513 /*
514 * Main send loop.
515 */
516 for (dest_partid = 1; dp && dest_partid < XP_MAX_PARTITIONS;
517 dest_partid++) {
518 510
519 if (!(dp & (1UL << (dest_partid - 1)))) { 511 xpnet_send(skb, queued_msg, start_addr, end_addr,
520 /* not destined for this partition */ 512 embedded_bytes, dest_partid);
521 continue;
522 } 513 }
514 } else {
515 dest_partid = (short)skb->data[XPNET_PARTID_OCTET + 1];
516 dest_partid |= (short)skb->data[XPNET_PARTID_OCTET + 0] << 8;
523 517
524 /* remove this partition from the destinations mask */ 518 if (dest_partid >= 0 &&
525 dp &= ~(1UL << (dest_partid - 1)); 519 dest_partid < xp_max_npartitions &&
526 520 test_bit(dest_partid, xpnet_broadcast_partitions) != 0) {
527 /* found a partition to send to */ 521
528 522 xpnet_send(skb, queued_msg, start_addr, end_addr,
529 ret = xpc_allocate(dest_partid, XPC_NET_CHANNEL, 523 embedded_bytes, dest_partid);
530 XPC_NOWAIT, (void **)&msg);
531 if (unlikely(ret != xpSuccess))
532 continue;
533
534 msg->embedded_bytes = embedded_bytes;
535 if (unlikely(embedded_bytes != 0)) {
536 msg->version = XPNET_VERSION_EMBED;
537 dev_dbg(xpnet, "calling memcpy(0x%p, 0x%p, 0x%lx)\n",
538 &msg->data, skb->data, (size_t)embedded_bytes);
539 skb_copy_from_linear_data(skb, &msg->data,
540 (size_t)embedded_bytes);
541 } else {
542 msg->version = XPNET_VERSION;
543 }
544 msg->magic = XPNET_MAGIC;
545 msg->size = end_addr - start_addr;
546 msg->leadin_ignore = (u64)skb->data - start_addr;
547 msg->tailout_ignore = end_addr - (u64)skb_tail_pointer(skb);
548 msg->buf_pa = __pa(start_addr);
549
550 dev_dbg(xpnet, "sending XPC message to %d:%d\n"
551 KERN_DEBUG "msg->buf_pa=0x%lx, msg->size=%u, "
552 "msg->leadin_ignore=%u, msg->tailout_ignore=%u\n",
553 dest_partid, XPC_NET_CHANNEL, msg->buf_pa, msg->size,
554 msg->leadin_ignore, msg->tailout_ignore);
555
556 atomic_inc(&queued_msg->use_count);
557
558 ret = xpc_send_notify(dest_partid, XPC_NET_CHANNEL, msg,
559 xpnet_send_completed, queued_msg);
560 if (unlikely(ret != xpSuccess)) {
561 atomic_dec(&queued_msg->use_count);
562 continue;
563 } 524 }
564 } 525 }
565 526
566 if (atomic_dec_return(&queued_msg->use_count) == 0) { 527 if (atomic_dec_return(&queued_msg->use_count) == 0) {
567 dev_dbg(xpnet, "no partitions to receive packet destined for "
568 "%d\n", dest_partid);
569
570 dev_kfree_skb(skb); 528 dev_kfree_skb(skb);
571 kfree(queued_msg); 529 kfree(queued_msg);
572 } 530 }
@@ -594,23 +552,28 @@ xpnet_dev_tx_timeout(struct net_device *dev)
594static int __init 552static int __init
595xpnet_init(void) 553xpnet_init(void)
596{ 554{
597 int i; 555 int result;
598 u32 license_num;
599 int result = -ENOMEM;
600 556
601 if (!ia64_platform_is("sn2")) 557 if (!is_shub() && !is_uv())
602 return -ENODEV; 558 return -ENODEV;
603 559
604 dev_info(xpnet, "registering network device %s\n", XPNET_DEVICE_NAME); 560 dev_info(xpnet, "registering network device %s\n", XPNET_DEVICE_NAME);
605 561
562 xpnet_broadcast_partitions = kzalloc(BITS_TO_LONGS(xp_max_npartitions) *
563 sizeof(long), GFP_KERNEL);
564 if (xpnet_broadcast_partitions == NULL)
565 return -ENOMEM;
566
606 /* 567 /*
607 * use ether_setup() to init the majority of our device 568 * use ether_setup() to init the majority of our device
608 * structure and then override the necessary pieces. 569 * structure and then override the necessary pieces.
609 */ 570 */
610 xpnet_device = alloc_netdev(sizeof(struct xpnet_dev_private), 571 xpnet_device = alloc_netdev(sizeof(struct xpnet_dev_private),
611 XPNET_DEVICE_NAME, ether_setup); 572 XPNET_DEVICE_NAME, ether_setup);
612 if (xpnet_device == NULL) 573 if (xpnet_device == NULL) {
574 kfree(xpnet_broadcast_partitions);
613 return -ENOMEM; 575 return -ENOMEM;
576 }
614 577
615 netif_carrier_off(xpnet_device); 578 netif_carrier_off(xpnet_device);
616 579
@@ -628,14 +591,10 @@ xpnet_init(void)
628 * MAC addresses. We chose the first octet of the MAC to be unlikely 591 * MAC addresses. We chose the first octet of the MAC to be unlikely
629 * to collide with any vendor's officially issued MAC. 592 * to collide with any vendor's officially issued MAC.
630 */ 593 */
631 xpnet_device->dev_addr[0] = 0xfe; 594 xpnet_device->dev_addr[0] = 0x02; /* locally administered, no OUI */
632 xpnet_device->dev_addr[XPNET_PARTID_OCTET] = sn_partition_id; 595
633 license_num = sn_partition_serial_number_val(); 596 xpnet_device->dev_addr[XPNET_PARTID_OCTET + 1] = xp_partition_id;
634 for (i = 3; i >= 0; i--) { 597 xpnet_device->dev_addr[XPNET_PARTID_OCTET + 0] = (xp_partition_id >> 8);
635 xpnet_device->dev_addr[XPNET_LICENSE_OCTET + i] =
636 license_num & 0xff;
637 license_num = license_num >> 8;
638 }
639 598
640 /* 599 /*
641 * ether_setup() sets this to a multicast device. We are 600 * ether_setup() sets this to a multicast device. We are
@@ -651,8 +610,10 @@ xpnet_init(void)
651 xpnet_device->features = NETIF_F_NO_CSUM; 610 xpnet_device->features = NETIF_F_NO_CSUM;
652 611
653 result = register_netdev(xpnet_device); 612 result = register_netdev(xpnet_device);
654 if (result != 0) 613 if (result != 0) {
655 free_netdev(xpnet_device); 614 free_netdev(xpnet_device);
615 kfree(xpnet_broadcast_partitions);
616 }
656 617
657 return result; 618 return result;
658} 619}
@@ -666,8 +627,8 @@ xpnet_exit(void)
666 xpnet_device[0].name); 627 xpnet_device[0].name);
667 628
668 unregister_netdev(xpnet_device); 629 unregister_netdev(xpnet_device);
669
670 free_netdev(xpnet_device); 630 free_netdev(xpnet_device);
631 kfree(xpnet_broadcast_partitions);
671} 632}
672 633
673module_exit(xpnet_exit); 634module_exit(xpnet_exit);
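
The xpnet rework above replaces the fixed 64-bit partition mask with a dynamically sized bitmap (xpnet_broadcast_partitions), allocated from xp_max_npartitions and walked with for_each_bit()/test_bit(). A minimal kernel-style sketch of that bitmap idiom follows; the names active_parts, max_parts and parts_* are invented for illustration, only the bitmap helpers themselves come from the hunk above.

#include <linux/bitops.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/slab.h>

static unsigned long *active_parts;	/* one bit per partition */
static int max_parts;			/* e.g. xp_max_npartitions */

static int parts_init(int nparts)
{
	max_parts = nparts;
	/* BITS_TO_LONGS() rounds up so every bit has backing storage */
	active_parts = kzalloc(BITS_TO_LONGS(nparts) * sizeof(long),
			       GFP_KERNEL);
	return active_parts ? 0 : -ENOMEM;
}

static void parts_send_all(void)
{
	int p;

	/* visit only the set bits, as the broadcast path above does */
	for_each_bit(p, active_parts, max_parts)
		pr_debug("would send to partition %d\n", p);
}

(for_each_bit() is the iterator name used in this tree; later kernels call it for_each_set_bit().)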
diff --git a/drivers/mmc/card/block.c b/drivers/mmc/card/block.c
index 66e5a5487c20..86dbb366415a 100644
--- a/drivers/mmc/card/block.c
+++ b/drivers/mmc/card/block.c
@@ -213,7 +213,8 @@ static int mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req)
213 struct mmc_blk_data *md = mq->data; 213 struct mmc_blk_data *md = mq->data;
214 struct mmc_card *card = md->queue.card; 214 struct mmc_card *card = md->queue.card;
215 struct mmc_blk_request brq; 215 struct mmc_blk_request brq;
216 int ret = 1, sg_pos, data_size; 216 int ret = 1, data_size, i;
217 struct scatterlist *sg;
217 218
218 mmc_claim_host(card->host); 219 mmc_claim_host(card->host);
219 220
@@ -267,18 +268,22 @@ static int mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req)
267 268
268 mmc_queue_bounce_pre(mq); 269 mmc_queue_bounce_pre(mq);
269 270
271 /*
272 * Adjust the sg list so it is the same size as the
273 * request.
274 */
270 if (brq.data.blocks != 275 if (brq.data.blocks !=
271 (req->nr_sectors >> (md->block_bits - 9))) { 276 (req->nr_sectors >> (md->block_bits - 9))) {
272 data_size = brq.data.blocks * brq.data.blksz; 277 data_size = brq.data.blocks * brq.data.blksz;
273 for (sg_pos = 0; sg_pos < brq.data.sg_len; sg_pos++) { 278 for_each_sg(brq.data.sg, sg, brq.data.sg_len, i) {
274 data_size -= mq->sg[sg_pos].length; 279 data_size -= sg->length;
275 if (data_size <= 0) { 280 if (data_size <= 0) {
276 mq->sg[sg_pos].length += data_size; 281 sg->length += data_size;
277 sg_pos++; 282 i++;
278 break; 283 break;
279 } 284 }
280 } 285 }
281 brq.data.sg_len = sg_pos; 286 brq.data.sg_len = i;
282 } 287 }
283 288
284 mmc_wait_for_req(card->host, &brq.mrq); 289 mmc_wait_for_req(card->host, &brq.mrq);
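
The block.c hunk above drops manual indexing into mq->sg in favour of the scatterlist iterator. A short sketch of the same for_each_sg() trimming pattern, written as a standalone helper; sg_trim_to_size and its parameters are invented for illustration:

#include <linux/scatterlist.h>

/*
 * Shrink a scatterlist so it covers exactly 'bytes' of data and return the
 * number of entries still needed -- the same adjustment mmc_blk_issue_rq()
 * performs above when the request is shorter than the prepared list.
 */
static unsigned int sg_trim_to_size(struct scatterlist *sgl,
				    unsigned int nents, int bytes)
{
	struct scatterlist *sg;
	unsigned int i;

	for_each_sg(sgl, sg, nents, i) {
		bytes -= sg->length;
		if (bytes <= 0) {
			sg->length += bytes;	/* bytes is <= 0: shrink */
			i++;
			break;
		}
	}
	return i;
}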
diff --git a/drivers/mmc/card/mmc_test.c b/drivers/mmc/card/mmc_test.c
index a067fe436301..f26b01d811ae 100644
--- a/drivers/mmc/card/mmc_test.c
+++ b/drivers/mmc/card/mmc_test.c
@@ -388,16 +388,14 @@ static int mmc_test_transfer(struct mmc_test_card *test,
388 int ret, i; 388 int ret, i;
389 unsigned long flags; 389 unsigned long flags;
390 390
391 BUG_ON(blocks * blksz > BUFFER_SIZE);
392
393 if (write) { 391 if (write) {
394 for (i = 0;i < blocks * blksz;i++) 392 for (i = 0;i < blocks * blksz;i++)
395 test->scratch[i] = i; 393 test->scratch[i] = i;
396 } else { 394 } else {
397 memset(test->scratch, 0, blocks * blksz); 395 memset(test->scratch, 0, BUFFER_SIZE);
398 } 396 }
399 local_irq_save(flags); 397 local_irq_save(flags);
400 sg_copy_from_buffer(sg, sg_len, test->scratch, blocks * blksz); 398 sg_copy_from_buffer(sg, sg_len, test->scratch, BUFFER_SIZE);
401 local_irq_restore(flags); 399 local_irq_restore(flags);
402 400
403 ret = mmc_test_set_blksize(test, blksz); 401 ret = mmc_test_set_blksize(test, blksz);
@@ -444,7 +442,7 @@ static int mmc_test_transfer(struct mmc_test_card *test,
444 } 442 }
445 } else { 443 } else {
446 local_irq_save(flags); 444 local_irq_save(flags);
447 sg_copy_to_buffer(sg, sg_len, test->scratch, blocks * blksz); 445 sg_copy_to_buffer(sg, sg_len, test->scratch, BUFFER_SIZE);
448 local_irq_restore(flags); 446 local_irq_restore(flags);
449 for (i = 0;i < blocks * blksz;i++) { 447 for (i = 0;i < blocks * blksz;i++) {
450 if (test->scratch[i] != (u8)i) 448 if (test->scratch[i] != (u8)i)
@@ -805,69 +803,6 @@ static int mmc_test_multi_xfersize_read(struct mmc_test_card *test)
805 return 0; 803 return 0;
806} 804}
807 805
808static int mmc_test_bigsg_write(struct mmc_test_card *test)
809{
810 int ret;
811 unsigned int size;
812 struct scatterlist sg;
813
814 if (test->card->host->max_blk_count == 1)
815 return RESULT_UNSUP_HOST;
816
817 size = PAGE_SIZE * 2;
818 size = min(size, test->card->host->max_req_size);
819 size = min(size, test->card->host->max_seg_size);
820 size = min(size, test->card->host->max_blk_count * 512);
821
822 memset(test->buffer, 0, BUFFER_SIZE);
823
824 if (size < 1024)
825 return RESULT_UNSUP_HOST;
826
827 sg_init_table(&sg, 1);
828 sg_init_one(&sg, test->buffer, BUFFER_SIZE);
829
830 ret = mmc_test_transfer(test, &sg, 1, 0, size/512, 512, 1);
831 if (ret)
832 return ret;
833
834 return 0;
835}
836
837static int mmc_test_bigsg_read(struct mmc_test_card *test)
838{
839 int ret, i;
840 unsigned int size;
841 struct scatterlist sg;
842
843 if (test->card->host->max_blk_count == 1)
844 return RESULT_UNSUP_HOST;
845
846 size = PAGE_SIZE * 2;
847 size = min(size, test->card->host->max_req_size);
848 size = min(size, test->card->host->max_seg_size);
849 size = min(size, test->card->host->max_blk_count * 512);
850
851 if (size < 1024)
852 return RESULT_UNSUP_HOST;
853
854 memset(test->buffer, 0xCD, BUFFER_SIZE);
855
856 sg_init_table(&sg, 1);
857 sg_init_one(&sg, test->buffer, BUFFER_SIZE);
858 ret = mmc_test_transfer(test, &sg, 1, 0, size/512, 512, 0);
859 if (ret)
860 return ret;
861
862 /* mmc_test_transfer() doesn't check for read overflows */
863 for (i = size;i < BUFFER_SIZE;i++) {
864 if (test->buffer[i] != 0xCD)
865 return RESULT_FAIL;
866 }
867
868 return 0;
869}
870
871#ifdef CONFIG_HIGHMEM 806#ifdef CONFIG_HIGHMEM
872 807
873static int mmc_test_write_high(struct mmc_test_card *test) 808static int mmc_test_write_high(struct mmc_test_card *test)
@@ -1071,20 +1006,6 @@ static const struct mmc_test_case mmc_test_cases[] = {
1071 .run = mmc_test_multi_xfersize_read, 1006 .run = mmc_test_multi_xfersize_read,
1072 }, 1007 },
1073 1008
1074 {
1075 .name = "Over-sized SG list write",
1076 .prepare = mmc_test_prepare_write,
1077 .run = mmc_test_bigsg_write,
1078 .cleanup = mmc_test_cleanup,
1079 },
1080
1081 {
1082 .name = "Over-sized SG list read",
1083 .prepare = mmc_test_prepare_read,
1084 .run = mmc_test_bigsg_read,
1085 .cleanup = mmc_test_cleanup,
1086 },
1087
1088#ifdef CONFIG_HIGHMEM 1009#ifdef CONFIG_HIGHMEM
1089 1010
1090 { 1011 {
diff --git a/drivers/mmc/core/core.c b/drivers/mmc/core/core.c
index 3ee5b8c3b5ce..044d84eeed7c 100644
--- a/drivers/mmc/core/core.c
+++ b/drivers/mmc/core/core.c
@@ -121,6 +121,7 @@ mmc_start_request(struct mmc_host *host, struct mmc_request *mrq)
121{ 121{
122#ifdef CONFIG_MMC_DEBUG 122#ifdef CONFIG_MMC_DEBUG
123 unsigned int i, sz; 123 unsigned int i, sz;
124 struct scatterlist *sg;
124#endif 125#endif
125 126
126 pr_debug("%s: starting CMD%u arg %08x flags %08x\n", 127 pr_debug("%s: starting CMD%u arg %08x flags %08x\n",
@@ -156,8 +157,8 @@ mmc_start_request(struct mmc_host *host, struct mmc_request *mrq)
156 157
157#ifdef CONFIG_MMC_DEBUG 158#ifdef CONFIG_MMC_DEBUG
158 sz = 0; 159 sz = 0;
159 for (i = 0;i < mrq->data->sg_len;i++) 160 for_each_sg(mrq->data->sg, sg, mrq->data->sg_len, i)
160 sz += mrq->data->sg[i].length; 161 sz += sg->length;
161 BUG_ON(sz != mrq->data->blocks * mrq->data->blksz); 162 BUG_ON(sz != mrq->data->blocks * mrq->data->blksz);
162#endif 163#endif
163 164
diff --git a/drivers/mmc/host/at91_mci.c b/drivers/mmc/host/at91_mci.c
index f15e2064305c..6915f40ac8ab 100644
--- a/drivers/mmc/host/at91_mci.c
+++ b/drivers/mmc/host/at91_mci.c
@@ -73,9 +73,9 @@
73#include <asm/gpio.h> 73#include <asm/gpio.h>
74 74
75#include <asm/mach/mmc.h> 75#include <asm/mach/mmc.h>
76#include <asm/arch/board.h> 76#include <mach/board.h>
77#include <asm/arch/cpu.h> 77#include <mach/cpu.h>
78#include <asm/arch/at91_mci.h> 78#include <mach/at91_mci.h>
79 79
80#define DRIVER_NAME "at91_mci" 80#define DRIVER_NAME "at91_mci"
81 81
diff --git a/drivers/mmc/host/atmel-mci.c b/drivers/mmc/host/atmel-mci.c
index 992b4beb757c..0bd06f5bd62f 100644
--- a/drivers/mmc/host/atmel-mci.c
+++ b/drivers/mmc/host/atmel-mci.c
@@ -28,7 +28,7 @@
28#include <asm/io.h> 28#include <asm/io.h>
29#include <asm/unaligned.h> 29#include <asm/unaligned.h>
30 30
31#include <asm/arch/board.h> 31#include <mach/board.h>
32 32
33#include "atmel-mci-regs.h" 33#include "atmel-mci-regs.h"
34 34
diff --git a/drivers/mmc/host/au1xmmc.c b/drivers/mmc/host/au1xmmc.c
index 99b20917cc0f..d3f55615c099 100644
--- a/drivers/mmc/host/au1xmmc.c
+++ b/drivers/mmc/host/au1xmmc.c
@@ -61,7 +61,13 @@
61 61
62/* Hardware definitions */ 62/* Hardware definitions */
63#define AU1XMMC_DESCRIPTOR_COUNT 1 63#define AU1XMMC_DESCRIPTOR_COUNT 1
64#define AU1XMMC_DESCRIPTOR_SIZE 2048 64
65/* max DMA seg size: 64KB on Au1100, 4MB on Au1200 */
66#ifdef CONFIG_SOC_AU1100
67#define AU1XMMC_DESCRIPTOR_SIZE 0x0000ffff
68#else /* Au1200 */
69#define AU1XMMC_DESCRIPTOR_SIZE 0x003fffff
70#endif
65 71
66#define AU1XMMC_OCR (MMC_VDD_27_28 | MMC_VDD_28_29 | MMC_VDD_29_30 | \ 72#define AU1XMMC_OCR (MMC_VDD_27_28 | MMC_VDD_28_29 | MMC_VDD_29_30 | \
67 MMC_VDD_30_31 | MMC_VDD_31_32 | MMC_VDD_32_33 | \ 73 MMC_VDD_30_31 | MMC_VDD_31_32 | MMC_VDD_32_33 | \
diff --git a/drivers/mmc/host/imxmmc.c b/drivers/mmc/host/imxmmc.c
index f61406da65d2..2f0fcdb869b7 100644
--- a/drivers/mmc/host/imxmmc.c
+++ b/drivers/mmc/host/imxmmc.c
@@ -42,8 +42,8 @@
42#include <asm/io.h> 42#include <asm/io.h>
43#include <asm/irq.h> 43#include <asm/irq.h>
44#include <asm/sizes.h> 44#include <asm/sizes.h>
45#include <asm/arch/mmc.h> 45#include <mach/mmc.h>
46#include <asm/arch/imx-dma.h> 46#include <mach/imx-dma.h>
47 47
48#include "imxmmc.h" 48#include "imxmmc.h"
49 49
diff --git a/drivers/mmc/host/omap.c b/drivers/mmc/host/omap.c
index dbc26eb6a89e..c16028872bbb 100644
--- a/drivers/mmc/host/omap.c
+++ b/drivers/mmc/host/omap.c
@@ -29,14 +29,13 @@
29 29
30#include <asm/io.h> 30#include <asm/io.h>
31#include <asm/irq.h> 31#include <asm/irq.h>
32#include <asm/mach-types.h> 32
33 33#include <mach/board.h>
34#include <asm/arch/board.h> 34#include <mach/mmc.h>
35#include <asm/arch/mmc.h> 35#include <mach/gpio.h>
36#include <asm/arch/gpio.h> 36#include <mach/dma.h>
37#include <asm/arch/dma.h> 37#include <mach/mux.h>
38#include <asm/arch/mux.h> 38#include <mach/fpga.h>
39#include <asm/arch/fpga.h>
40 39
41#define OMAP_MMC_REG_CMD 0x00 40#define OMAP_MMC_REG_CMD 0x00
42#define OMAP_MMC_REG_ARGL 0x04 41#define OMAP_MMC_REG_ARGL 0x04
diff --git a/drivers/mmc/host/pxamci.c b/drivers/mmc/host/pxamci.c
index a8e18fe53077..55093ad132ca 100644
--- a/drivers/mmc/host/pxamci.c
+++ b/drivers/mmc/host/pxamci.c
@@ -31,8 +31,8 @@
31#include <asm/io.h> 31#include <asm/io.h>
32#include <asm/sizes.h> 32#include <asm/sizes.h>
33 33
34#include <asm/arch/pxa-regs.h> 34#include <mach/pxa-regs.h>
35#include <asm/arch/mmc.h> 35#include <mach/mmc.h>
36 36
37#include "pxamci.h" 37#include "pxamci.h"
38 38
diff --git a/drivers/mmc/host/s3cmci.c b/drivers/mmc/host/s3cmci.c
index be550c26da68..7c994e1ae276 100644
--- a/drivers/mmc/host/s3cmci.c
+++ b/drivers/mmc/host/s3cmci.c
@@ -18,8 +18,8 @@
18 18
19#include <asm/dma.h> 19#include <asm/dma.h>
20 20
21#include <asm/arch/regs-sdi.h> 21#include <mach/regs-sdi.h>
22#include <asm/arch/regs-gpio.h> 22#include <mach/regs-gpio.h>
23 23
24#include <asm/plat-s3c24xx/mci.h> 24#include <asm/plat-s3c24xx/mci.h>
25 25
diff --git a/drivers/mmc/host/sdhci-pci.c b/drivers/mmc/host/sdhci-pci.c
index deb607c52c0d..fcb14c2346cc 100644
--- a/drivers/mmc/host/sdhci-pci.c
+++ b/drivers/mmc/host/sdhci-pci.c
@@ -143,7 +143,8 @@ static int jmicron_probe(struct sdhci_pci_chip *chip)
143 chip->quirks |= SDHCI_QUIRK_32BIT_DMA_ADDR | 143 chip->quirks |= SDHCI_QUIRK_32BIT_DMA_ADDR |
144 SDHCI_QUIRK_32BIT_DMA_SIZE | 144 SDHCI_QUIRK_32BIT_DMA_SIZE |
145 SDHCI_QUIRK_32BIT_ADMA_SIZE | 145 SDHCI_QUIRK_32BIT_ADMA_SIZE |
146 SDHCI_QUIRK_RESET_AFTER_REQUEST; 146 SDHCI_QUIRK_RESET_AFTER_REQUEST |
147 SDHCI_QUIRK_BROKEN_SMALL_PIO;
147 } 148 }
148 149
149 /* 150 /*
diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c
index 5f95e10229b5..e3a8133560a2 100644
--- a/drivers/mmc/host/sdhci.c
+++ b/drivers/mmc/host/sdhci.c
@@ -278,6 +278,15 @@ static void sdhci_transfer_pio(struct sdhci_host *host)
278 else 278 else
279 mask = SDHCI_SPACE_AVAILABLE; 279 mask = SDHCI_SPACE_AVAILABLE;
280 280
281 /*
282 * Some controllers (JMicron JMB38x) mess up the buffer bits
283 * for transfers < 4 bytes. As long as it is just one block,
284 * we can ignore the bits.
285 */
286 if ((host->quirks & SDHCI_QUIRK_BROKEN_SMALL_PIO) &&
287 (host->data->blocks == 1))
288 mask = ~0;
289
281 while (readl(host->ioaddr + SDHCI_PRESENT_STATE) & mask) { 290 while (readl(host->ioaddr + SDHCI_PRESENT_STATE) & mask) {
282 if (host->data->flags & MMC_DATA_READ) 291 if (host->data->flags & MMC_DATA_READ)
283 sdhci_read_block_pio(host); 292 sdhci_read_block_pio(host);
@@ -439,7 +448,7 @@ static int sdhci_adma_table_pre(struct sdhci_host *host,
439 448
440 host->adma_addr = dma_map_single(mmc_dev(host->mmc), 449 host->adma_addr = dma_map_single(mmc_dev(host->mmc),
441 host->adma_desc, (128 * 2 + 1) * 4, DMA_TO_DEVICE); 450 host->adma_desc, (128 * 2 + 1) * 4, DMA_TO_DEVICE);
442 if (dma_mapping_error(mmc_dev(host->mmc), host->align_addr)) 451 if (dma_mapping_error(mmc_dev(host->mmc), host->adma_addr))
443 goto unmap_entries; 452 goto unmap_entries;
444 BUG_ON(host->adma_addr & 0x3); 453 BUG_ON(host->adma_addr & 0x3);
445 454
@@ -645,7 +654,7 @@ static void sdhci_prepare_data(struct sdhci_host *host, struct mmc_data *data)
645 * us an invalid request. 654 * us an invalid request.
646 */ 655 */
647 WARN_ON(1); 656 WARN_ON(1);
648 host->flags &= ~SDHCI_USE_DMA; 657 host->flags &= ~SDHCI_REQ_USE_DMA;
649 } else { 658 } else {
650 writel(host->adma_addr, 659 writel(host->adma_addr,
651 host->ioaddr + SDHCI_ADMA_ADDRESS); 660 host->ioaddr + SDHCI_ADMA_ADDRESS);
@@ -664,7 +673,7 @@ static void sdhci_prepare_data(struct sdhci_host *host, struct mmc_data *data)
664 * us an invalid request. 673 * us an invalid request.
665 */ 674 */
666 WARN_ON(1); 675 WARN_ON(1);
667 host->flags &= ~SDHCI_USE_DMA; 676 host->flags &= ~SDHCI_REQ_USE_DMA;
668 } else { 677 } else {
669 WARN_ON(sg_cnt != 1); 678 WARN_ON(sg_cnt != 1);
670 writel(sg_dma_address(data->sg), 679 writel(sg_dma_address(data->sg),
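
One sdhci.c fix above is easy to miss: dma_mapping_error() was being handed host->align_addr instead of the adma_addr mapping that had just been created, so a failed mapping could slip through. A hedged sketch of the map-then-check sequence; map_desc, dev and desc are placeholder names:

#include <linux/dma-mapping.h>
#include <linux/errno.h>

/*
 * Map a descriptor buffer for device reads and verify that exact mapping.
 * The dma_addr_t passed to dma_mapping_error() must be the one returned by
 * dma_map_single(), which is the point of the one-line fix above.
 */
static int map_desc(struct device *dev, void *desc, size_t len,
		    dma_addr_t *out)
{
	dma_addr_t addr = dma_map_single(dev, desc, len, DMA_TO_DEVICE);

	if (dma_mapping_error(dev, addr))
		return -ENOMEM;

	*out = addr;
	return 0;
}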
diff --git a/drivers/mmc/host/sdhci.h b/drivers/mmc/host/sdhci.h
index e354faee5df0..197d4a05f4ae 100644
--- a/drivers/mmc/host/sdhci.h
+++ b/drivers/mmc/host/sdhci.h
@@ -206,6 +206,8 @@ struct sdhci_host {
206#define SDHCI_QUIRK_NO_SIMULT_VDD_AND_POWER (1<<11) 206#define SDHCI_QUIRK_NO_SIMULT_VDD_AND_POWER (1<<11)
207/* Controller provides an incorrect timeout value for transfers */ 207/* Controller provides an incorrect timeout value for transfers */
208#define SDHCI_QUIRK_BROKEN_TIMEOUT_VAL (1<<12) 208#define SDHCI_QUIRK_BROKEN_TIMEOUT_VAL (1<<12)
209/* Controller has an issue with buffer bits for small transfers */
210#define SDHCI_QUIRK_BROKEN_SMALL_PIO (1<<13)
209 211
210 int irq; /* Device IRQ */ 212 int irq; /* Device IRQ */
211 void __iomem * ioaddr; /* Mapped address */ 213 void __iomem * ioaddr; /* Mapped address */
diff --git a/drivers/mtd/chips/jedec_probe.c b/drivers/mtd/chips/jedec_probe.c
index dbba5abf0db8..f84ab6182148 100644
--- a/drivers/mtd/chips/jedec_probe.c
+++ b/drivers/mtd/chips/jedec_probe.c
@@ -41,7 +41,7 @@
41 41
42 42
43/* AMD */ 43/* AMD */
44#define AM29DL800BB 0x22C8 44#define AM29DL800BB 0x22CB
45#define AM29DL800BT 0x224A 45#define AM29DL800BT 0x224A
46 46
47#define AM29F800BB 0x2258 47#define AM29F800BB 0x2258
diff --git a/drivers/mtd/devices/mtd_dataflash.c b/drivers/mtd/devices/mtd_dataflash.c
index 54e36bfc2c3b..8bd0dea6885f 100644
--- a/drivers/mtd/devices/mtd_dataflash.c
+++ b/drivers/mtd/devices/mtd_dataflash.c
@@ -15,6 +15,8 @@
15#include <linux/delay.h> 15#include <linux/delay.h>
16#include <linux/device.h> 16#include <linux/device.h>
17#include <linux/mutex.h> 17#include <linux/mutex.h>
18#include <linux/err.h>
19
18#include <linux/spi/spi.h> 20#include <linux/spi/spi.h>
19#include <linux/spi/flash.h> 21#include <linux/spi/flash.h>
20 22
@@ -487,9 +489,8 @@ add_dataflash(struct spi_device *spi, char *name,
487 device->write = dataflash_write; 489 device->write = dataflash_write;
488 device->priv = priv; 490 device->priv = priv;
489 491
490 dev_info(&spi->dev, "%s (%d KBytes) pagesize %d bytes, " 492 dev_info(&spi->dev, "%s (%d KBytes) pagesize %d bytes\n",
491 "erasesize %d bytes\n", name, device->size/1024, 493 name, DIV_ROUND_UP(device->size, 1024), pagesize);
492 pagesize, pagesize * 8); /* 8 pages = 1 block */
493 dev_set_drvdata(&spi->dev, priv); 494 dev_set_drvdata(&spi->dev, priv);
494 495
495 if (mtd_has_partitions()) { 496 if (mtd_has_partitions()) {
@@ -518,65 +519,57 @@ add_dataflash(struct spi_device *spi, char *name,
518 return add_mtd_device(device) == 1 ? -ENODEV : 0; 519 return add_mtd_device(device) == 1 ? -ENODEV : 0;
519} 520}
520 521
521/*
522 * Detect and initialize DataFlash device:
523 *
524 * Device Density ID code #Pages PageSize Offset
525 * AT45DB011B 1Mbit (128K) xx0011xx (0x0c) 512 264 9
526 * AT45DB021B 2Mbit (256K) xx0101xx (0x14) 1024 264 9
527 * AT45DB041B 4Mbit (512K) xx0111xx (0x1c) 2048 264 9
528 * AT45DB081B 8Mbit (1M) xx1001xx (0x24) 4096 264 9
529 * AT45DB0161B 16Mbit (2M) xx1011xx (0x2c) 4096 528 10
530 * AT45DB0321B 32Mbit (4M) xx1101xx (0x34) 8192 528 10
531 * AT45DB0642 64Mbit (8M) xx111xxx (0x3c) 8192 1056 11
532 * AT45DB1282 128Mbit (16M) xx0100xx (0x10) 16384 1056 11
533 */
534
535struct flash_info { 522struct flash_info {
536 char *name; 523 char *name;
537 524
538 /* JEDEC id zero means "no ID" (most older chips); otherwise it has 525 /* JEDEC id has a high byte of zero plus three data bytes:
539 * a high byte of zero plus three data bytes: the manufacturer id, 526 * the manufacturer id, then a two byte device id.
540 * then a two byte device id.
541 */ 527 */
542 uint32_t jedec_id; 528 uint32_t jedec_id;
543 529
544 /* The size listed here is what works with OPCODE_SE, which isn't 530 /* The size listed here is what works with OP_ERASE_PAGE. */
545 * necessarily called a "sector" by the vendor.
546 */
547 unsigned nr_pages; 531 unsigned nr_pages;
548 uint16_t pagesize; 532 uint16_t pagesize;
549 uint16_t pageoffset; 533 uint16_t pageoffset;
550 534
551 uint16_t flags; 535 uint16_t flags;
552#define SUP_POW2PS 0x02 536#define SUP_POW2PS 0x0002 /* supports 2^N byte pages */
553#define IS_POW2PS 0x01 537#define IS_POW2PS 0x0001 /* uses 2^N byte pages */
554}; 538};
555 539
556static struct flash_info __devinitdata dataflash_data [] = { 540static struct flash_info __devinitdata dataflash_data [] = {
557 541
558 { "at45db011d", 0x1f2200, 512, 264, 9, SUP_POW2PS}, 542 /*
543 * NOTE: chips with SUP_POW2PS (rev D and up) need two entries,
544 * one with IS_POW2PS and the other without. The entry with the
545 * non-2^N byte page size can't name exact chip revisions without
546 * losing backwards compatibility for cmdlinepart.
547 *
548 * These newer chips also support 128-byte security registers (with
549 * 64 bytes one-time-programmable) and software write-protection.
550 */
551 { "AT45DB011B", 0x1f2200, 512, 264, 9, SUP_POW2PS},
559 { "at45db011d", 0x1f2200, 512, 256, 8, SUP_POW2PS | IS_POW2PS}, 552 { "at45db011d", 0x1f2200, 512, 256, 8, SUP_POW2PS | IS_POW2PS},
560 553
561 { "at45db021d", 0x1f2300, 1024, 264, 9, SUP_POW2PS}, 554 { "AT45DB021B", 0x1f2300, 1024, 264, 9, SUP_POW2PS},
562 { "at45db021d", 0x1f2300, 1024, 256, 8, SUP_POW2PS | IS_POW2PS}, 555 { "at45db021d", 0x1f2300, 1024, 256, 8, SUP_POW2PS | IS_POW2PS},
563 556
564 { "at45db041d", 0x1f2400, 2048, 264, 9, SUP_POW2PS}, 557 { "AT45DB041x", 0x1f2400, 2048, 264, 9, SUP_POW2PS},
565 { "at45db041d", 0x1f2400, 2048, 256, 8, SUP_POW2PS | IS_POW2PS}, 558 { "at45db041d", 0x1f2400, 2048, 256, 8, SUP_POW2PS | IS_POW2PS},
566 559
567 { "at45db081d", 0x1f2500, 4096, 264, 9, SUP_POW2PS}, 560 { "AT45DB081B", 0x1f2500, 4096, 264, 9, SUP_POW2PS},
568 { "at45db081d", 0x1f2500, 4096, 256, 8, SUP_POW2PS | IS_POW2PS}, 561 { "at45db081d", 0x1f2500, 4096, 256, 8, SUP_POW2PS | IS_POW2PS},
569 562
570 { "at45db161d", 0x1f2600, 4096, 528, 10, SUP_POW2PS}, 563 { "AT45DB161x", 0x1f2600, 4096, 528, 10, SUP_POW2PS},
571 { "at45db161d", 0x1f2600, 4096, 512, 9, SUP_POW2PS | IS_POW2PS}, 564 { "at45db161d", 0x1f2600, 4096, 512, 9, SUP_POW2PS | IS_POW2PS},
572 565
573 { "at45db321c", 0x1f2700, 8192, 528, 10, }, 566 { "AT45DB321x", 0x1f2700, 8192, 528, 10, 0}, /* rev C */
574 567
575 { "at45db321d", 0x1f2701, 8192, 528, 10, SUP_POW2PS}, 568 { "AT45DB321x", 0x1f2701, 8192, 528, 10, SUP_POW2PS},
576 { "at45db321d", 0x1f2701, 8192, 512, 9, SUP_POW2PS | IS_POW2PS}, 569 { "at45db321d", 0x1f2701, 8192, 512, 9, SUP_POW2PS | IS_POW2PS},
577 570
578 { "at45db641d", 0x1f2800, 8192, 1056, 11, SUP_POW2PS}, 571 { "AT45DB642x", 0x1f2800, 8192, 1056, 11, SUP_POW2PS},
579 { "at45db641d", 0x1f2800, 8192, 1024, 10, SUP_POW2PS | IS_POW2PS}, 572 { "at45db642d", 0x1f2800, 8192, 1024, 10, SUP_POW2PS | IS_POW2PS},
580}; 573};
581 574
582static struct flash_info *__devinit jedec_probe(struct spi_device *spi) 575static struct flash_info *__devinit jedec_probe(struct spi_device *spi)
@@ -588,17 +581,23 @@ static struct flash_info *__devinit jedec_probe(struct spi_device *spi)
588 struct flash_info *info; 581 struct flash_info *info;
589 int status; 582 int status;
590 583
591
592 /* JEDEC also defines an optional "extended device information" 584 /* JEDEC also defines an optional "extended device information"
593 * string for after vendor-specific data, after the three bytes 585 * string for after vendor-specific data, after the three bytes
594 * we use here. Supporting some chips might require using it. 586 * we use here. Supporting some chips might require using it.
587 *
588 * If the vendor ID isn't Atmel's (0x1f), assume this call failed.
589 * That's not an error; only rev C and newer chips handle it, and
590 * only Atmel sells these chips.
595 */ 591 */
596 tmp = spi_write_then_read(spi, &code, 1, id, 3); 592 tmp = spi_write_then_read(spi, &code, 1, id, 3);
597 if (tmp < 0) { 593 if (tmp < 0) {
598 DEBUG(MTD_DEBUG_LEVEL0, "%s: error %d reading JEDEC ID\n", 594 DEBUG(MTD_DEBUG_LEVEL0, "%s: error %d reading JEDEC ID\n",
599 spi->dev.bus_id, tmp); 595 spi->dev.bus_id, tmp);
600 return NULL; 596 return ERR_PTR(tmp);
601 } 597 }
598 if (id[0] != 0x1f)
599 return NULL;
600
602 jedec = id[0]; 601 jedec = id[0];
603 jedec = jedec << 8; 602 jedec = jedec << 8;
604 jedec |= id[1]; 603 jedec |= id[1];
@@ -609,19 +608,53 @@ static struct flash_info *__devinit jedec_probe(struct spi_device *spi)
609 tmp < ARRAY_SIZE(dataflash_data); 608 tmp < ARRAY_SIZE(dataflash_data);
610 tmp++, info++) { 609 tmp++, info++) {
611 if (info->jedec_id == jedec) { 610 if (info->jedec_id == jedec) {
611 DEBUG(MTD_DEBUG_LEVEL1, "%s: OTP, sector protect%s\n",
612 dev_name(&spi->dev),
613 (info->flags & SUP_POW2PS)
614 ? ", binary pagesize" : ""
615 );
612 if (info->flags & SUP_POW2PS) { 616 if (info->flags & SUP_POW2PS) {
613 status = dataflash_status(spi); 617 status = dataflash_status(spi);
614 if (status & 0x1) 618 if (status < 0) {
615 /* return power of 2 pagesize */ 619 DEBUG(MTD_DEBUG_LEVEL1,
616 return ++info; 620 "%s: status error %d\n",
617 else 621 dev_name(&spi->dev), status);
618 return info; 622 return ERR_PTR(status);
623 }
624 if (status & 0x1) {
625 if (info->flags & IS_POW2PS)
626 return info;
627 } else {
628 if (!(info->flags & IS_POW2PS))
629 return info;
630 }
619 } 631 }
620 } 632 }
621 } 633 }
622 return NULL; 634
635 /*
636 * Treat other chips as errors ... we won't know the right page
637 * size (it might be binary) even when we can tell which density
638 * class is involved (legacy chip id scheme).
639 */
640 dev_warn(&spi->dev, "JEDEC id %06x not handled\n", jedec);
641 return ERR_PTR(-ENODEV);
623} 642}
624 643
644/*
645 * Detect and initialize DataFlash device, using JEDEC IDs on newer chips
646 * or else the ID code embedded in the status bits:
647 *
648 * Device Density ID code #Pages PageSize Offset
649 * AT45DB011B 1Mbit (128K) xx0011xx (0x0c) 512 264 9
650 * AT45DB021B 2Mbit (256K) xx0101xx (0x14) 1024 264 9
651 * AT45DB041B 4Mbit (512K) xx0111xx (0x1c) 2048 264 9
652 * AT45DB081B 8Mbit (1M) xx1001xx (0x24) 4096 264 9
653 * AT45DB0161B 16Mbit (2M) xx1011xx (0x2c) 4096 528 10
654 * AT45DB0321B 32Mbit (4M) xx1101xx (0x34) 8192 528 10
655 * AT45DB0642 64Mbit (8M) xx111xxx (0x3c) 8192 1056 11
656 * AT45DB1282 128Mbit (16M) xx0100xx (0x10) 16384 1056 11
657 */
625static int __devinit dataflash_probe(struct spi_device *spi) 658static int __devinit dataflash_probe(struct spi_device *spi)
626{ 659{
627 int status; 660 int status;
@@ -632,14 +665,17 @@ static int __devinit dataflash_probe(struct spi_device *spi)
632 * If it succeeds we know we have either a C or D part. 665 * If it succeeds we know we have either a C or D part.
633 * D will support power of 2 pagesize option. 666 * D will support power of 2 pagesize option.
634 */ 667 */
635
636 info = jedec_probe(spi); 668 info = jedec_probe(spi);
637 669 if (IS_ERR(info))
670 return PTR_ERR(info);
638 if (info != NULL) 671 if (info != NULL)
639 return add_dataflash(spi, info->name, info->nr_pages, 672 return add_dataflash(spi, info->name, info->nr_pages,
640 info->pagesize, info->pageoffset); 673 info->pagesize, info->pageoffset);
641 674
642 675 /*
 676 * Older chips support only legacy commands, identifying
677 * capacity using bits in the status byte.
678 */
643 status = dataflash_status(spi); 679 status = dataflash_status(spi);
644 if (status <= 0 || status == 0xff) { 680 if (status <= 0 || status == 0xff) {
645 DEBUG(MTD_DEBUG_LEVEL1, "%s: status error %d\n", 681 DEBUG(MTD_DEBUG_LEVEL1, "%s: status error %d\n",
@@ -661,13 +697,13 @@ static int __devinit dataflash_probe(struct spi_device *spi)
661 status = add_dataflash(spi, "AT45DB021B", 1024, 264, 9); 697 status = add_dataflash(spi, "AT45DB021B", 1024, 264, 9);
662 break; 698 break;
663 case 0x1c: /* 0 1 1 1 x x */ 699 case 0x1c: /* 0 1 1 1 x x */
664 status = add_dataflash(spi, "AT45DB041B", 2048, 264, 9); 700 status = add_dataflash(spi, "AT45DB041x", 2048, 264, 9);
665 break; 701 break;
666 case 0x24: /* 1 0 0 1 x x */ 702 case 0x24: /* 1 0 0 1 x x */
667 status = add_dataflash(spi, "AT45DB081B", 4096, 264, 9); 703 status = add_dataflash(spi, "AT45DB081B", 4096, 264, 9);
668 break; 704 break;
669 case 0x2c: /* 1 0 1 1 x x */ 705 case 0x2c: /* 1 0 1 1 x x */
670 status = add_dataflash(spi, "AT45DB161B", 4096, 528, 10); 706 status = add_dataflash(spi, "AT45DB161x", 4096, 528, 10);
671 break; 707 break;
672 case 0x34: /* 1 1 0 1 x x */ 708 case 0x34: /* 1 1 0 1 x x */
673 status = add_dataflash(spi, "AT45DB321x", 8192, 528, 10); 709 status = add_dataflash(spi, "AT45DB321x", 8192, 528, 10);
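
The reworked jedec_probe() above now has three distinct outcomes: a valid flash_info pointer (chip identified), NULL (no usable JEDEC ID, fall back to the legacy status-byte probe) and ERR_PTR() (a hard error that should abort the probe). A small sketch of that tri-state return convention; chip_info, probe_table and do_probe are hypothetical:

#include <linux/err.h>
#include <linux/errno.h>

struct chip_info { const char *name; };

static struct chip_info known_chip = { .name = "example-chip" };

/* valid pointer = identified, NULL = no usable ID, ERR_PTR() = hard error */
static struct chip_info *probe_table(int id)
{
	if (id < 0)
		return ERR_PTR(id);	/* propagate the bus error */
	if (id == 0)
		return NULL;		/* chip answered, but gave no ID */
	return &known_chip;
}

static int do_probe(int id)
{
	struct chip_info *info = probe_table(id);

	if (IS_ERR(info))
		return PTR_ERR(info);	/* real failure: stop probing */
	if (info)
		return 0;		/* identified: use info->name etc. */
	return 1;			/* caller falls back to legacy probe */
}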
diff --git a/drivers/mtd/maps/autcpu12-nvram.c b/drivers/mtd/maps/autcpu12-nvram.c
index cf32267263df..53664188fc47 100644
--- a/drivers/mtd/maps/autcpu12-nvram.c
+++ b/drivers/mtd/maps/autcpu12-nvram.c
@@ -25,8 +25,8 @@
25#include <linux/init.h> 25#include <linux/init.h>
26#include <asm/io.h> 26#include <asm/io.h>
27#include <asm/sizes.h> 27#include <asm/sizes.h>
28#include <asm/hardware.h> 28#include <mach/hardware.h>
29#include <asm/arch/autcpu12.h> 29#include <mach/autcpu12.h>
30#include <linux/mtd/mtd.h> 30#include <linux/mtd/mtd.h>
31#include <linux/mtd/map.h> 31#include <linux/mtd/map.h>
32#include <linux/mtd/partitions.h> 32#include <linux/mtd/partitions.h>
diff --git a/drivers/mtd/maps/cdb89712.c b/drivers/mtd/maps/cdb89712.c
index cb507da0a87d..e5059aa3c724 100644
--- a/drivers/mtd/maps/cdb89712.c
+++ b/drivers/mtd/maps/cdb89712.c
@@ -9,7 +9,7 @@
9#include <linux/ioport.h> 9#include <linux/ioport.h>
10#include <linux/init.h> 10#include <linux/init.h>
11#include <asm/io.h> 11#include <asm/io.h>
12#include <asm/arch/hardware.h> 12#include <mach/hardware.h>
13#include <linux/mtd/mtd.h> 13#include <linux/mtd/mtd.h>
14#include <linux/mtd/map.h> 14#include <linux/mtd/map.h>
15#include <linux/mtd/partitions.h> 15#include <linux/mtd/partitions.h>
diff --git a/drivers/mtd/maps/ceiva.c b/drivers/mtd/maps/ceiva.c
index 6464d487eb1a..60e68bde0fea 100644
--- a/drivers/mtd/maps/ceiva.c
+++ b/drivers/mtd/maps/ceiva.c
@@ -25,7 +25,7 @@
25#include <linux/mtd/partitions.h> 25#include <linux/mtd/partitions.h>
26#include <linux/mtd/concat.h> 26#include <linux/mtd/concat.h>
27 27
28#include <asm/hardware.h> 28#include <mach/hardware.h>
29#include <asm/mach-types.h> 29#include <asm/mach-types.h>
30#include <asm/io.h> 30#include <asm/io.h>
31#include <asm/sizes.h> 31#include <asm/sizes.h>
diff --git a/drivers/mtd/maps/h720x-flash.c b/drivers/mtd/maps/h720x-flash.c
index ef8915474462..35fef655ccc4 100644
--- a/drivers/mtd/maps/h720x-flash.c
+++ b/drivers/mtd/maps/h720x-flash.c
@@ -16,7 +16,7 @@
16#include <linux/mtd/mtd.h> 16#include <linux/mtd/mtd.h>
17#include <linux/mtd/map.h> 17#include <linux/mtd/map.h>
18#include <linux/mtd/partitions.h> 18#include <linux/mtd/partitions.h>
19#include <asm/hardware.h> 19#include <mach/hardware.h>
20#include <asm/io.h> 20#include <asm/io.h>
21 21
22static struct mtd_info *mymtd; 22static struct mtd_info *mymtd;
diff --git a/drivers/mtd/maps/integrator-flash.c b/drivers/mtd/maps/integrator-flash.c
index ee361aaadb1e..7100ee3c7b01 100644
--- a/drivers/mtd/maps/integrator-flash.c
+++ b/drivers/mtd/maps/integrator-flash.c
@@ -37,7 +37,7 @@
37#include <linux/mtd/partitions.h> 37#include <linux/mtd/partitions.h>
38 38
39#include <asm/mach/flash.h> 39#include <asm/mach/flash.h>
40#include <asm/hardware.h> 40#include <mach/hardware.h>
41#include <asm/io.h> 41#include <asm/io.h>
42#include <asm/system.h> 42#include <asm/system.h>
43 43
diff --git a/drivers/mtd/maps/ipaq-flash.c b/drivers/mtd/maps/ipaq-flash.c
index a806119797e0..ed58f6a77bd9 100644
--- a/drivers/mtd/maps/ipaq-flash.c
+++ b/drivers/mtd/maps/ipaq-flash.c
@@ -24,8 +24,8 @@
24#include <linux/mtd/concat.h> 24#include <linux/mtd/concat.h>
25#endif 25#endif
26 26
27#include <asm/hardware.h> 27#include <mach/hardware.h>
28#include <asm/arch-sa1100/h3600.h> 28#include <mach/h3600.h>
29#include <asm/io.h> 29#include <asm/io.h>
30 30
31 31
diff --git a/drivers/mtd/maps/ixp2000.c b/drivers/mtd/maps/ixp2000.c
index c2264792a20b..dcdb1f17577d 100644
--- a/drivers/mtd/maps/ixp2000.c
+++ b/drivers/mtd/maps/ixp2000.c
@@ -30,7 +30,7 @@
30#include <linux/mtd/partitions.h> 30#include <linux/mtd/partitions.h>
31 31
32#include <asm/io.h> 32#include <asm/io.h>
33#include <asm/hardware.h> 33#include <mach/hardware.h>
34#include <asm/mach/flash.h> 34#include <asm/mach/flash.h>
35 35
36#include <linux/reboot.h> 36#include <linux/reboot.h>
diff --git a/drivers/mtd/maps/omap_nor.c b/drivers/mtd/maps/omap_nor.c
index 68eec6c6c517..05f276af15da 100644
--- a/drivers/mtd/maps/omap_nor.c
+++ b/drivers/mtd/maps/omap_nor.c
@@ -43,9 +43,9 @@
43#include <linux/mtd/partitions.h> 43#include <linux/mtd/partitions.h>
44 44
45#include <asm/io.h> 45#include <asm/io.h>
46#include <asm/hardware.h> 46#include <mach/hardware.h>
47#include <asm/mach/flash.h> 47#include <asm/mach/flash.h>
48#include <asm/arch/tc.h> 48#include <mach/tc.h>
49 49
50#ifdef CONFIG_MTD_PARTITIONS 50#ifdef CONFIG_MTD_PARTITIONS
51static const char *part_probes[] = { /* "RedBoot", */ "cmdlinepart", NULL }; 51static const char *part_probes[] = { /* "RedBoot", */ "cmdlinepart", NULL };
diff --git a/drivers/mtd/maps/pxa2xx-flash.c b/drivers/mtd/maps/pxa2xx-flash.c
index 82113295c266..771139c5bf87 100644
--- a/drivers/mtd/maps/pxa2xx-flash.c
+++ b/drivers/mtd/maps/pxa2xx-flash.c
@@ -19,7 +19,7 @@
19#include <linux/mtd/partitions.h> 19#include <linux/mtd/partitions.h>
20 20
21#include <asm/io.h> 21#include <asm/io.h>
22#include <asm/hardware.h> 22#include <mach/hardware.h>
23#include <asm/cacheflush.h> 23#include <asm/cacheflush.h>
24 24
25#include <asm/mach/flash.h> 25#include <asm/mach/flash.h>
diff --git a/drivers/mtd/maps/sa1100-flash.c b/drivers/mtd/maps/sa1100-flash.c
index e177a43dfff0..7df6bbf0e4d9 100644
--- a/drivers/mtd/maps/sa1100-flash.c
+++ b/drivers/mtd/maps/sa1100-flash.c
@@ -18,7 +18,7 @@
18#include <linux/mtd/partitions.h> 18#include <linux/mtd/partitions.h>
19#include <linux/mtd/concat.h> 19#include <linux/mtd/concat.h>
20 20
21#include <asm/hardware.h> 21#include <mach/hardware.h>
22#include <asm/io.h> 22#include <asm/io.h>
23#include <asm/sizes.h> 23#include <asm/sizes.h>
24#include <asm/mach/flash.h> 24#include <asm/mach/flash.h>
diff --git a/drivers/mtd/mtdsuper.c b/drivers/mtd/mtdsuper.c
index 28cc6787a800..00d46e137b2a 100644
--- a/drivers/mtd/mtdsuper.c
+++ b/drivers/mtd/mtdsuper.c
@@ -125,8 +125,11 @@ int get_sb_mtd(struct file_system_type *fs_type, int flags,
125 int (*fill_super)(struct super_block *, void *, int), 125 int (*fill_super)(struct super_block *, void *, int),
126 struct vfsmount *mnt) 126 struct vfsmount *mnt)
127{ 127{
128 struct nameidata nd; 128#ifdef CONFIG_BLOCK
129 int mtdnr, ret; 129 struct block_device *bdev;
130 int ret, major;
131#endif
132 int mtdnr;
130 133
131 if (!dev_name) 134 if (!dev_name)
132 return -EINVAL; 135 return -EINVAL;
@@ -178,45 +181,38 @@ int get_sb_mtd(struct file_system_type *fs_type, int flags,
178 } 181 }
179 } 182 }
180 183
184#ifdef CONFIG_BLOCK
181 /* try the old way - the hack where we allowed users to mount 185 /* try the old way - the hack where we allowed users to mount
182 * /dev/mtdblock$(n) but didn't actually _use_ the blockdev 186 * /dev/mtdblock$(n) but didn't actually _use_ the blockdev
183 */ 187 */
184 ret = path_lookup(dev_name, LOOKUP_FOLLOW, &nd); 188 bdev = lookup_bdev(dev_name);
185 189 if (IS_ERR(bdev)) {
186 DEBUG(1, "MTDSB: path_lookup() returned %d, inode %p\n", 190 ret = PTR_ERR(bdev);
187 ret, nd.path.dentry ? nd.path.dentry->d_inode : NULL); 191 DEBUG(1, "MTDSB: lookup_bdev() returned %d\n", ret);
188
189 if (ret)
190 return ret; 192 return ret;
193 }
194 DEBUG(1, "MTDSB: lookup_bdev() returned 0\n");
191 195
192 ret = -EINVAL; 196 ret = -EINVAL;
193 197
194 if (!S_ISBLK(nd.path.dentry->d_inode->i_mode)) 198 major = MAJOR(bdev->bd_dev);
195 goto out; 199 mtdnr = MINOR(bdev->bd_dev);
196 200 bdput(bdev);
197 if (nd.path.mnt->mnt_flags & MNT_NODEV) {
198 ret = -EACCES;
199 goto out;
200 }
201 201
202 if (imajor(nd.path.dentry->d_inode) != MTD_BLOCK_MAJOR) 202 if (major != MTD_BLOCK_MAJOR)
203 goto not_an_MTD_device; 203 goto not_an_MTD_device;
204 204
205 mtdnr = iminor(nd.path.dentry->d_inode);
206 path_put(&nd.path);
207
208 return get_sb_mtd_nr(fs_type, flags, dev_name, data, mtdnr, fill_super, 205 return get_sb_mtd_nr(fs_type, flags, dev_name, data, mtdnr, fill_super,
209 mnt); 206 mnt);
210 207
211not_an_MTD_device: 208not_an_MTD_device:
209#endif /* CONFIG_BLOCK */
210
212 if (!(flags & MS_SILENT)) 211 if (!(flags & MS_SILENT))
213 printk(KERN_NOTICE 212 printk(KERN_NOTICE
214 "MTD: Attempt to mount non-MTD device \"%s\"\n", 213 "MTD: Attempt to mount non-MTD device \"%s\"\n",
215 dev_name); 214 dev_name);
216out: 215 return -EINVAL;
217 path_put(&nd.path);
218 return ret;
219
220} 216}
221 217
222EXPORT_SYMBOL_GPL(get_sb_mtd); 218EXPORT_SYMBOL_GPL(get_sb_mtd);
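
get_sb_mtd() above now resolves /dev/mtdblockN with lookup_bdev() instead of a full path_lookup(), dropping the struct nameidata entirely. A sketch of that lookup built from the calls visible in the hunk; mtdblock_minor is an invented wrapper:

#include <linux/err.h>
#include <linux/errno.h>
#include <linux/fs.h>
#include <linux/kdev_t.h>
#include <linux/major.h>

/*
 * Resolve a block-device path and return its minor number, or a negative
 * errno.  Same lookup_bdev()/bdput() pairing and MTD_BLOCK_MAJOR check as
 * the code above.
 */
static int mtdblock_minor(const char *dev_name)
{
	struct block_device *bdev;
	int minor;

	bdev = lookup_bdev(dev_name);
	if (IS_ERR(bdev))
		return PTR_ERR(bdev);

	if (MAJOR(bdev->bd_dev) != MTD_BLOCK_MAJOR) {
		bdput(bdev);
		return -EINVAL;
	}

	minor = MINOR(bdev->bd_dev);
	bdput(bdev);
	return minor;
}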
diff --git a/drivers/mtd/nand/Kconfig b/drivers/mtd/nand/Kconfig
index 71406e517857..02f9cc30d77b 100644
--- a/drivers/mtd/nand/Kconfig
+++ b/drivers/mtd/nand/Kconfig
@@ -104,11 +104,24 @@ config MTD_NAND_BF5XX
104 104
105config MTD_NAND_BF5XX_HWECC 105config MTD_NAND_BF5XX_HWECC
106 bool "BF5XX NAND Hardware ECC" 106 bool "BF5XX NAND Hardware ECC"
107 default y
107 depends on MTD_NAND_BF5XX 108 depends on MTD_NAND_BF5XX
108 help 109 help
109 Enable the use of the BF5XX's internal ECC generator when 110 Enable the use of the BF5XX's internal ECC generator when
110 using NAND. 111 using NAND.
111 112
113config MTD_NAND_BF5XX_BOOTROM_ECC
114 bool "Use Blackfin BootROM ECC Layout"
115 default n
116 depends on MTD_NAND_BF5XX_HWECC
117 help
118 If you wish to modify NAND pages and allow the Blackfin on-chip
119 BootROM to boot from them, say Y here. This is only necessary
120 if you are booting U-Boot out of NAND and you wish to update
121 U-Boot from Linux' userspace. Otherwise, you should say N here.
122
123 If unsure, say N.
124
112config MTD_NAND_RTC_FROM4 125config MTD_NAND_RTC_FROM4
113 tristate "Renesas Flash ROM 4-slot interface board (FROM_BOARD4)" 126 tristate "Renesas Flash ROM 4-slot interface board (FROM_BOARD4)"
114 depends on SH_SOLUTION_ENGINE 127 depends on SH_SOLUTION_ENGINE
diff --git a/drivers/mtd/nand/ams-delta.c b/drivers/mtd/nand/ams-delta.c
index a0ba07c36ee9..26d42987971f 100644
--- a/drivers/mtd/nand/ams-delta.c
+++ b/drivers/mtd/nand/ams-delta.c
@@ -22,10 +22,10 @@
22#include <linux/mtd/nand.h> 22#include <linux/mtd/nand.h>
23#include <linux/mtd/partitions.h> 23#include <linux/mtd/partitions.h>
24#include <asm/io.h> 24#include <asm/io.h>
25#include <asm/arch/hardware.h> 25#include <mach/hardware.h>
26#include <asm/sizes.h> 26#include <asm/sizes.h>
27#include <asm/arch/gpio.h> 27#include <mach/gpio.h>
28#include <asm/arch/board-ams-delta.h> 28#include <mach/board-ams-delta.h>
29 29
30/* 30/*
31 * MTD structure for E3 (Delta) 31 * MTD structure for E3 (Delta)
diff --git a/drivers/mtd/nand/atmel_nand.c b/drivers/mtd/nand/atmel_nand.c
index 99aec46e2145..3387e0d5076b 100644
--- a/drivers/mtd/nand/atmel_nand.c
+++ b/drivers/mtd/nand/atmel_nand.c
@@ -32,8 +32,8 @@
32#include <linux/gpio.h> 32#include <linux/gpio.h>
33#include <linux/io.h> 33#include <linux/io.h>
34 34
35#include <asm/arch/board.h> 35#include <mach/board.h>
36#include <asm/arch/cpu.h> 36#include <mach/cpu.h>
37 37
38#ifdef CONFIG_MTD_NAND_ATMEL_ECC_HW 38#ifdef CONFIG_MTD_NAND_ATMEL_ECC_HW
39#define hard_ecc 1 39#define hard_ecc 1
diff --git a/drivers/mtd/nand/autcpu12.c b/drivers/mtd/nand/autcpu12.c
index 553dd7e9b41c..7c95da1f612c 100644
--- a/drivers/mtd/nand/autcpu12.c
+++ b/drivers/mtd/nand/autcpu12.c
@@ -32,9 +32,9 @@
32#include <linux/mtd/nand.h> 32#include <linux/mtd/nand.h>
33#include <linux/mtd/partitions.h> 33#include <linux/mtd/partitions.h>
34#include <asm/io.h> 34#include <asm/io.h>
35#include <asm/arch/hardware.h> 35#include <mach/hardware.h>
36#include <asm/sizes.h> 36#include <asm/sizes.h>
37#include <asm/arch/autcpu12.h> 37#include <mach/autcpu12.h>
38 38
39/* 39/*
40 * MTD structure for AUTCPU12 board 40 * MTD structure for AUTCPU12 board
diff --git a/drivers/mtd/nand/bf5xx_nand.c b/drivers/mtd/nand/bf5xx_nand.c
index e87a57297328..9af2a2cc1153 100644
--- a/drivers/mtd/nand/bf5xx_nand.c
+++ b/drivers/mtd/nand/bf5xx_nand.c
@@ -91,6 +91,41 @@ static const unsigned short bfin_nfc_pin_req[] =
91 P_NAND_ALE, 91 P_NAND_ALE,
92 0}; 92 0};
93 93
94#ifdef CONFIG_MTD_NAND_BF5XX_BOOTROM_ECC
95static uint8_t bbt_pattern[] = { 0xff };
96
97static struct nand_bbt_descr bootrom_bbt = {
98 .options = 0,
99 .offs = 63,
100 .len = 1,
101 .pattern = bbt_pattern,
102};
103
104static struct nand_ecclayout bootrom_ecclayout = {
105 .eccbytes = 24,
106 .eccpos = {
107 0x8 * 0, 0x8 * 0 + 1, 0x8 * 0 + 2,
108 0x8 * 1, 0x8 * 1 + 1, 0x8 * 1 + 2,
109 0x8 * 2, 0x8 * 2 + 1, 0x8 * 2 + 2,
110 0x8 * 3, 0x8 * 3 + 1, 0x8 * 3 + 2,
111 0x8 * 4, 0x8 * 4 + 1, 0x8 * 4 + 2,
112 0x8 * 5, 0x8 * 5 + 1, 0x8 * 5 + 2,
113 0x8 * 6, 0x8 * 6 + 1, 0x8 * 6 + 2,
114 0x8 * 7, 0x8 * 7 + 1, 0x8 * 7 + 2
115 },
116 .oobfree = {
117 { 0x8 * 0 + 3, 5 },
118 { 0x8 * 1 + 3, 5 },
119 { 0x8 * 2 + 3, 5 },
120 { 0x8 * 3 + 3, 5 },
121 { 0x8 * 4 + 3, 5 },
122 { 0x8 * 5 + 3, 5 },
123 { 0x8 * 6 + 3, 5 },
124 { 0x8 * 7 + 3, 5 },
125 }
126};
127#endif
128
94/* 129/*
95 * Data structures for bf5xx nand flash controller driver 130 * Data structures for bf5xx nand flash controller driver
96 */ 131 */
@@ -273,7 +308,7 @@ static int bf5xx_nand_correct_data(struct mtd_info *mtd, u_char *dat,
273 dat += 256; 308 dat += 256;
274 read_ecc += 8; 309 read_ecc += 8;
275 calc_ecc += 8; 310 calc_ecc += 8;
276 ret = bf5xx_nand_correct_data_256(mtd, dat, read_ecc, calc_ecc); 311 ret |= bf5xx_nand_correct_data_256(mtd, dat, read_ecc, calc_ecc);
277 } 312 }
278 313
279 return ret; 314 return ret;
@@ -298,7 +333,7 @@ static int bf5xx_nand_calculate_ecc(struct mtd_info *mtd,
298 ecc0 = bfin_read_NFC_ECC0(); 333 ecc0 = bfin_read_NFC_ECC0();
299 ecc1 = bfin_read_NFC_ECC1(); 334 ecc1 = bfin_read_NFC_ECC1();
300 335
301 code[0] = (ecc0 & 0x3FF) | ((ecc1 & 0x3FF) << 11); 336 code[0] = (ecc0 & 0x7ff) | ((ecc1 & 0x7ff) << 11);
302 337
303 dev_dbg(info->device, "returning ecc 0x%08x\n", code[0]); 338 dev_dbg(info->device, "returning ecc 0x%08x\n", code[0]);
304 339
@@ -310,7 +345,7 @@ static int bf5xx_nand_calculate_ecc(struct mtd_info *mtd,
310 if (page_size == 512) { 345 if (page_size == 512) {
311 ecc0 = bfin_read_NFC_ECC2(); 346 ecc0 = bfin_read_NFC_ECC2();
312 ecc1 = bfin_read_NFC_ECC3(); 347 ecc1 = bfin_read_NFC_ECC3();
313 code[1] = (ecc0 & 0x3FF) | ((ecc1 & 0x3FF) << 11); 348 code[1] = (ecc0 & 0x7ff) | ((ecc1 & 0x7ff) << 11);
314 349
315 /* second 3 bytes in ecc_code for second 256 350 /* second 3 bytes in ecc_code for second 256
316 * bytes of 512 page size 351 * bytes of 512 page size
@@ -514,7 +549,6 @@ static void bf5xx_nand_dma_write_buf(struct mtd_info *mtd,
514/* 549/*
515 * System initialization functions 550 * System initialization functions
516 */ 551 */
517
518static int bf5xx_nand_dma_init(struct bf5xx_nand_info *info) 552static int bf5xx_nand_dma_init(struct bf5xx_nand_info *info)
519{ 553{
520 int ret; 554 int ret;
@@ -547,6 +581,13 @@ static int bf5xx_nand_dma_init(struct bf5xx_nand_info *info)
547 return 0; 581 return 0;
548} 582}
549 583
584static void bf5xx_nand_dma_remove(struct bf5xx_nand_info *info)
585{
586 /* Free NFC DMA channel */
587 if (hardware_ecc)
588 free_dma(CH_NFC);
589}
590
550/* 591/*
551 * BF5XX NFC hardware initialization 592 * BF5XX NFC hardware initialization
552 * - pin mux setup 593 * - pin mux setup
@@ -605,7 +646,7 @@ static int bf5xx_nand_add_partition(struct bf5xx_nand_info *info)
605#endif 646#endif
606} 647}
607 648
608static int bf5xx_nand_remove(struct platform_device *pdev) 649static int __devexit bf5xx_nand_remove(struct platform_device *pdev)
609{ 650{
610 struct bf5xx_nand_info *info = to_nand_info(pdev); 651 struct bf5xx_nand_info *info = to_nand_info(pdev);
611 struct mtd_info *mtd = NULL; 652 struct mtd_info *mtd = NULL;
@@ -623,6 +664,7 @@ static int bf5xx_nand_remove(struct platform_device *pdev)
623 } 664 }
624 665
625 peripheral_free_list(bfin_nfc_pin_req); 666 peripheral_free_list(bfin_nfc_pin_req);
667 bf5xx_nand_dma_remove(info);
626 668
627 /* free the common resources */ 669 /* free the common resources */
628 kfree(info); 670 kfree(info);
@@ -638,7 +680,7 @@ static int bf5xx_nand_remove(struct platform_device *pdev)
638 * it can allocate all necessary resources then calls the 680 * it can allocate all necessary resources then calls the
639 * nand layer to look for devices 681 * nand layer to look for devices
640 */ 682 */
641static int bf5xx_nand_probe(struct platform_device *pdev) 683static int __devinit bf5xx_nand_probe(struct platform_device *pdev)
642{ 684{
643 struct bf5xx_nand_platform *plat = to_nand_plat(pdev); 685 struct bf5xx_nand_platform *plat = to_nand_plat(pdev);
644 struct bf5xx_nand_info *info = NULL; 686 struct bf5xx_nand_info *info = NULL;
@@ -648,22 +690,21 @@ static int bf5xx_nand_probe(struct platform_device *pdev)
648 690
649 dev_dbg(&pdev->dev, "(%p)\n", pdev); 691 dev_dbg(&pdev->dev, "(%p)\n", pdev);
650 692
651 if (peripheral_request_list(bfin_nfc_pin_req, DRV_NAME)) {
652 printk(KERN_ERR DRV_NAME
653 ": Requesting Peripherals failed\n");
654 return -EFAULT;
655 }
656
657 if (!plat) { 693 if (!plat) {
658 dev_err(&pdev->dev, "no platform specific information\n"); 694 dev_err(&pdev->dev, "no platform specific information\n");
659 goto exit_error; 695 return -EINVAL;
696 }
697
698 if (peripheral_request_list(bfin_nfc_pin_req, DRV_NAME)) {
699 dev_err(&pdev->dev, "requesting Peripherals failed\n");
700 return -EFAULT;
660 } 701 }
661 702
662 info = kzalloc(sizeof(*info), GFP_KERNEL); 703 info = kzalloc(sizeof(*info), GFP_KERNEL);
663 if (info == NULL) { 704 if (info == NULL) {
664 dev_err(&pdev->dev, "no memory for flash info\n"); 705 dev_err(&pdev->dev, "no memory for flash info\n");
665 err = -ENOMEM; 706 err = -ENOMEM;
666 goto exit_error; 707 goto out_err_kzalloc;
667 } 708 }
668 709
669 platform_set_drvdata(pdev, info); 710 platform_set_drvdata(pdev, info);
@@ -707,11 +748,16 @@ static int bf5xx_nand_probe(struct platform_device *pdev)
707 748
708 /* initialise the hardware */ 749 /* initialise the hardware */
709 err = bf5xx_nand_hw_init(info); 750 err = bf5xx_nand_hw_init(info);
710 if (err != 0) 751 if (err)
711 goto exit_error; 752 goto out_err_hw_init;
712 753
713 /* setup hardware ECC data struct */ 754 /* setup hardware ECC data struct */
714 if (hardware_ecc) { 755 if (hardware_ecc) {
756#ifdef CONFIG_MTD_NAND_BF5XX_BOOTROM_ECC
757 chip->badblock_pattern = &bootrom_bbt;
758 chip->ecc.layout = &bootrom_ecclayout;
759#endif
760
715 if (plat->page_size == NFC_PG_SIZE_256) { 761 if (plat->page_size == NFC_PG_SIZE_256) {
716 chip->ecc.bytes = 3; 762 chip->ecc.bytes = 3;
717 chip->ecc.size = 256; 763 chip->ecc.size = 256;
@@ -733,7 +779,7 @@ static int bf5xx_nand_probe(struct platform_device *pdev)
733 /* scan hardware nand chip and setup mtd info data struct */ 779 /* scan hardware nand chip and setup mtd info data struct */
734 if (nand_scan(mtd, 1)) { 780 if (nand_scan(mtd, 1)) {
735 err = -ENXIO; 781 err = -ENXIO;
736 goto exit_error; 782 goto out_err_nand_scan;
737 } 783 }
738 784
739 /* add NAND partition */ 785 /* add NAND partition */
@@ -742,11 +788,14 @@ static int bf5xx_nand_probe(struct platform_device *pdev)
742 dev_dbg(&pdev->dev, "initialised ok\n"); 788 dev_dbg(&pdev->dev, "initialised ok\n");
743 return 0; 789 return 0;
744 790
745exit_error: 791out_err_nand_scan:
746 bf5xx_nand_remove(pdev); 792 bf5xx_nand_dma_remove(info);
793out_err_hw_init:
794 platform_set_drvdata(pdev, NULL);
795 kfree(info);
796out_err_kzalloc:
797 peripheral_free_list(bfin_nfc_pin_req);
747 798
748 if (err == 0)
749 err = -EINVAL;
750 return err; 799 return err;
751} 800}
752 801
@@ -775,7 +824,7 @@ static int bf5xx_nand_resume(struct platform_device *dev)
775/* driver device registration */ 824/* driver device registration */
776static struct platform_driver bf5xx_nand_driver = { 825static struct platform_driver bf5xx_nand_driver = {
777 .probe = bf5xx_nand_probe, 826 .probe = bf5xx_nand_probe,
778 .remove = bf5xx_nand_remove, 827 .remove = __devexit_p(bf5xx_nand_remove),
779 .suspend = bf5xx_nand_suspend, 828 .suspend = bf5xx_nand_suspend,
780 .resume = bf5xx_nand_resume, 829 .resume = bf5xx_nand_resume,
781 .driver = { 830 .driver = {
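
bf5xx_nand_probe() above replaces a single catch-all exit_error label, which re-ran the whole remove path, with stacked labels that undo only what actually succeeded. A generic sketch of that layered-unwind style; ctx, step_a/step_b and the labels are placeholders:

#include <linux/errno.h>
#include <linux/slab.h>

struct ctx { int dummy; };

static int step_a(struct ctx *c) { return 0; }	/* pretend set-up step */
static void undo_a(struct ctx *c) { }
static int step_b(struct ctx *c) { return 0; }	/* may fail after step_a */

static int example_probe(void)
{
	struct ctx *c;
	int err;

	c = kzalloc(sizeof(*c), GFP_KERNEL);
	if (!c)
		return -ENOMEM;

	err = step_a(c);
	if (err)
		goto out_free;		/* nothing from step_a to undo */

	err = step_b(c);
	if (err)
		goto out_undo_a;	/* unwind exactly what succeeded */

	return 0;

out_undo_a:
	undo_a(c);
out_free:
	kfree(c);
	return err;
}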
diff --git a/drivers/mtd/nand/cmx270_nand.c b/drivers/mtd/nand/cmx270_nand.c
index fc8529bedfdf..9eba3f04783a 100644
--- a/drivers/mtd/nand/cmx270_nand.c
+++ b/drivers/mtd/nand/cmx270_nand.c
@@ -26,8 +26,8 @@
26#include <asm/irq.h> 26#include <asm/irq.h>
27#include <asm/mach-types.h> 27#include <asm/mach-types.h>
28 28
29#include <asm/arch/hardware.h> 29#include <mach/hardware.h>
30#include <asm/arch/pxa-regs.h> 30#include <mach/pxa-regs.h>
31 31
32#define GPIO_NAND_CS (11) 32#define GPIO_NAND_CS (11)
33#define GPIO_NAND_RB (89) 33#define GPIO_NAND_RB (89)
diff --git a/drivers/mtd/nand/diskonchip.c b/drivers/mtd/nand/diskonchip.c
index 765d4f0f7c86..e4226e02d63e 100644
--- a/drivers/mtd/nand/diskonchip.c
+++ b/drivers/mtd/nand/diskonchip.c
@@ -1125,9 +1125,9 @@ static inline int __init nftl_partscan(struct mtd_info *mtd, struct mtd_partitio
1125 goto out; 1125 goto out;
1126 mh = (struct NFTLMediaHeader *)buf; 1126 mh = (struct NFTLMediaHeader *)buf;
1127 1127
1128 mh->NumEraseUnits = le16_to_cpu(mh->NumEraseUnits); 1128 le16_to_cpus(&mh->NumEraseUnits);
1129 mh->FirstPhysicalEUN = le16_to_cpu(mh->FirstPhysicalEUN); 1129 le16_to_cpus(&mh->FirstPhysicalEUN);
1130 mh->FormattedSize = le32_to_cpu(mh->FormattedSize); 1130 le32_to_cpus(&mh->FormattedSize);
1131 1131
1132 printk(KERN_INFO " DataOrgID = %s\n" 1132 printk(KERN_INFO " DataOrgID = %s\n"
1133 " NumEraseUnits = %d\n" 1133 " NumEraseUnits = %d\n"
@@ -1235,12 +1235,12 @@ static inline int __init inftl_partscan(struct mtd_info *mtd, struct mtd_partiti
1235 doc->mh1_page = doc->mh0_page + (4096 >> this->page_shift); 1235 doc->mh1_page = doc->mh0_page + (4096 >> this->page_shift);
1236 mh = (struct INFTLMediaHeader *)buf; 1236 mh = (struct INFTLMediaHeader *)buf;
1237 1237
1238 mh->NoOfBootImageBlocks = le32_to_cpu(mh->NoOfBootImageBlocks); 1238 le32_to_cpus(&mh->NoOfBootImageBlocks);
1239 mh->NoOfBinaryPartitions = le32_to_cpu(mh->NoOfBinaryPartitions); 1239 le32_to_cpus(&mh->NoOfBinaryPartitions);
1240 mh->NoOfBDTLPartitions = le32_to_cpu(mh->NoOfBDTLPartitions); 1240 le32_to_cpus(&mh->NoOfBDTLPartitions);
1241 mh->BlockMultiplierBits = le32_to_cpu(mh->BlockMultiplierBits); 1241 le32_to_cpus(&mh->BlockMultiplierBits);
1242 mh->FormatFlags = le32_to_cpu(mh->FormatFlags); 1242 le32_to_cpus(&mh->FormatFlags);
1243 mh->PercentUsed = le32_to_cpu(mh->PercentUsed); 1243 le32_to_cpus(&mh->PercentUsed);
1244 1244
1245 printk(KERN_INFO " bootRecordID = %s\n" 1245 printk(KERN_INFO " bootRecordID = %s\n"
1246 " NoOfBootImageBlocks = %d\n" 1246 " NoOfBootImageBlocks = %d\n"
@@ -1277,12 +1277,12 @@ static inline int __init inftl_partscan(struct mtd_info *mtd, struct mtd_partiti
1277 /* Scan the partitions */ 1277 /* Scan the partitions */
1278 for (i = 0; (i < 4); i++) { 1278 for (i = 0; (i < 4); i++) {
1279 ip = &(mh->Partitions[i]); 1279 ip = &(mh->Partitions[i]);
1280 ip->virtualUnits = le32_to_cpu(ip->virtualUnits); 1280 le32_to_cpus(&ip->virtualUnits);
1281 ip->firstUnit = le32_to_cpu(ip->firstUnit); 1281 le32_to_cpus(&ip->firstUnit);
1282 ip->lastUnit = le32_to_cpu(ip->lastUnit); 1282 le32_to_cpus(&ip->lastUnit);
1283 ip->flags = le32_to_cpu(ip->flags); 1283 le32_to_cpus(&ip->flags);
1284 ip->spareUnits = le32_to_cpu(ip->spareUnits); 1284 le32_to_cpus(&ip->spareUnits);
1285 ip->Reserved0 = le32_to_cpu(ip->Reserved0); 1285 le32_to_cpus(&ip->Reserved0);
1286 1286
1287 printk(KERN_INFO " PARTITION[%d] ->\n" 1287 printk(KERN_INFO " PARTITION[%d] ->\n"
1288 " virtualUnits = %d\n" 1288 " virtualUnits = %d\n"
diff --git a/drivers/mtd/nand/edb7312.c b/drivers/mtd/nand/edb7312.c
index 387e4352903e..86366bfba9f8 100644
--- a/drivers/mtd/nand/edb7312.c
+++ b/drivers/mtd/nand/edb7312.c
@@ -23,7 +23,7 @@
23#include <linux/mtd/nand.h> 23#include <linux/mtd/nand.h>
24#include <linux/mtd/partitions.h> 24#include <linux/mtd/partitions.h>
25#include <asm/io.h> 25#include <asm/io.h>
26#include <asm/arch/hardware.h> /* for CLPS7111_VIRT_BASE */ 26#include <mach/hardware.h> /* for CLPS7111_VIRT_BASE */
27#include <asm/sizes.h> 27#include <asm/sizes.h>
28#include <asm/hardware/clps7111.h> 28#include <asm/hardware/clps7111.h>
29 29
diff --git a/drivers/mtd/nand/fsl_elbc_nand.c b/drivers/mtd/nand/fsl_elbc_nand.c
index 9dff51351f4f..98ad3cefcaf4 100644
--- a/drivers/mtd/nand/fsl_elbc_nand.c
+++ b/drivers/mtd/nand/fsl_elbc_nand.c
@@ -887,7 +887,7 @@ static int __devinit fsl_elbc_chip_probe(struct fsl_elbc_ctrl *ctrl,
887 goto err; 887 goto err;
888 } 888 }
889 889
890 priv->mtd.name = kasprintf(GFP_KERNEL, "%x.flash", res.start); 890 priv->mtd.name = kasprintf(GFP_KERNEL, "%x.flash", (unsigned)res.start);
891 if (!priv->mtd.name) { 891 if (!priv->mtd.name) {
892 ret = -ENOMEM; 892 ret = -ENOMEM;
893 goto err; 893 goto err;
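
The single fsl_elbc_nand.c change casts res.start before formatting it with "%x": resource_size_t can be 64 bits wide when 64-bit resources are enabled, and passing a 64-bit value where a printf-style "%x" expects an unsigned int is undefined behaviour. A small userspace illustration of the same rule, with a made-up resource address:

    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint64_t start = 0xff800000ULL;   /* stand-in for a 64-bit resource_size_t */

        /* printf("%x", start) would be undefined: "%x" consumes an unsigned int. */
        printf("%x.flash\n", (unsigned)start);    /* what the patch does */
        printf("%" PRIx64 ".flash\n", start);     /* alternative: a 64-bit conversion */
        return 0;
    }
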
diff --git a/drivers/mtd/nand/h1910.c b/drivers/mtd/nand/h1910.c
index 9e59de501c2e..f8ce79b446ed 100644
--- a/drivers/mtd/nand/h1910.c
+++ b/drivers/mtd/nand/h1910.c
@@ -24,10 +24,10 @@
24#include <linux/mtd/nand.h> 24#include <linux/mtd/nand.h>
25#include <linux/mtd/partitions.h> 25#include <linux/mtd/partitions.h>
26#include <asm/io.h> 26#include <asm/io.h>
27#include <asm/arch/hardware.h> /* for CLPS7111_VIRT_BASE */ 27#include <mach/hardware.h> /* for CLPS7111_VIRT_BASE */
28#include <asm/sizes.h> 28#include <asm/sizes.h>
29#include <asm/arch/h1900-gpio.h> 29#include <mach/h1900-gpio.h>
30#include <asm/arch/ipaq.h> 30#include <mach/ipaq.h>
31 31
32/* 32/*
33 * MTD structure for EDB7312 board 33 * MTD structure for EDB7312 board
diff --git a/drivers/mtd/nand/nandsim.c b/drivers/mtd/nand/nandsim.c
index ecd70e2504f6..556e8131ecdc 100644
--- a/drivers/mtd/nand/nandsim.c
+++ b/drivers/mtd/nand/nandsim.c
@@ -28,6 +28,7 @@
28#include <linux/module.h> 28#include <linux/module.h>
29#include <linux/moduleparam.h> 29#include <linux/moduleparam.h>
30#include <linux/vmalloc.h> 30#include <linux/vmalloc.h>
31#include <asm/div64.h>
31#include <linux/slab.h> 32#include <linux/slab.h>
32#include <linux/errno.h> 33#include <linux/errno.h>
33#include <linux/string.h> 34#include <linux/string.h>
@@ -207,13 +208,16 @@ MODULE_PARM_DESC(overridesize, "Specifies the NAND Flash size overriding the I
207#define STATE_CMD_READID 0x0000000A /* read ID */ 208#define STATE_CMD_READID 0x0000000A /* read ID */
208#define STATE_CMD_ERASE2 0x0000000B /* sector erase second command */ 209#define STATE_CMD_ERASE2 0x0000000B /* sector erase second command */
209#define STATE_CMD_RESET 0x0000000C /* reset */ 210#define STATE_CMD_RESET 0x0000000C /* reset */
211#define STATE_CMD_RNDOUT 0x0000000D /* random output command */
212#define STATE_CMD_RNDOUTSTART 0x0000000E /* random output start command */
210#define STATE_CMD_MASK 0x0000000F /* command states mask */ 213#define STATE_CMD_MASK 0x0000000F /* command states mask */
211 214
212/* After an address is input, the simulator goes to one of these states */ 215/* After an address is input, the simulator goes to one of these states */
213#define STATE_ADDR_PAGE 0x00000010 /* full (row, column) address is accepted */ 216#define STATE_ADDR_PAGE 0x00000010 /* full (row, column) address is accepted */
214#define STATE_ADDR_SEC 0x00000020 /* sector address was accepted */ 217#define STATE_ADDR_SEC 0x00000020 /* sector address was accepted */
215#define STATE_ADDR_ZERO 0x00000030 /* one byte zero address was accepted */ 218#define STATE_ADDR_COLUMN 0x00000030 /* column address was accepted */
216#define STATE_ADDR_MASK 0x00000030 /* address states mask */ 219#define STATE_ADDR_ZERO 0x00000040 /* one byte zero address was accepted */
220#define STATE_ADDR_MASK 0x00000070 /* address states mask */
217 221
218/* During data input/output the simulator is in these states */ 222/* During data input/output the simulator is in these states */
219#define STATE_DATAIN 0x00000100 /* waiting for data input */ 223#define STATE_DATAIN 0x00000100 /* waiting for data input */
@@ -240,7 +244,7 @@ MODULE_PARM_DESC(overridesize, "Specifies the NAND Flash size overriding the I
240#define ACTION_OOBOFF 0x00600000 /* add to address OOB offset */ 244#define ACTION_OOBOFF 0x00600000 /* add to address OOB offset */
241#define ACTION_MASK 0x00700000 /* action mask */ 245#define ACTION_MASK 0x00700000 /* action mask */
242 246
243#define NS_OPER_NUM 12 /* Number of operations supported by the simulator */ 247#define NS_OPER_NUM 13 /* Number of operations supported by the simulator */
244#define NS_OPER_STATES 6 /* Maximum number of states in operation */ 248#define NS_OPER_STATES 6 /* Maximum number of states in operation */
245 249
246#define OPT_ANY 0xFFFFFFFF /* any chip supports this operation */ 250#define OPT_ANY 0xFFFFFFFF /* any chip supports this operation */
@@ -373,7 +377,10 @@ static struct nandsim_operations {
373 {OPT_ANY, {STATE_CMD_READID, STATE_ADDR_ZERO, STATE_DATAOUT_ID, STATE_READY}}, 377 {OPT_ANY, {STATE_CMD_READID, STATE_ADDR_ZERO, STATE_DATAOUT_ID, STATE_READY}},
374 /* Large page devices read page */ 378 /* Large page devices read page */
375 {OPT_LARGEPAGE, {STATE_CMD_READ0, STATE_ADDR_PAGE, STATE_CMD_READSTART | ACTION_CPY, 379 {OPT_LARGEPAGE, {STATE_CMD_READ0, STATE_ADDR_PAGE, STATE_CMD_READSTART | ACTION_CPY,
376 STATE_DATAOUT, STATE_READY}} 380 STATE_DATAOUT, STATE_READY}},
381 /* Large page devices random page read */
382 {OPT_LARGEPAGE, {STATE_CMD_RNDOUT, STATE_ADDR_COLUMN, STATE_CMD_RNDOUTSTART | ACTION_CPY,
383 STATE_DATAOUT, STATE_READY}},
377}; 384};
378 385
379struct weak_block { 386struct weak_block {
@@ -579,7 +586,8 @@ static int init_nandsim(struct mtd_info *mtd)
579 if (ns->busw == 16) 586 if (ns->busw == 16)
580 NS_WARN("16-bit flashes support wasn't tested\n"); 587 NS_WARN("16-bit flashes support wasn't tested\n");
581 588
582 printk("flash size: %llu MiB\n", ns->geom.totsz >> 20); 589 printk("flash size: %llu MiB\n",
590 (unsigned long long)ns->geom.totsz >> 20);
583 printk("page size: %u bytes\n", ns->geom.pgsz); 591 printk("page size: %u bytes\n", ns->geom.pgsz);
584 printk("OOB area size: %u bytes\n", ns->geom.oobsz); 592 printk("OOB area size: %u bytes\n", ns->geom.oobsz);
585 printk("sector size: %u KiB\n", ns->geom.secsz >> 10); 593 printk("sector size: %u KiB\n", ns->geom.secsz >> 10);
@@ -588,8 +596,9 @@ static int init_nandsim(struct mtd_info *mtd)
588 printk("bus width: %u\n", ns->busw); 596 printk("bus width: %u\n", ns->busw);
589 printk("bits in sector size: %u\n", ns->geom.secshift); 597 printk("bits in sector size: %u\n", ns->geom.secshift);
590 printk("bits in page size: %u\n", ns->geom.pgshift); 598 printk("bits in page size: %u\n", ns->geom.pgshift);
591 printk("bits in OOB size: %u\n", ns->geom.oobshift); 599 printk("bits in OOB size: %u\n", ns->geom.oobshift);
592 printk("flash size with OOB: %llu KiB\n", ns->geom.totszoob >> 10); 600 printk("flash size with OOB: %llu KiB\n",
601 (unsigned long long)ns->geom.totszoob >> 10);
593 printk("page address bytes: %u\n", ns->geom.pgaddrbytes); 602 printk("page address bytes: %u\n", ns->geom.pgaddrbytes);
594 printk("sector address bytes: %u\n", ns->geom.secaddrbytes); 603 printk("sector address bytes: %u\n", ns->geom.secaddrbytes);
595 printk("options: %#x\n", ns->options); 604 printk("options: %#x\n", ns->options);
@@ -937,12 +946,18 @@ static char *get_state_name(uint32_t state)
937 return "STATE_CMD_ERASE2"; 946 return "STATE_CMD_ERASE2";
938 case STATE_CMD_RESET: 947 case STATE_CMD_RESET:
939 return "STATE_CMD_RESET"; 948 return "STATE_CMD_RESET";
949 case STATE_CMD_RNDOUT:
950 return "STATE_CMD_RNDOUT";
951 case STATE_CMD_RNDOUTSTART:
952 return "STATE_CMD_RNDOUTSTART";
940 case STATE_ADDR_PAGE: 953 case STATE_ADDR_PAGE:
941 return "STATE_ADDR_PAGE"; 954 return "STATE_ADDR_PAGE";
942 case STATE_ADDR_SEC: 955 case STATE_ADDR_SEC:
943 return "STATE_ADDR_SEC"; 956 return "STATE_ADDR_SEC";
944 case STATE_ADDR_ZERO: 957 case STATE_ADDR_ZERO:
945 return "STATE_ADDR_ZERO"; 958 return "STATE_ADDR_ZERO";
959 case STATE_ADDR_COLUMN:
960 return "STATE_ADDR_COLUMN";
946 case STATE_DATAIN: 961 case STATE_DATAIN:
947 return "STATE_DATAIN"; 962 return "STATE_DATAIN";
948 case STATE_DATAOUT: 963 case STATE_DATAOUT:
@@ -973,6 +988,7 @@ static int check_command(int cmd)
973 switch (cmd) { 988 switch (cmd) {
974 989
975 case NAND_CMD_READ0: 990 case NAND_CMD_READ0:
991 case NAND_CMD_READ1:
976 case NAND_CMD_READSTART: 992 case NAND_CMD_READSTART:
977 case NAND_CMD_PAGEPROG: 993 case NAND_CMD_PAGEPROG:
978 case NAND_CMD_READOOB: 994 case NAND_CMD_READOOB:
@@ -982,7 +998,8 @@ static int check_command(int cmd)
982 case NAND_CMD_READID: 998 case NAND_CMD_READID:
983 case NAND_CMD_ERASE2: 999 case NAND_CMD_ERASE2:
984 case NAND_CMD_RESET: 1000 case NAND_CMD_RESET:
985 case NAND_CMD_READ1: 1001 case NAND_CMD_RNDOUT:
1002 case NAND_CMD_RNDOUTSTART:
986 return 0; 1003 return 0;
987 1004
988 case NAND_CMD_STATUS_MULTI: 1005 case NAND_CMD_STATUS_MULTI:
@@ -1021,6 +1038,10 @@ static uint32_t get_state_by_command(unsigned command)
1021 return STATE_CMD_ERASE2; 1038 return STATE_CMD_ERASE2;
1022 case NAND_CMD_RESET: 1039 case NAND_CMD_RESET:
1023 return STATE_CMD_RESET; 1040 return STATE_CMD_RESET;
1041 case NAND_CMD_RNDOUT:
1042 return STATE_CMD_RNDOUT;
1043 case NAND_CMD_RNDOUTSTART:
1044 return STATE_CMD_RNDOUTSTART;
1024 } 1045 }
1025 1046
1026 NS_ERR("get_state_by_command: unknown command, BUG\n"); 1047 NS_ERR("get_state_by_command: unknown command, BUG\n");
@@ -1582,6 +1603,11 @@ static void switch_state(struct nandsim *ns)
1582 ns->regs.num = 1; 1603 ns->regs.num = 1;
1583 break; 1604 break;
1584 1605
1606 case STATE_ADDR_COLUMN:
1607 /* Column address is always 2 bytes */
1608 ns->regs.num = ns->geom.pgaddrbytes - ns->geom.secaddrbytes;
1609 break;
1610
1585 default: 1611 default:
1586 NS_ERR("switch_state: BUG! unknown address state\n"); 1612 NS_ERR("switch_state: BUG! unknown address state\n");
1587 } 1613 }
@@ -1693,15 +1719,21 @@ static void ns_nand_write_byte(struct mtd_info *mtd, u_char byte)
1693 return; 1719 return;
1694 } 1720 }
1695 1721
1696 /* 1722 /* Check that the command byte is correct */
1697 * Chip might still be in STATE_DATAOUT 1723 if (check_command(byte)) {
1698 * (if OPT_AUTOINCR feature is supported), STATE_DATAOUT_STATUS or 1724 NS_ERR("write_byte: unknown command %#x\n", (uint)byte);
1699 * STATE_DATAOUT_STATUS_M state. If so, switch state. 1725 return;
1700 */ 1726 }
1727
1701 if (NS_STATE(ns->state) == STATE_DATAOUT_STATUS 1728 if (NS_STATE(ns->state) == STATE_DATAOUT_STATUS
1702 || NS_STATE(ns->state) == STATE_DATAOUT_STATUS_M 1729 || NS_STATE(ns->state) == STATE_DATAOUT_STATUS_M
1703 || ((ns->options & OPT_AUTOINCR) && NS_STATE(ns->state) == STATE_DATAOUT)) 1730 || NS_STATE(ns->state) == STATE_DATAOUT) {
1731 int row = ns->regs.row;
1732
1704 switch_state(ns); 1733 switch_state(ns);
1734 if (byte == NAND_CMD_RNDOUT)
1735 ns->regs.row = row;
1736 }
1705 1737
1706 /* Check if chip is expecting command */ 1738 /* Check if chip is expecting command */
1707 if (NS_STATE(ns->nxstate) != STATE_UNKNOWN && !(ns->nxstate & STATE_CMD_MASK)) { 1739 if (NS_STATE(ns->nxstate) != STATE_UNKNOWN && !(ns->nxstate & STATE_CMD_MASK)) {
@@ -1715,12 +1747,6 @@ static void ns_nand_write_byte(struct mtd_info *mtd, u_char byte)
1715 switch_to_ready_state(ns, NS_STATUS_FAILED(ns)); 1747 switch_to_ready_state(ns, NS_STATUS_FAILED(ns));
1716 } 1748 }
1717 1749
1718 /* Check that the command byte is correct */
1719 if (check_command(byte)) {
1720 NS_ERR("write_byte: unknown command %#x\n", (uint)byte);
1721 return;
1722 }
1723
1724 NS_DBG("command byte corresponding to %s state accepted\n", 1750 NS_DBG("command byte corresponding to %s state accepted\n",
1725 get_state_name(get_state_by_command(byte))); 1751 get_state_name(get_state_by_command(byte)));
1726 ns->regs.command = byte; 1752 ns->regs.command = byte;
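
The nandsim hunks above teach the simulator the large-page random-output sequence (NAND_CMD_RNDOUT, a column address, then NAND_CMD_RNDOUTSTART), which repositions the column pointer inside the page already fetched by READ0/READSTART. That brings the number of address states to four, which the old two-bit field (mask 0x30) cannot encode once the all-zero "no address state" value is reserved, so STATE_ADDR_ZERO moves to 0x40 and STATE_ADDR_MASK widens to 0x70. A standalone check of that encoding, using the constants from the patch:

    #include <assert.h>
    #include <stdio.h>

    #define STATE_ADDR_PAGE   0x00000010
    #define STATE_ADDR_SEC    0x00000020
    #define STATE_ADDR_COLUMN 0x00000030
    #define STATE_ADDR_ZERO   0x00000040
    #define STATE_ADDR_MASK   0x00000070

    int main(void)
    {
        /* every address state must fit inside the widened mask ... */
        assert((STATE_ADDR_PAGE   & ~STATE_ADDR_MASK) == 0);
        assert((STATE_ADDR_SEC    & ~STATE_ADDR_MASK) == 0);
        assert((STATE_ADDR_COLUMN & ~STATE_ADDR_MASK) == 0);
        assert((STATE_ADDR_ZERO   & ~STATE_ADDR_MASK) == 0);
        /* ... which the old 0x30 mask could no longer guarantee */
        assert((STATE_ADDR_ZERO   & ~0x00000030) != 0);
        printf("address-state encoding is consistent\n");
        return 0;
    }
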
diff --git a/drivers/mtd/nand/orion_nand.c b/drivers/mtd/nand/orion_nand.c
index ee2ac3948cd8..64002488c6ee 100644
--- a/drivers/mtd/nand/orion_nand.c
+++ b/drivers/mtd/nand/orion_nand.c
@@ -18,7 +18,7 @@
18#include <linux/mtd/partitions.h> 18#include <linux/mtd/partitions.h>
19#include <asm/io.h> 19#include <asm/io.h>
20#include <asm/sizes.h> 20#include <asm/sizes.h>
21#include <asm/arch/hardware.h> 21#include <mach/hardware.h>
22#include <asm/plat-orion/orion_nand.h> 22#include <asm/plat-orion/orion_nand.h>
23 23
24#ifdef CONFIG_MTD_CMDLINE_PARTS 24#ifdef CONFIG_MTD_CMDLINE_PARTS
diff --git a/drivers/mtd/nand/pxa3xx_nand.c b/drivers/mtd/nand/pxa3xx_nand.c
index fe2bc7e42119..a64ad15b8fdd 100644
--- a/drivers/mtd/nand/pxa3xx_nand.c
+++ b/drivers/mtd/nand/pxa3xx_nand.c
@@ -22,8 +22,8 @@
22#include <linux/irq.h> 22#include <linux/irq.h>
23#include <asm/dma.h> 23#include <asm/dma.h>
24 24
25#include <asm/arch/pxa-regs.h> 25#include <mach/pxa-regs.h>
26#include <asm/arch/pxa3xx_nand.h> 26#include <mach/pxa3xx_nand.h>
27 27
28#define CHIP_DELAY_TIMEOUT (2 * HZ/10) 28#define CHIP_DELAY_TIMEOUT (2 * HZ/10)
29 29
diff --git a/drivers/mtd/nand/sharpsl.c b/drivers/mtd/nand/sharpsl.c
index 6dba2fb66ae5..30a518e211bd 100644
--- a/drivers/mtd/nand/sharpsl.c
+++ b/drivers/mtd/nand/sharpsl.c
@@ -21,7 +21,7 @@
21#include <linux/mtd/partitions.h> 21#include <linux/mtd/partitions.h>
22#include <linux/interrupt.h> 22#include <linux/interrupt.h>
23#include <asm/io.h> 23#include <asm/io.h>
24#include <asm/hardware.h> 24#include <mach/hardware.h>
25#include <asm/mach-types.h> 25#include <asm/mach-types.h>
26 26
27static void __iomem *sharpsl_io_base; 27static void __iomem *sharpsl_io_base;
diff --git a/drivers/mtd/nand/ts7250.c b/drivers/mtd/nand/ts7250.c
index 807a72752eeb..2c410a011317 100644
--- a/drivers/mtd/nand/ts7250.c
+++ b/drivers/mtd/nand/ts7250.c
@@ -25,7 +25,7 @@
25#include <linux/mtd/nand.h> 25#include <linux/mtd/nand.h>
26#include <linux/mtd/partitions.h> 26#include <linux/mtd/partitions.h>
27#include <asm/io.h> 27#include <asm/io.h>
28#include <asm/arch/hardware.h> 28#include <mach/hardware.h>
29#include <asm/sizes.h> 29#include <asm/sizes.h>
30#include <asm/mach-types.h> 30#include <asm/mach-types.h>
31 31
diff --git a/drivers/net/3c523.c b/drivers/net/3c523.c
index dc6e474229b1..e2ce41d3828e 100644
--- a/drivers/net/3c523.c
+++ b/drivers/net/3c523.c
@@ -640,10 +640,8 @@ static int init586(struct net_device *dev)
640 cfg_cmd->time_low = 0x00; 640 cfg_cmd->time_low = 0x00;
641 cfg_cmd->time_high = 0xf2; 641 cfg_cmd->time_high = 0xf2;
642 cfg_cmd->promisc = 0; 642 cfg_cmd->promisc = 0;
643 if (dev->flags & (IFF_ALLMULTI | IFF_PROMISC)) { 643 if (dev->flags & (IFF_ALLMULTI | IFF_PROMISC))
644 cfg_cmd->promisc = 1; 644 cfg_cmd->promisc = 1;
645 dev->flags |= IFF_PROMISC;
646 }
647 cfg_cmd->carr_coll = 0x00; 645 cfg_cmd->carr_coll = 0x00;
648 646
649 p->scb->cbl_offset = make16(cfg_cmd); 647 p->scb->cbl_offset = make16(cfg_cmd);
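
This 3c523.c hunk, like the 3c527.c, atp.c and de620.c hunks further down, stops the driver from writing IFF_PROMISC back into dev->flags when it falls back to promiscuous reception: dev->flags records what the stack asked for, and the multicast handler should only translate those flags into a hardware receive mode. A sketch of that one-way mapping in plain C (flag values and the multicast threshold are stand-ins chosen for the example, not the driver's own):

    #include <stdio.h>

    #define IFF_PROMISC  0x100   /* illustrative values */
    #define IFF_ALLMULTI 0x200

    enum rx_mode { RX_NORMAL, RX_PROMISC };

    /* Derive the hardware mode from the requested flags; never write back into them. */
    static enum rx_mode pick_rx_mode(unsigned flags, int mc_count)
    {
        if ((flags & (IFF_PROMISC | IFF_ALLMULTI)) || mc_count > 10)
            return RX_PROMISC;
        return RX_NORMAL;
    }

    int main(void)
    {
        printf("%d\n", pick_rx_mode(IFF_ALLMULTI, 0));  /* 1: promiscuous fallback */
        printf("%d\n", pick_rx_mode(0, 3));             /* 0: normal filtering */
        return 0;
    }
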
diff --git a/drivers/net/3c527.c b/drivers/net/3c527.c
index 6aca0c640f13..abc84f765973 100644
--- a/drivers/net/3c527.c
+++ b/drivers/net/3c527.c
@@ -1521,14 +1521,11 @@ static void do_mc32_set_multicast_list(struct net_device *dev, int retry)
1521 struct mc32_local *lp = netdev_priv(dev); 1521 struct mc32_local *lp = netdev_priv(dev);
1522 u16 filt = (1<<2); /* Save Bad Packets, for stats purposes */ 1522 u16 filt = (1<<2); /* Save Bad Packets, for stats purposes */
1523 1523
1524 if (dev->flags&IFF_PROMISC) 1524 if ((dev->flags&IFF_PROMISC) ||
1525 (dev->flags&IFF_ALLMULTI) ||
1526 dev->mc_count > 10)
1525 /* Enable promiscuous mode */ 1527 /* Enable promiscuous mode */
1526 filt |= 1; 1528 filt |= 1;
1527 else if((dev->flags&IFF_ALLMULTI) || dev->mc_count > 10)
1528 {
1529 dev->flags|=IFF_PROMISC;
1530 filt |= 1;
1531 }
1532 else if(dev->mc_count) 1529 else if(dev->mc_count)
1533 { 1530 {
1534 unsigned char block[62]; 1531 unsigned char block[62];
diff --git a/drivers/net/3c59x.c b/drivers/net/3c59x.c
index 8db4e6b89482..491ee16da5c1 100644
--- a/drivers/net/3c59x.c
+++ b/drivers/net/3c59x.c
@@ -1692,12 +1692,14 @@ vortex_open(struct net_device *dev)
1692 vp->rx_ring[i].next = cpu_to_le32(vp->rx_ring_dma + sizeof(struct boom_rx_desc) * (i+1)); 1692 vp->rx_ring[i].next = cpu_to_le32(vp->rx_ring_dma + sizeof(struct boom_rx_desc) * (i+1));
1693 vp->rx_ring[i].status = 0; /* Clear complete bit. */ 1693 vp->rx_ring[i].status = 0; /* Clear complete bit. */
1694 vp->rx_ring[i].length = cpu_to_le32(PKT_BUF_SZ | LAST_FRAG); 1694 vp->rx_ring[i].length = cpu_to_le32(PKT_BUF_SZ | LAST_FRAG);
1695 skb = dev_alloc_skb(PKT_BUF_SZ); 1695
1696 skb = __netdev_alloc_skb(dev, PKT_BUF_SZ + NET_IP_ALIGN,
1697 GFP_KERNEL);
1696 vp->rx_skbuff[i] = skb; 1698 vp->rx_skbuff[i] = skb;
1697 if (skb == NULL) 1699 if (skb == NULL)
1698 break; /* Bad news! */ 1700 break; /* Bad news! */
1699 skb->dev = dev; /* Mark as being used by this device. */ 1701
1700 skb_reserve(skb, 2); /* Align IP on 16 byte boundaries */ 1702 skb_reserve(skb, NET_IP_ALIGN); /* Align IP on 16 byte boundaries */
1701 vp->rx_ring[i].addr = cpu_to_le32(pci_map_single(VORTEX_PCI(vp), skb->data, PKT_BUF_SZ, PCI_DMA_FROMDEVICE)); 1703 vp->rx_ring[i].addr = cpu_to_le32(pci_map_single(VORTEX_PCI(vp), skb->data, PKT_BUF_SZ, PCI_DMA_FROMDEVICE));
1702 } 1704 }
1703 if (i != RX_RING_SIZE) { 1705 if (i != RX_RING_SIZE) {
@@ -2538,7 +2540,7 @@ boomerang_rx(struct net_device *dev)
2538 struct sk_buff *skb; 2540 struct sk_buff *skb;
2539 entry = vp->dirty_rx % RX_RING_SIZE; 2541 entry = vp->dirty_rx % RX_RING_SIZE;
2540 if (vp->rx_skbuff[entry] == NULL) { 2542 if (vp->rx_skbuff[entry] == NULL) {
2541 skb = dev_alloc_skb(PKT_BUF_SZ); 2543 skb = netdev_alloc_skb(dev, PKT_BUF_SZ + NET_IP_ALIGN);
2542 if (skb == NULL) { 2544 if (skb == NULL) {
2543 static unsigned long last_jif; 2545 static unsigned long last_jif;
2544 if (time_after(jiffies, last_jif + 10 * HZ)) { 2546 if (time_after(jiffies, last_jif + 10 * HZ)) {
@@ -2549,8 +2551,8 @@ boomerang_rx(struct net_device *dev)
2549 mod_timer(&vp->rx_oom_timer, RUN_AT(HZ * 1)); 2551 mod_timer(&vp->rx_oom_timer, RUN_AT(HZ * 1));
2550 break; /* Bad news! */ 2552 break; /* Bad news! */
2551 } 2553 }
2552 skb->dev = dev; /* Mark as being used by this device. */ 2554
2553 skb_reserve(skb, 2); /* Align IP on 16 byte boundaries */ 2555 skb_reserve(skb, NET_IP_ALIGN);
2554 vp->rx_ring[entry].addr = cpu_to_le32(pci_map_single(VORTEX_PCI(vp), skb->data, PKT_BUF_SZ, PCI_DMA_FROMDEVICE)); 2556 vp->rx_ring[entry].addr = cpu_to_le32(pci_map_single(VORTEX_PCI(vp), skb->data, PKT_BUF_SZ, PCI_DMA_FROMDEVICE));
2555 vp->rx_skbuff[entry] = skb; 2557 vp->rx_skbuff[entry] = skb;
2556 } 2558 }
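
In the 3c59x hunks, dev_alloc_skb() plus a hard-coded skb_reserve(skb, 2) becomes netdev_alloc_skb()/__netdev_alloc_skb() plus skb_reserve(skb, NET_IP_ALIGN): the netdev variants set skb->dev themselves (so the explicit assignment disappears), and reserving NET_IP_ALIGN bytes (2 on most architectures) ahead of the 14-byte Ethernet header leaves the IP header 4-byte aligned. The arithmetic, spelled out (NET_IP_ALIGN assumed to be 2 here):

    #include <stdio.h>

    #define ETH_HLEN     14   /* Ethernet header length */
    #define NET_IP_ALIGN 2    /* typical value; some architectures define it as 0 */

    int main(void)
    {
        unsigned ip_off_plain    = ETH_HLEN;                 /* no reserve          */
        unsigned ip_off_reserved = NET_IP_ALIGN + ETH_HLEN;  /* after skb_reserve() */

        printf("IP header offset without reserve: %u (mod 4 = %u)\n",
               ip_off_plain, ip_off_plain % 4);
        printf("IP header offset with reserve:    %u (mod 4 = %u)\n",
               ip_off_reserved, ip_off_reserved % 4);
        return 0;
    }
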
diff --git a/drivers/net/8390.c b/drivers/net/8390.c
index dc5d2584bd0c..f72a2e87d569 100644
--- a/drivers/net/8390.c
+++ b/drivers/net/8390.c
@@ -9,42 +9,39 @@ int ei_open(struct net_device *dev)
9{ 9{
10 return __ei_open(dev); 10 return __ei_open(dev);
11} 11}
12EXPORT_SYMBOL(ei_open);
12 13
13int ei_close(struct net_device *dev) 14int ei_close(struct net_device *dev)
14{ 15{
15 return __ei_close(dev); 16 return __ei_close(dev);
16} 17}
18EXPORT_SYMBOL(ei_close);
17 19
18irqreturn_t ei_interrupt(int irq, void *dev_id) 20irqreturn_t ei_interrupt(int irq, void *dev_id)
19{ 21{
20 return __ei_interrupt(irq, dev_id); 22 return __ei_interrupt(irq, dev_id);
21} 23}
24EXPORT_SYMBOL(ei_interrupt);
22 25
23#ifdef CONFIG_NET_POLL_CONTROLLER 26#ifdef CONFIG_NET_POLL_CONTROLLER
24void ei_poll(struct net_device *dev) 27void ei_poll(struct net_device *dev)
25{ 28{
26 __ei_poll(dev); 29 __ei_poll(dev);
27} 30}
31EXPORT_SYMBOL(ei_poll);
28#endif 32#endif
29 33
30struct net_device *__alloc_ei_netdev(int size) 34struct net_device *__alloc_ei_netdev(int size)
31{ 35{
32 return ____alloc_ei_netdev(size); 36 return ____alloc_ei_netdev(size);
33} 37}
38EXPORT_SYMBOL(__alloc_ei_netdev);
34 39
35void NS8390_init(struct net_device *dev, int startp) 40void NS8390_init(struct net_device *dev, int startp)
36{ 41{
37 __NS8390_init(dev, startp); 42 __NS8390_init(dev, startp);
38} 43}
39
40EXPORT_SYMBOL(ei_open);
41EXPORT_SYMBOL(ei_close);
42EXPORT_SYMBOL(ei_interrupt);
43#ifdef CONFIG_NET_POLL_CONTROLLER
44EXPORT_SYMBOL(ei_poll);
45#endif
46EXPORT_SYMBOL(NS8390_init); 44EXPORT_SYMBOL(NS8390_init);
47EXPORT_SYMBOL(__alloc_ei_netdev);
48 45
49#if defined(MODULE) 46#if defined(MODULE)
50 47
diff --git a/drivers/net/8390p.c b/drivers/net/8390p.c
index 71f19884c4b1..4c6eea4611a2 100644
--- a/drivers/net/8390p.c
+++ b/drivers/net/8390p.c
@@ -4,9 +4,9 @@ static const char version[] =
4 "8390p.c:v1.10cvs 9/23/94 Donald Becker (becker@cesdis.gsfc.nasa.gov)\n"; 4 "8390p.c:v1.10cvs 9/23/94 Donald Becker (becker@cesdis.gsfc.nasa.gov)\n";
5 5
6#define ei_inb(_p) inb(_p) 6#define ei_inb(_p) inb(_p)
7#define ei_outb(_v,_p) outb(_v,_p) 7#define ei_outb(_v, _p) outb(_v, _p)
8#define ei_inb_p(_p) inb_p(_p) 8#define ei_inb_p(_p) inb_p(_p)
9#define ei_outb_p(_v,_p) outb_p(_v,_p) 9#define ei_outb_p(_v, _p) outb_p(_v, _p)
10 10
11#include "lib8390.c" 11#include "lib8390.c"
12 12
@@ -14,42 +14,39 @@ int eip_open(struct net_device *dev)
14{ 14{
15 return __ei_open(dev); 15 return __ei_open(dev);
16} 16}
17EXPORT_SYMBOL(eip_open);
17 18
18int eip_close(struct net_device *dev) 19int eip_close(struct net_device *dev)
19{ 20{
20 return __ei_close(dev); 21 return __ei_close(dev);
21} 22}
23EXPORT_SYMBOL(eip_close);
22 24
23irqreturn_t eip_interrupt(int irq, void *dev_id) 25irqreturn_t eip_interrupt(int irq, void *dev_id)
24{ 26{
25 return __ei_interrupt(irq, dev_id); 27 return __ei_interrupt(irq, dev_id);
26} 28}
29EXPORT_SYMBOL(eip_interrupt);
27 30
28#ifdef CONFIG_NET_POLL_CONTROLLER 31#ifdef CONFIG_NET_POLL_CONTROLLER
29void eip_poll(struct net_device *dev) 32void eip_poll(struct net_device *dev)
30{ 33{
31 __ei_poll(dev); 34 __ei_poll(dev);
32} 35}
36EXPORT_SYMBOL(eip_poll);
33#endif 37#endif
34 38
35struct net_device *__alloc_eip_netdev(int size) 39struct net_device *__alloc_eip_netdev(int size)
36{ 40{
37 return ____alloc_ei_netdev(size); 41 return ____alloc_ei_netdev(size);
38} 42}
43EXPORT_SYMBOL(__alloc_eip_netdev);
39 44
40void NS8390p_init(struct net_device *dev, int startp) 45void NS8390p_init(struct net_device *dev, int startp)
41{ 46{
42 return __NS8390_init(dev, startp); 47 __NS8390_init(dev, startp);
43} 48}
44
45EXPORT_SYMBOL(eip_open);
46EXPORT_SYMBOL(eip_close);
47EXPORT_SYMBOL(eip_interrupt);
48#ifdef CONFIG_NET_POLL_CONTROLLER
49EXPORT_SYMBOL(eip_poll);
50#endif
51EXPORT_SYMBOL(NS8390p_init); 49EXPORT_SYMBOL(NS8390p_init);
52EXPORT_SYMBOL(__alloc_eip_netdev);
53 50
54#if defined(MODULE) 51#if defined(MODULE)
55 52
diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig
index fa533c27052a..4b4cb2bf4f11 100644
--- a/drivers/net/Kconfig
+++ b/drivers/net/Kconfig
@@ -510,14 +510,15 @@ config STNIC
510config SH_ETH 510config SH_ETH
511 tristate "Renesas SuperH Ethernet support" 511 tristate "Renesas SuperH Ethernet support"
512 depends on SUPERH && \ 512 depends on SUPERH && \
513 (CPU_SUBTYPE_SH7710 || CPU_SUBTYPE_SH7712) 513 (CPU_SUBTYPE_SH7710 || CPU_SUBTYPE_SH7712 || CPU_SUBTYPE_SH7763 || \
514 CPU_SUBTYPE_SH7619)
514 select CRC32 515 select CRC32
515 select MII 516 select MII
516 select MDIO_BITBANG 517 select MDIO_BITBANG
517 select PHYLIB 518 select PHYLIB
518 help 519 help
519 Renesas SuperH Ethernet device driver. 520 Renesas SuperH Ethernet device driver.
520 This driver supports SH7710 and SH7712. 521 This driver supports SH7710, SH7712, SH7763 and SH7619.
521 522
522config SUNLANCE 523config SUNLANCE
523 tristate "Sun LANCE support" 524 tristate "Sun LANCE support"
diff --git a/drivers/net/arm/am79c961a.c b/drivers/net/arm/am79c961a.c
index a637910b02dd..aa4a5246be53 100644
--- a/drivers/net/arm/am79c961a.c
+++ b/drivers/net/arm/am79c961a.c
@@ -28,7 +28,7 @@
28#include <linux/bitops.h> 28#include <linux/bitops.h>
29#include <linux/platform_device.h> 29#include <linux/platform_device.h>
30 30
31#include <asm/hardware.h> 31#include <mach/hardware.h>
32#include <asm/io.h> 32#include <asm/io.h>
33#include <asm/system.h> 33#include <asm/system.h>
34 34
diff --git a/drivers/net/arm/at91_ether.c b/drivers/net/arm/at91_ether.c
index ffae266e2d7f..0fa53464efb2 100644
--- a/drivers/net/arm/at91_ether.c
+++ b/drivers/net/arm/at91_ether.c
@@ -32,9 +32,9 @@
32#include <asm/uaccess.h> 32#include <asm/uaccess.h>
33#include <asm/mach-types.h> 33#include <asm/mach-types.h>
34 34
35#include <asm/arch/at91rm9200_emac.h> 35#include <mach/at91rm9200_emac.h>
36#include <asm/arch/gpio.h> 36#include <mach/gpio.h>
37#include <asm/arch/board.h> 37#include <mach/board.h>
38 38
39#include "at91_ether.h" 39#include "at91_ether.h"
40 40
diff --git a/drivers/net/arm/ep93xx_eth.c b/drivers/net/arm/ep93xx_eth.c
index 18d3eeb7eab2..1267444d79da 100644
--- a/drivers/net/arm/ep93xx_eth.c
+++ b/drivers/net/arm/ep93xx_eth.c
@@ -20,8 +20,8 @@
20#include <linux/moduleparam.h> 20#include <linux/moduleparam.h>
21#include <linux/platform_device.h> 21#include <linux/platform_device.h>
22#include <linux/delay.h> 22#include <linux/delay.h>
23#include <asm/arch/ep93xx-regs.h> 23#include <mach/ep93xx-regs.h>
24#include <asm/arch/platform.h> 24#include <mach/platform.h>
25#include <asm/io.h> 25#include <asm/io.h>
26 26
27#define DRV_MODULE_NAME "ep93xx-eth" 27#define DRV_MODULE_NAME "ep93xx-eth"
diff --git a/drivers/net/arm/ixp4xx_eth.c b/drivers/net/arm/ixp4xx_eth.c
index 9b777d9433cd..020771bfb603 100644
--- a/drivers/net/arm/ixp4xx_eth.c
+++ b/drivers/net/arm/ixp4xx_eth.c
@@ -32,8 +32,8 @@
32#include <linux/kernel.h> 32#include <linux/kernel.h>
33#include <linux/mii.h> 33#include <linux/mii.h>
34#include <linux/platform_device.h> 34#include <linux/platform_device.h>
35#include <asm/arch/npe.h> 35#include <mach/npe.h>
36#include <asm/arch/qmgr.h> 36#include <mach/qmgr.h>
37 37
38#define DEBUG_QUEUES 0 38#define DEBUG_QUEUES 0
39#define DEBUG_DESC 0 39#define DEBUG_DESC 0
diff --git a/drivers/net/atlx/atl1.c b/drivers/net/atlx/atl1.c
index f12e3d12474b..e6a7bb79d4df 100644
--- a/drivers/net/atlx/atl1.c
+++ b/drivers/net/atlx/atl1.c
@@ -1790,6 +1790,17 @@ static void atl1_rx_checksum(struct atl1_adapter *adapter,
1790{ 1790{
1791 struct pci_dev *pdev = adapter->pdev; 1791 struct pci_dev *pdev = adapter->pdev;
1792 1792
1793 /*
1794 * The L1 hardware contains a bug that erroneously sets the
1795 * PACKET_FLAG_ERR and ERR_FLAG_L4_CHKSUM bits whenever a
1796 * fragmented IP packet is received, even though the packet
1797 * is perfectly valid and its checksum is correct. There's
1798 * no way to distinguish between one of these good packets
1799 * and a packet that actually contains a TCP/UDP checksum
1800 * error, so all we can do is allow it to be handed up to
1801 * the higher layers and let it be sorted out there.
1802 */
1803
1793 skb->ip_summed = CHECKSUM_NONE; 1804 skb->ip_summed = CHECKSUM_NONE;
1794 1805
1795 if (unlikely(rrd->pkt_flg & PACKET_FLAG_ERR)) { 1806 if (unlikely(rrd->pkt_flg & PACKET_FLAG_ERR)) {
@@ -1816,14 +1827,6 @@ static void atl1_rx_checksum(struct atl1_adapter *adapter,
1816 return; 1827 return;
1817 } 1828 }
1818 1829
1819 /* IPv4, but hardware thinks its checksum is wrong */
1820 if (netif_msg_rx_err(adapter))
1821 dev_printk(KERN_DEBUG, &pdev->dev,
1822 "hw csum wrong, pkt_flag:%x, err_flag:%x\n",
1823 rrd->pkt_flg, rrd->err_flg);
1824 skb->ip_summed = CHECKSUM_COMPLETE;
1825 skb->csum = htons(rrd->xsz.xsum_sz.rx_chksum);
1826 adapter->hw_csum_err++;
1827 return; 1830 return;
1828} 1831}
1829 1832
diff --git a/drivers/net/atp.c b/drivers/net/atp.c
index 3d4433358a36..c10cd8058e23 100644
--- a/drivers/net/atp.c
+++ b/drivers/net/atp.c
@@ -854,14 +854,9 @@ static void set_rx_mode_8002(struct net_device *dev)
854 struct net_local *lp = netdev_priv(dev); 854 struct net_local *lp = netdev_priv(dev);
855 long ioaddr = dev->base_addr; 855 long ioaddr = dev->base_addr;
856 856
857 if ( dev->mc_count > 0 || (dev->flags & (IFF_ALLMULTI|IFF_PROMISC))) { 857 if (dev->mc_count > 0 || (dev->flags & (IFF_ALLMULTI|IFF_PROMISC)))
858 /* We must make the kernel realise we had to move
859 * into promisc mode or we start all out war on
860 * the cable. - AC
861 */
862 dev->flags|=IFF_PROMISC;
863 lp->addr_mode = CMR2h_PROMISC; 858 lp->addr_mode = CMR2h_PROMISC;
864 } else 859 else
865 lp->addr_mode = CMR2h_Normal; 860 lp->addr_mode = CMR2h_Normal;
866 write_reg_high(ioaddr, CMR2, lp->addr_mode); 861 write_reg_high(ioaddr, CMR2, lp->addr_mode);
867} 862}
diff --git a/drivers/net/bfin_mac.c b/drivers/net/bfin_mac.c
index a8ec60e1ed75..3db7db1828e7 100644
--- a/drivers/net/bfin_mac.c
+++ b/drivers/net/bfin_mac.c
@@ -605,36 +605,87 @@ adjust_head:
605static int bfin_mac_hard_start_xmit(struct sk_buff *skb, 605static int bfin_mac_hard_start_xmit(struct sk_buff *skb,
606 struct net_device *dev) 606 struct net_device *dev)
607{ 607{
608 unsigned int data; 608 u16 *data;
609 609
610 current_tx_ptr->skb = skb; 610 current_tx_ptr->skb = skb;
611 611
612 /* 612 if (ANOMALY_05000285) {
613 * Is skb->data always 16-bit aligned? 613 /*
614 * Do we need to memcpy((char *)(tail->packet + 2), skb->data, len)? 614 * TXDWA feature is not available to older revision < 0.3 silicon
615 */ 615 * of BF537
616 if ((((unsigned int)(skb->data)) & 0x02) == 2) { 616 *
617 /* move skb->data to current_tx_ptr payload */ 617 * Only if data buffer is ODD WORD alignment, we do not
618 data = (unsigned int)(skb->data) - 2; 618 * need to memcpy
619 *((unsigned short *)data) = (unsigned short)(skb->len); 619 */
620 current_tx_ptr->desc_a.start_addr = (unsigned long)data; 620 u32 data_align = (u32)(skb->data) & 0x3;
621 /* this is important! */ 621 if (data_align == 0x2) {
622 blackfin_dcache_flush_range(data, (data + (skb->len)) + 2); 622 /* move skb->data to current_tx_ptr payload */
623 623 data = (u16 *)(skb->data) - 1;
624 *data = (u16)(skb->len);
625 current_tx_ptr->desc_a.start_addr = (u32)data;
626 /* this is important! */
627 blackfin_dcache_flush_range((u32)data,
628 (u32)((u8 *)data + skb->len + 4));
629 } else {
630 *((u16 *)(current_tx_ptr->packet)) = (u16)(skb->len);
631 memcpy((u8 *)(current_tx_ptr->packet + 2), skb->data,
632 skb->len);
633 current_tx_ptr->desc_a.start_addr =
634 (u32)current_tx_ptr->packet;
635 if (current_tx_ptr->status.status_word != 0)
636 current_tx_ptr->status.status_word = 0;
637 blackfin_dcache_flush_range(
638 (u32)current_tx_ptr->packet,
639 (u32)(current_tx_ptr->packet + skb->len + 2));
640 }
624 } else { 641 } else {
625 *((unsigned short *)(current_tx_ptr->packet)) = 642 /*
626 (unsigned short)(skb->len); 643 * TXDWA feature is available to revision < 0.3 silicon of
627 memcpy((char *)(current_tx_ptr->packet + 2), skb->data, 644 * BF537 and always available to BF52x
628 (skb->len)); 645 */
629 current_tx_ptr->desc_a.start_addr = 646 u32 data_align = (u32)(skb->data) & 0x3;
630 (unsigned long)current_tx_ptr->packet; 647 if (data_align == 0x0) {
631 if (current_tx_ptr->status.status_word != 0) 648 u16 sysctl = bfin_read_EMAC_SYSCTL();
632 current_tx_ptr->status.status_word = 0; 649 sysctl |= TXDWA;
633 blackfin_dcache_flush_range((unsigned int)current_tx_ptr-> 650 bfin_write_EMAC_SYSCTL(sysctl);
634 packet, 651
635 (unsigned int)(current_tx_ptr-> 652 /* move skb->data to current_tx_ptr payload */
636 packet + skb->len) + 653 data = (u16 *)(skb->data) - 2;
637 2); 654 *data = (u16)(skb->len);
655 current_tx_ptr->desc_a.start_addr = (u32)data;
656 /* this is important! */
657 blackfin_dcache_flush_range(
658 (u32)data,
659 (u32)((u8 *)data + skb->len + 4));
660 } else if (data_align == 0x2) {
661 u16 sysctl = bfin_read_EMAC_SYSCTL();
662 sysctl &= ~TXDWA;
663 bfin_write_EMAC_SYSCTL(sysctl);
664
665 /* move skb->data to current_tx_ptr payload */
666 data = (u16 *)(skb->data) - 1;
667 *data = (u16)(skb->len);
668 current_tx_ptr->desc_a.start_addr = (u32)data;
669 /* this is important! */
670 blackfin_dcache_flush_range(
671 (u32)data,
672 (u32)((u8 *)data + skb->len + 4));
673 } else {
674 u16 sysctl = bfin_read_EMAC_SYSCTL();
675 sysctl &= ~TXDWA;
676 bfin_write_EMAC_SYSCTL(sysctl);
677
678 *((u16 *)(current_tx_ptr->packet)) = (u16)(skb->len);
679 memcpy((u8 *)(current_tx_ptr->packet + 2), skb->data,
680 skb->len);
681 current_tx_ptr->desc_a.start_addr =
682 (u32)current_tx_ptr->packet;
683 if (current_tx_ptr->status.status_word != 0)
684 current_tx_ptr->status.status_word = 0;
685 blackfin_dcache_flush_range(
686 (u32)current_tx_ptr->packet,
687 (u32)(current_tx_ptr->packet + skb->len + 2));
688 }
638 } 689 }
639 690
640 /* enable this packet's dma */ 691 /* enable this packet's dma */
@@ -691,7 +742,6 @@ static void bfin_mac_rx(struct net_device *dev)
691 (unsigned long)skb->tail); 742 (unsigned long)skb->tail);
692 743
693 dev->last_rx = jiffies; 744 dev->last_rx = jiffies;
694 skb->dev = dev;
695 skb->protocol = eth_type_trans(skb, dev); 745 skb->protocol = eth_type_trans(skb, dev);
696#if defined(BFIN_MAC_CSUM_OFFLOAD) 746#if defined(BFIN_MAC_CSUM_OFFLOAD)
697 skb->csum = current_rx_ptr->status.ip_payload_csum; 747 skb->csum = current_rx_ptr->status.ip_payload_csum;
@@ -920,6 +970,7 @@ static int bfin_mac_open(struct net_device *dev)
920 phy_start(lp->phydev); 970 phy_start(lp->phydev);
921 phy_write(lp->phydev, MII_BMCR, BMCR_RESET); 971 phy_write(lp->phydev, MII_BMCR, BMCR_RESET);
922 setup_system_regs(dev); 972 setup_system_regs(dev);
973 setup_mac_addr(dev->dev_addr);
923 bfin_mac_disable(); 974 bfin_mac_disable();
924 bfin_mac_enable(); 975 bfin_mac_enable();
925 pr_debug("hardware init finished\n"); 976 pr_debug("hardware init finished\n");
@@ -955,7 +1006,7 @@ static int bfin_mac_close(struct net_device *dev)
955 return 0; 1006 return 0;
956} 1007}
957 1008
958static int __init bfin_mac_probe(struct platform_device *pdev) 1009static int __devinit bfin_mac_probe(struct platform_device *pdev)
959{ 1010{
960 struct net_device *ndev; 1011 struct net_device *ndev;
961 struct bfin_mac_local *lp; 1012 struct bfin_mac_local *lp;
@@ -1081,7 +1132,7 @@ out_err_probe_mac:
1081 return rc; 1132 return rc;
1082} 1133}
1083 1134
1084static int bfin_mac_remove(struct platform_device *pdev) 1135static int __devexit bfin_mac_remove(struct platform_device *pdev)
1085{ 1136{
1086 struct net_device *ndev = platform_get_drvdata(pdev); 1137 struct net_device *ndev = platform_get_drvdata(pdev);
1087 struct bfin_mac_local *lp = netdev_priv(ndev); 1138 struct bfin_mac_local *lp = netdev_priv(ndev);
@@ -1128,7 +1179,7 @@ static int bfin_mac_resume(struct platform_device *pdev)
1128 1179
1129static struct platform_driver bfin_mac_driver = { 1180static struct platform_driver bfin_mac_driver = {
1130 .probe = bfin_mac_probe, 1181 .probe = bfin_mac_probe,
1131 .remove = bfin_mac_remove, 1182 .remove = __devexit_p(bfin_mac_remove),
1132 .resume = bfin_mac_resume, 1183 .resume = bfin_mac_resume,
1133 .suspend = bfin_mac_suspend, 1184 .suspend = bfin_mac_suspend,
1134 .driver = { 1185 .driver = {
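
The rewritten bfin_mac transmit path keys off (u32)skb->data & 0x3: when the payload sits at a usable offset it stores the 16-bit frame length immediately in front of the data and points the DMA descriptor at that length word, avoiding a copy; otherwise it memcpy()s into the descriptor's own buffer, and on silicon without ANOMALY_05000285 it also toggles the TXDWA bit so the MAC knows which layout to expect. A userspace illustration of the in-place case the old code already handled (payload two bytes past a word boundary); buffer and names are invented for the example:

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    int main(void)
    {
        static uint32_t storage[32];                  /* 4-byte aligned backing store */
        uint8_t *payload = (uint8_t *)storage + 6;    /* like skb->data with headroom */
        uint16_t len = 60;

        memset(payload, 0xab, len);                   /* pretend this is the frame */

        if (((uintptr_t)payload & 0x3) == 0x2) {
            /* step back one 16-bit word and store the length there, no copy needed */
            uint16_t *hdr = (uint16_t *)payload - 1;  /* now 4-byte aligned */
            *hdr = len;
            printf("DMA would start at %p, frame left in place\n", (void *)hdr);
        } else {
            printf("unsuitable alignment: would memcpy into a staging buffer\n");
        }
        return 0;
    }
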
diff --git a/drivers/net/bnx2x_main.c b/drivers/net/bnx2x_main.c
index af251a5df844..272a4bd25953 100644
--- a/drivers/net/bnx2x_main.c
+++ b/drivers/net/bnx2x_main.c
@@ -7202,7 +7202,7 @@ static void __devinit bnx2x_link_settings_requested(struct bnx2x *bp)
7202 bp->link_params.req_flow_ctrl = (bp->port.link_config & 7202 bp->link_params.req_flow_ctrl = (bp->port.link_config &
7203 PORT_FEATURE_FLOW_CONTROL_MASK); 7203 PORT_FEATURE_FLOW_CONTROL_MASK);
7204 if ((bp->link_params.req_flow_ctrl == FLOW_CTRL_AUTO) && 7204 if ((bp->link_params.req_flow_ctrl == FLOW_CTRL_AUTO) &&
7205 (!bp->port.supported & SUPPORTED_Autoneg)) 7205 !(bp->port.supported & SUPPORTED_Autoneg))
7206 bp->link_params.req_flow_ctrl = FLOW_CTRL_NONE; 7206 bp->link_params.req_flow_ctrl = FLOW_CTRL_NONE;
7207 7207
7208 BNX2X_DEV_INFO("req_line_speed %d req_duplex %d req_flow_ctrl 0x%x" 7208 BNX2X_DEV_INFO("req_line_speed %d req_duplex %d req_flow_ctrl 0x%x"
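
The one-line bnx2x change is an operator-precedence fix: '!' binds tighter than '&', so the old test '!bp->port.supported & SUPPORTED_Autoneg' ANDed a 0-or-1 result with the flag bit instead of asking whether autoneg is unsupported. A short demonstration (using 0x40 for SUPPORTED_Autoneg, the ethtool bit value):

    #include <stdio.h>

    #define SUPPORTED_Autoneg 0x40

    int main(void)
    {
        unsigned supported = 0;   /* port does NOT support autonegotiation */

        /* old test: evaluates (!supported) & 0x40 = 1 & 0x40 = 0, so the fixup
         * guarded by it would never run even though autoneg is unsupported */
        printf("buggy:   %d\n", !supported & SUPPORTED_Autoneg);
        /* fixed test: really asks "is the autoneg bit clear?" */
        printf("correct: %d\n", !(supported & SUPPORTED_Autoneg));
        return 0;
    }
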
diff --git a/drivers/net/bonding/bond_3ad.c b/drivers/net/bonding/bond_3ad.c
index ebb539e090c3..6106660a4a44 100644
--- a/drivers/net/bonding/bond_3ad.c
+++ b/drivers/net/bonding/bond_3ad.c
@@ -2107,6 +2107,7 @@ void bond_3ad_state_machine_handler(struct work_struct *work)
2107 aggregator = __get_first_agg(port); 2107 aggregator = __get_first_agg(port);
2108 ad_agg_selection_logic(aggregator); 2108 ad_agg_selection_logic(aggregator);
2109 } 2109 }
2110 bond_3ad_set_carrier(bond);
2110 } 2111 }
2111 2112
2112 // for each port run the state machines 2113 // for each port run the state machines
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index a641eeaa2a2f..c792138511e6 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -2223,272 +2223,217 @@ static int bond_slave_info_query(struct net_device *bond_dev, struct ifslave *in
2223 2223
2224/*-------------------------------- Monitoring -------------------------------*/ 2224/*-------------------------------- Monitoring -------------------------------*/
2225 2225
2226/*
2227 * if !have_locks, return nonzero if a failover is necessary. if
2228 * have_locks, do whatever failover activities are needed.
2229 *
2230 * This is to separate the inspection and failover steps for locking
2231 * purposes; failover requires rtnl, but acquiring it for every
2232 * inspection is undesirable, so a wrapper first does inspection, and
2233 * the acquires the necessary locks and calls again to perform
2234 * failover if needed. Since all locks are dropped, a complete
2235 * restart is needed between calls.
2236 */
2237static int __bond_mii_monitor(struct bonding *bond, int have_locks)
2238{
2239 struct slave *slave, *oldcurrent;
2240 int do_failover = 0;
2241 int i;
2242
2243 if (bond->slave_cnt == 0)
2244 goto out;
2245 2226
2246 /* we will try to read the link status of each of our slaves, and 2227static int bond_miimon_inspect(struct bonding *bond)
2247 * set their IFF_RUNNING flag appropriately. For each slave not 2228{
2248 * supporting MII status, we won't do anything so that a user-space 2229 struct slave *slave;
2249 * program could monitor the link itself if needed. 2230 int i, link_state, commit = 0;
2250 */
2251
2252 read_lock(&bond->curr_slave_lock);
2253 oldcurrent = bond->curr_active_slave;
2254 read_unlock(&bond->curr_slave_lock);
2255 2231
2256 bond_for_each_slave(bond, slave, i) { 2232 bond_for_each_slave(bond, slave, i) {
2257 struct net_device *slave_dev = slave->dev; 2233 slave->new_link = BOND_LINK_NOCHANGE;
2258 int link_state;
2259 u16 old_speed = slave->speed;
2260 u8 old_duplex = slave->duplex;
2261 2234
2262 link_state = bond_check_dev_link(bond, slave_dev, 0); 2235 link_state = bond_check_dev_link(bond, slave->dev, 0);
2263 2236
2264 switch (slave->link) { 2237 switch (slave->link) {
2265 case BOND_LINK_UP: /* the link was up */ 2238 case BOND_LINK_UP:
2266 if (link_state == BMSR_LSTATUS) { 2239 if (link_state)
2267 if (!oldcurrent) { 2240 continue;
2268 if (!have_locks)
2269 return 1;
2270 do_failover = 1;
2271 }
2272 break;
2273 } else { /* link going down */
2274 slave->link = BOND_LINK_FAIL;
2275 slave->delay = bond->params.downdelay;
2276
2277 if (slave->link_failure_count < UINT_MAX) {
2278 slave->link_failure_count++;
2279 }
2280 2241
2281 if (bond->params.downdelay) { 2242 slave->link = BOND_LINK_FAIL;
2282 printk(KERN_INFO DRV_NAME 2243 slave->delay = bond->params.downdelay;
2283 ": %s: link status down for %s " 2244 if (slave->delay) {
2284 "interface %s, disabling it in " 2245 printk(KERN_INFO DRV_NAME
2285 "%d ms.\n", 2246 ": %s: link status down for %s"
2286 bond->dev->name, 2247 "interface %s, disabling it in %d ms.\n",
2287 IS_UP(slave_dev) 2248 bond->dev->name,
2288 ? ((bond->params.mode == BOND_MODE_ACTIVEBACKUP) 2249 (bond->params.mode ==
2289 ? ((slave == oldcurrent) 2250 BOND_MODE_ACTIVEBACKUP) ?
2290 ? "active " : "backup ") 2251 ((slave->state == BOND_STATE_ACTIVE) ?
2291 : "") 2252 "active " : "backup ") : "",
2292 : "idle ", 2253 slave->dev->name,
2293 slave_dev->name, 2254 bond->params.downdelay * bond->params.miimon);
2294 bond->params.downdelay * bond->params.miimon);
2295 }
2296 } 2255 }
2297 /* no break ! fall through the BOND_LINK_FAIL test to 2256 /*FALLTHRU*/
2298 ensure proper action to be taken 2257 case BOND_LINK_FAIL:
2299 */ 2258 if (link_state) {
2300 case BOND_LINK_FAIL: /* the link has just gone down */ 2259 /*
2301 if (link_state != BMSR_LSTATUS) { 2260 * recovered before downdelay expired
2302 /* link stays down */ 2261 */
2303 if (slave->delay <= 0) { 2262 slave->link = BOND_LINK_UP;
2304 if (!have_locks)
2305 return 1;
2306
2307 /* link down for too long time */
2308 slave->link = BOND_LINK_DOWN;
2309
2310 /* in active/backup mode, we must
2311 * completely disable this interface
2312 */
2313 if ((bond->params.mode == BOND_MODE_ACTIVEBACKUP) ||
2314 (bond->params.mode == BOND_MODE_8023AD)) {
2315 bond_set_slave_inactive_flags(slave);
2316 }
2317
2318 printk(KERN_INFO DRV_NAME
2319 ": %s: link status definitely "
2320 "down for interface %s, "
2321 "disabling it\n",
2322 bond->dev->name,
2323 slave_dev->name);
2324
2325 /* notify ad that the link status has changed */
2326 if (bond->params.mode == BOND_MODE_8023AD) {
2327 bond_3ad_handle_link_change(slave, BOND_LINK_DOWN);
2328 }
2329
2330 if ((bond->params.mode == BOND_MODE_TLB) ||
2331 (bond->params.mode == BOND_MODE_ALB)) {
2332 bond_alb_handle_link_change(bond, slave, BOND_LINK_DOWN);
2333 }
2334
2335 if (slave == oldcurrent) {
2336 do_failover = 1;
2337 }
2338 } else {
2339 slave->delay--;
2340 }
2341 } else {
2342 /* link up again */
2343 slave->link = BOND_LINK_UP;
2344 slave->jiffies = jiffies; 2263 slave->jiffies = jiffies;
2345 printk(KERN_INFO DRV_NAME 2264 printk(KERN_INFO DRV_NAME
2346 ": %s: link status up again after %d " 2265 ": %s: link status up again after %d "
2347 "ms for interface %s.\n", 2266 "ms for interface %s.\n",
2348 bond->dev->name, 2267 bond->dev->name,
2349 (bond->params.downdelay - slave->delay) * bond->params.miimon, 2268 (bond->params.downdelay - slave->delay) *
2350 slave_dev->name); 2269 bond->params.miimon,
2270 slave->dev->name);
2271 continue;
2351 } 2272 }
2352 break;
2353 case BOND_LINK_DOWN: /* the link was down */
2354 if (link_state != BMSR_LSTATUS) {
2355 /* the link stays down, nothing more to do */
2356 break;
2357 } else { /* link going up */
2358 slave->link = BOND_LINK_BACK;
2359 slave->delay = bond->params.updelay;
2360 2273
2361 if (bond->params.updelay) { 2274 if (slave->delay <= 0) {
2362 /* if updelay == 0, no need to 2275 slave->new_link = BOND_LINK_DOWN;
2363 advertise about a 0 ms delay */ 2276 commit++;
2364 printk(KERN_INFO DRV_NAME 2277 continue;
2365 ": %s: link status up for "
2366 "interface %s, enabling it "
2367 "in %d ms.\n",
2368 bond->dev->name,
2369 slave_dev->name,
2370 bond->params.updelay * bond->params.miimon);
2371 }
2372 } 2278 }
2373 /* no break ! fall through the BOND_LINK_BACK state in
2374 case there's something to do.
2375 */
2376 case BOND_LINK_BACK: /* the link has just come back */
2377 if (link_state != BMSR_LSTATUS) {
2378 /* link down again */
2379 slave->link = BOND_LINK_DOWN;
2380 2279
2280 slave->delay--;
2281 break;
2282
2283 case BOND_LINK_DOWN:
2284 if (!link_state)
2285 continue;
2286
2287 slave->link = BOND_LINK_BACK;
2288 slave->delay = bond->params.updelay;
2289
2290 if (slave->delay) {
2291 printk(KERN_INFO DRV_NAME
2292 ": %s: link status up for "
2293 "interface %s, enabling it in %d ms.\n",
2294 bond->dev->name, slave->dev->name,
2295 bond->params.updelay *
2296 bond->params.miimon);
2297 }
2298 /*FALLTHRU*/
2299 case BOND_LINK_BACK:
2300 if (!link_state) {
2301 slave->link = BOND_LINK_DOWN;
2381 printk(KERN_INFO DRV_NAME 2302 printk(KERN_INFO DRV_NAME
2382 ": %s: link status down again after %d " 2303 ": %s: link status down again after %d "
2383 "ms for interface %s.\n", 2304 "ms for interface %s.\n",
2384 bond->dev->name, 2305 bond->dev->name,
2385 (bond->params.updelay - slave->delay) * bond->params.miimon, 2306 (bond->params.updelay - slave->delay) *
2386 slave_dev->name); 2307 bond->params.miimon,
2387 } else { 2308 slave->dev->name);
2388 /* link stays up */
2389 if (slave->delay == 0) {
2390 if (!have_locks)
2391 return 1;
2392
2393 /* now the link has been up for long time enough */
2394 slave->link = BOND_LINK_UP;
2395 slave->jiffies = jiffies;
2396
2397 if (bond->params.mode == BOND_MODE_8023AD) {
2398 /* prevent it from being the active one */
2399 slave->state = BOND_STATE_BACKUP;
2400 } else if (bond->params.mode != BOND_MODE_ACTIVEBACKUP) {
2401 /* make it immediately active */
2402 slave->state = BOND_STATE_ACTIVE;
2403 } else if (slave != bond->primary_slave) {
2404 /* prevent it from being the active one */
2405 slave->state = BOND_STATE_BACKUP;
2406 }
2407 2309
2408 printk(KERN_INFO DRV_NAME 2310 continue;
2409 ": %s: link status definitely "
2410 "up for interface %s.\n",
2411 bond->dev->name,
2412 slave_dev->name);
2413
2414 /* notify ad that the link status has changed */
2415 if (bond->params.mode == BOND_MODE_8023AD) {
2416 bond_3ad_handle_link_change(slave, BOND_LINK_UP);
2417 }
2418
2419 if ((bond->params.mode == BOND_MODE_TLB) ||
2420 (bond->params.mode == BOND_MODE_ALB)) {
2421 bond_alb_handle_link_change(bond, slave, BOND_LINK_UP);
2422 }
2423
2424 if ((!oldcurrent) ||
2425 (slave == bond->primary_slave)) {
2426 do_failover = 1;
2427 }
2428 } else {
2429 slave->delay--;
2430 }
2431 } 2311 }
2312
2313 if (slave->delay <= 0) {
2314 slave->new_link = BOND_LINK_UP;
2315 commit++;
2316 continue;
2317 }
2318
2319 slave->delay--;
2432 break; 2320 break;
2433 default: 2321 }
2434 /* Should not happen */ 2322 }
2435 printk(KERN_ERR DRV_NAME
2436 ": %s: Error: %s Illegal value (link=%d)\n",
2437 bond->dev->name,
2438 slave->dev->name,
2439 slave->link);
2440 goto out;
2441 } /* end of switch (slave->link) */
2442 2323
2443 bond_update_speed_duplex(slave); 2324 return commit;
2325}
2444 2326
2445 if (bond->params.mode == BOND_MODE_8023AD) { 2327static void bond_miimon_commit(struct bonding *bond)
2446 if (old_speed != slave->speed) { 2328{
2447 bond_3ad_adapter_speed_changed(slave); 2329 struct slave *slave;
2448 } 2330 int i;
2331
2332 bond_for_each_slave(bond, slave, i) {
2333 switch (slave->new_link) {
2334 case BOND_LINK_NOCHANGE:
2335 continue;
2336
2337 case BOND_LINK_UP:
2338 slave->link = BOND_LINK_UP;
2339 slave->jiffies = jiffies;
2449 2340
2450 if (old_duplex != slave->duplex) { 2341 if (bond->params.mode == BOND_MODE_8023AD) {
2451 bond_3ad_adapter_duplex_changed(slave); 2342 /* prevent it from being the active one */
2343 slave->state = BOND_STATE_BACKUP;
2344 } else if (bond->params.mode != BOND_MODE_ACTIVEBACKUP) {
2345 /* make it immediately active */
2346 slave->state = BOND_STATE_ACTIVE;
2347 } else if (slave != bond->primary_slave) {
2348 /* prevent it from being the active one */
2349 slave->state = BOND_STATE_BACKUP;
2452 } 2350 }
2453 }
2454 2351
2455 } /* end of for */ 2352 printk(KERN_INFO DRV_NAME
2353 ": %s: link status definitely "
2354 "up for interface %s.\n",
2355 bond->dev->name, slave->dev->name);
2456 2356
2457 if (do_failover) { 2357 /* notify ad that the link status has changed */
2458 ASSERT_RTNL(); 2358 if (bond->params.mode == BOND_MODE_8023AD)
2359 bond_3ad_handle_link_change(slave, BOND_LINK_UP);
2459 2360
2460 write_lock_bh(&bond->curr_slave_lock); 2361 if ((bond->params.mode == BOND_MODE_TLB) ||
2362 (bond->params.mode == BOND_MODE_ALB))
2363 bond_alb_handle_link_change(bond, slave,
2364 BOND_LINK_UP);
2461 2365
2462 bond_select_active_slave(bond); 2366 if (!bond->curr_active_slave ||
2367 (slave == bond->primary_slave))
2368 goto do_failover;
2463 2369
2464 write_unlock_bh(&bond->curr_slave_lock); 2370 continue;
2465 2371
2466 } else 2372 case BOND_LINK_DOWN:
2467 bond_set_carrier(bond); 2373 slave->link = BOND_LINK_DOWN;
2468 2374
2469out: 2375 if (bond->params.mode == BOND_MODE_ACTIVEBACKUP ||
2470 return 0; 2376 bond->params.mode == BOND_MODE_8023AD)
2377 bond_set_slave_inactive_flags(slave);
2378
2379 printk(KERN_INFO DRV_NAME
2380 ": %s: link status definitely down for "
2381 "interface %s, disabling it\n",
2382 bond->dev->name, slave->dev->name);
2383
2384 if (bond->params.mode == BOND_MODE_8023AD)
2385 bond_3ad_handle_link_change(slave,
2386 BOND_LINK_DOWN);
2387
2388 if (bond->params.mode == BOND_MODE_TLB ||
2389 bond->params.mode == BOND_MODE_ALB)
2390 bond_alb_handle_link_change(bond, slave,
2391 BOND_LINK_DOWN);
2392
2393 if (slave == bond->curr_active_slave)
2394 goto do_failover;
2395
2396 continue;
2397
2398 default:
2399 printk(KERN_ERR DRV_NAME
2400 ": %s: invalid new link %d on slave %s\n",
2401 bond->dev->name, slave->new_link,
2402 slave->dev->name);
2403 slave->new_link = BOND_LINK_NOCHANGE;
2404
2405 continue;
2406 }
2407
2408do_failover:
2409 ASSERT_RTNL();
2410 write_lock_bh(&bond->curr_slave_lock);
2411 bond_select_active_slave(bond);
2412 write_unlock_bh(&bond->curr_slave_lock);
2413 }
2414
2415 bond_set_carrier(bond);
2471} 2416}
2472 2417
2473/* 2418/*
2474 * bond_mii_monitor 2419 * bond_mii_monitor
2475 * 2420 *
2476 * Really a wrapper that splits the mii monitor into two phases: an 2421 * Really a wrapper that splits the mii monitor into two phases: an
2477 * inspection, then (if inspection indicates something needs to be 2422 * inspection, then (if inspection indicates something needs to be done)
2478 * done) an acquisition of appropriate locks followed by another pass 2423 * an acquisition of appropriate locks followed by a commit phase to
2479 * to implement whatever link state changes are indicated. 2424 * implement whatever link state changes are indicated.
2480 */ 2425 */
2481void bond_mii_monitor(struct work_struct *work) 2426void bond_mii_monitor(struct work_struct *work)
2482{ 2427{
2483 struct bonding *bond = container_of(work, struct bonding, 2428 struct bonding *bond = container_of(work, struct bonding,
2484 mii_work.work); 2429 mii_work.work);
2485 unsigned long delay;
2486 2430
2487 read_lock(&bond->lock); 2431 read_lock(&bond->lock);
2488 if (bond->kill_timers) { 2432 if (bond->kill_timers)
2489 read_unlock(&bond->lock); 2433 goto out;
2490 return; 2434
2491 } 2435 if (bond->slave_cnt == 0)
2436 goto re_arm;
2492 2437
2493 if (bond->send_grat_arp) { 2438 if (bond->send_grat_arp) {
2494 read_lock(&bond->curr_slave_lock); 2439 read_lock(&bond->curr_slave_lock);
@@ -2496,19 +2441,24 @@ void bond_mii_monitor(struct work_struct *work)
2496 read_unlock(&bond->curr_slave_lock); 2441 read_unlock(&bond->curr_slave_lock);
2497 } 2442 }
2498 2443
2499 if (__bond_mii_monitor(bond, 0)) { 2444 if (bond_miimon_inspect(bond)) {
2500 read_unlock(&bond->lock); 2445 read_unlock(&bond->lock);
2501 rtnl_lock(); 2446 rtnl_lock();
2502 read_lock(&bond->lock); 2447 read_lock(&bond->lock);
2503 __bond_mii_monitor(bond, 1); 2448
2449 bond_miimon_commit(bond);
2450
2504 read_unlock(&bond->lock); 2451 read_unlock(&bond->lock);
2505 rtnl_unlock(); /* might sleep, hold no other locks */ 2452 rtnl_unlock(); /* might sleep, hold no other locks */
2506 read_lock(&bond->lock); 2453 read_lock(&bond->lock);
2507 } 2454 }
2508 2455
2509 delay = msecs_to_jiffies(bond->params.miimon); 2456re_arm:
2457 if (bond->params.miimon)
2458 queue_delayed_work(bond->wq, &bond->mii_work,
2459 msecs_to_jiffies(bond->params.miimon));
2460out:
2510 read_unlock(&bond->lock); 2461 read_unlock(&bond->lock);
2511 queue_delayed_work(bond->wq, &bond->mii_work, delay);
2512} 2462}
2513 2463
2514static __be32 bond_glean_dev_ip(struct net_device *dev) 2464static __be32 bond_glean_dev_ip(struct net_device *dev)
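
The bond_main.c rework splits the old __bond_mii_monitor(bond, have_locks) into bond_miimon_inspect(), which runs under the bond's read lock and only records each slave's proposed state in slave->new_link, and bond_miimon_commit(), which runs after RTNL has been acquired and applies those states (802.3ad/ALB notification, failover, carrier update). The monitor therefore takes RTNL only when the inspection pass found something to do. A rough userspace outline of that inspect/commit split, with a pthread rwlock standing in for bond->lock and a mutex for RTNL (build with -pthread); the real code is more involved and also re-takes the read lock around the commit:

    #include <pthread.h>
    #include <stdio.h>

    static pthread_rwlock_t cheap     = PTHREAD_RWLOCK_INITIALIZER;  /* ~ bond->lock */
    static pthread_mutex_t  expensive = PTHREAD_MUTEX_INITIALIZER;   /* ~ RTNL       */
    static int link_state = 0, detected_link = 0;

    static int inspect(int *proposed)   /* cheap pass: only note what should change */
    {
        int need_commit = 0;

        pthread_rwlock_rdlock(&cheap);
        if (link_state != detected_link) {
            *proposed = detected_link;
            need_commit = 1;
        }
        pthread_rwlock_unlock(&cheap);
        return need_commit;
    }

    static void commit(int proposed)    /* heavy pass: apply it under the big lock */
    {
        pthread_mutex_lock(&expensive);
        link_state = proposed;
        pthread_mutex_unlock(&expensive);
    }

    int main(void)
    {
        int proposed;

        detected_link = 1;              /* simulate the monitor seeing a link change */
        if (inspect(&proposed))
            commit(proposed);
        printf("link_state = %d\n", link_state);
        return 0;
    }
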
diff --git a/drivers/net/bonding/bond_sysfs.c b/drivers/net/bonding/bond_sysfs.c
index 6caac0ffb2f2..3bdb47382521 100644
--- a/drivers/net/bonding/bond_sysfs.c
+++ b/drivers/net/bonding/bond_sysfs.c
@@ -350,9 +350,6 @@ static ssize_t bonding_store_slaves(struct device *d,
350 if (dev) { 350 if (dev) {
351 printk(KERN_INFO DRV_NAME ": %s: Removing slave %s\n", 351 printk(KERN_INFO DRV_NAME ": %s: Removing slave %s\n",
352 bond->dev->name, dev->name); 352 bond->dev->name, dev->name);
353 if (bond->setup_by_slave)
354 res = bond_release_and_destroy(bond->dev, dev);
355 else
356 res = bond_release(bond->dev, dev); 353 res = bond_release(bond->dev, dev);
357 if (res) { 354 if (res) {
358 ret = res; 355 ret = res;
diff --git a/drivers/net/cs89x0.c b/drivers/net/cs89x0.c
index fba87abe78ee..ea6144a9565e 100644
--- a/drivers/net/cs89x0.c
+++ b/drivers/net/cs89x0.c
@@ -189,7 +189,7 @@ static unsigned int netcard_portlist[] __used __initdata = {IXDP2X01_CS8900_VIRT
189static unsigned int cs8900_irq_map[] = {IRQ_IXDP2X01_CS8900, 0, 0, 0}; 189static unsigned int cs8900_irq_map[] = {IRQ_IXDP2X01_CS8900, 0, 0, 0};
190#elif defined(CONFIG_ARCH_PNX010X) 190#elif defined(CONFIG_ARCH_PNX010X)
191#include <asm/irq.h> 191#include <asm/irq.h>
192#include <asm/arch/gpio.h> 192#include <mach/gpio.h>
193#define CIRRUS_DEFAULT_BASE IO_ADDRESS(EXT_STATIC2_s0_BASE + 0x200000) /* = Physical address 0x48200000 */ 193#define CIRRUS_DEFAULT_BASE IO_ADDRESS(EXT_STATIC2_s0_BASE + 0x200000) /* = Physical address 0x48200000 */
194#define CIRRUS_DEFAULT_IRQ VH_INTC_INT_NUM_CASCADED_INTERRUPT_1 /* Event inputs bank 1 - ID 35/bit 3 */ 194#define CIRRUS_DEFAULT_IRQ VH_INTC_INT_NUM_CASCADED_INTERRUPT_1 /* Event inputs bank 1 - ID 35/bit 3 */
195static unsigned int netcard_portlist[] __used __initdata = {CIRRUS_DEFAULT_BASE, 0}; 195static unsigned int netcard_portlist[] __used __initdata = {CIRRUS_DEFAULT_BASE, 0};
diff --git a/drivers/net/cxgb3/t3_hw.c b/drivers/net/cxgb3/t3_hw.c
index 47d51788a462..04c0e90119af 100644
--- a/drivers/net/cxgb3/t3_hw.c
+++ b/drivers/net/cxgb3/t3_hw.c
@@ -683,7 +683,7 @@ enum {
683 SF_ERASE_SECTOR = 0xd8, /* erase sector */ 683 SF_ERASE_SECTOR = 0xd8, /* erase sector */
684 684
685 FW_FLASH_BOOT_ADDR = 0x70000, /* start address of FW in flash */ 685 FW_FLASH_BOOT_ADDR = 0x70000, /* start address of FW in flash */
686 FW_VERS_ADDR = 0x77ffc, /* flash address holding FW version */ 686 FW_VERS_ADDR = 0x7fffc, /* flash address holding FW version */
687 FW_MIN_SIZE = 8 /* at least version and csum */ 687 FW_MIN_SIZE = 8 /* at least version and csum */
688}; 688};
689 689
diff --git a/drivers/net/de620.c b/drivers/net/de620.c
index 3f5190c654cf..d454e143483e 100644
--- a/drivers/net/de620.c
+++ b/drivers/net/de620.c
@@ -488,13 +488,6 @@ static void de620_set_multicast_list(struct net_device *dev)
488{ 488{
489 if (dev->mc_count || dev->flags&(IFF_ALLMULTI|IFF_PROMISC)) 489 if (dev->mc_count || dev->flags&(IFF_ALLMULTI|IFF_PROMISC))
490 { /* Enable promiscuous mode */ 490 { /* Enable promiscuous mode */
491 /*
492 * We must make the kernel realise we had to move
493 * into promisc mode or we start all out war on
494 * the cable. - AC
495 */
496 dev->flags|=IFF_PROMISC;
497
498 de620_set_register(dev, W_TCR, (TCR_DEF & ~RXPBM) | RXALL); 491 de620_set_register(dev, W_TCR, (TCR_DEF & ~RXPBM) | RXALL);
499 } 492 }
500 else 493 else
diff --git a/drivers/net/dm9000.c b/drivers/net/dm9000.c
index 0b0f1c407a7e..f42c23f42652 100644
--- a/drivers/net/dm9000.c
+++ b/drivers/net/dm9000.c
@@ -1374,6 +1374,11 @@ dm9000_probe(struct platform_device *pdev)
1374 for (i = 0; i < 6; i += 2) 1374 for (i = 0; i < 6; i += 2)
1375 dm9000_read_eeprom(db, i / 2, ndev->dev_addr+i); 1375 dm9000_read_eeprom(db, i / 2, ndev->dev_addr+i);
1376 1376
1377 if (!is_valid_ether_addr(ndev->dev_addr) && pdata != NULL) {
1378 mac_src = "platform data";
1379 memcpy(ndev->dev_addr, pdata->dev_addr, 6);
1380 }
1381
1377 if (!is_valid_ether_addr(ndev->dev_addr)) { 1382 if (!is_valid_ether_addr(ndev->dev_addr)) {
1378 /* try reading from mac */ 1383 /* try reading from mac */
1379 1384
diff --git a/drivers/net/e1000e/e1000.h b/drivers/net/e1000e/e1000.h
index 4a4f62e002b2..cf57050d99d8 100644
--- a/drivers/net/e1000e/e1000.h
+++ b/drivers/net/e1000e/e1000.h
@@ -41,24 +41,25 @@
41 41
42struct e1000_info; 42struct e1000_info;
43 43
44#define ndev_printk(level, netdev, format, arg...) \ 44#define e_printk(level, adapter, format, arg...) \
45 printk(level "%s: " format, (netdev)->name, ## arg) 45 printk(level "%s: %s: " format, pci_name(adapter->pdev), \
46 adapter->netdev->name, ## arg)
46 47
47#ifdef DEBUG 48#ifdef DEBUG
48#define ndev_dbg(netdev, format, arg...) \ 49#define e_dbg(format, arg...) \
49 ndev_printk(KERN_DEBUG , netdev, format, ## arg) 50 e_printk(KERN_DEBUG , adapter, format, ## arg)
50#else 51#else
51#define ndev_dbg(netdev, format, arg...) do { (void)(netdev); } while (0) 52#define e_dbg(format, arg...) do { (void)(adapter); } while (0)
52#endif 53#endif
53 54
54#define ndev_err(netdev, format, arg...) \ 55#define e_err(format, arg...) \
55 ndev_printk(KERN_ERR , netdev, format, ## arg) 56 e_printk(KERN_ERR, adapter, format, ## arg)
56#define ndev_info(netdev, format, arg...) \ 57#define e_info(format, arg...) \
57 ndev_printk(KERN_INFO , netdev, format, ## arg) 58 e_printk(KERN_INFO, adapter, format, ## arg)
58#define ndev_warn(netdev, format, arg...) \ 59#define e_warn(format, arg...) \
59 ndev_printk(KERN_WARNING , netdev, format, ## arg) 60 e_printk(KERN_WARNING, adapter, format, ## arg)
60#define ndev_notice(netdev, format, arg...) \ 61#define e_notice(format, arg...) \
61 ndev_printk(KERN_NOTICE , netdev, format, ## arg) 62 e_printk(KERN_NOTICE, adapter, format, ## arg)
62 63
63 64
64/* Tx/Rx descriptor defines */ 65/* Tx/Rx descriptor defines */
@@ -283,10 +284,6 @@ struct e1000_adapter {
283 unsigned long led_status; 284 unsigned long led_status;
284 285
285 unsigned int flags; 286 unsigned int flags;
286
287 /* for ioport free */
288 int bars;
289 int need_ioport;
290}; 287};
291 288
292struct e1000_info { 289struct e1000_info {
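Note: the e1000.h hunk above replaces the ndev_* wrappers with the e_printk()/e_dbg()/e_err()/e_info()/e_warn()/e_notice() family. The macros no longer take a netdev argument; they expect an `adapter` variable to be in scope at the call site, and they prefix each message with the PCI device name and the interface name. A user-space sketch of the same macro pattern, with printf standing in for printk and an invented adapter struct:

#include <stdio.h>

/* Invented stand-in for the driver's adapter structure. */
struct adapter {
        const char *pci_name;
        const char *ifname;
};

/* Same shape as the new e_printk()/e_info() macros: the format string is
 * prefixed with "<pci name>: <interface>: ", and 'adapter' must already be
 * visible where the macro is used. The GNU-style ## swallows the trailing
 * comma when no extra arguments are given, as in the kernel macros. */
#define e_printk(adapter, fmt, ...) \
        printf("%s: %s: " fmt, (adapter)->pci_name, (adapter)->ifname, ##__VA_ARGS__)

#define e_info(fmt, ...) e_printk(adapter, fmt, ##__VA_ARGS__)

int main(void)
{
        struct adapter a = { "0000:00:19.0", "eth0" };
        struct adapter *adapter = &a;   /* picked up implicitly by e_info() */

        e_info("Link is Up %d Mbps %s\n", 1000, "Full Duplex");
        return 0;
}

The e1000e ethtool.c, netdev.c and param.c hunks that follow are largely mechanical conversions to these macros.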
diff --git a/drivers/net/e1000e/ethtool.c b/drivers/net/e1000e/ethtool.c
index 9350564065e7..cf9679f2b7c4 100644
--- a/drivers/net/e1000e/ethtool.c
+++ b/drivers/net/e1000e/ethtool.c
@@ -189,8 +189,7 @@ static int e1000_set_spd_dplx(struct e1000_adapter *adapter, u16 spddplx)
189 /* Fiber NICs only allow 1000 gbps Full duplex */ 189 /* Fiber NICs only allow 1000 gbps Full duplex */
190 if ((adapter->hw.phy.media_type == e1000_media_type_fiber) && 190 if ((adapter->hw.phy.media_type == e1000_media_type_fiber) &&
191 spddplx != (SPEED_1000 + DUPLEX_FULL)) { 191 spddplx != (SPEED_1000 + DUPLEX_FULL)) {
192 ndev_err(adapter->netdev, "Unsupported Speed/Duplex " 192 e_err("Unsupported Speed/Duplex configuration\n");
193 "configuration\n");
194 return -EINVAL; 193 return -EINVAL;
195 } 194 }
196 195
@@ -213,8 +212,7 @@ static int e1000_set_spd_dplx(struct e1000_adapter *adapter, u16 spddplx)
213 break; 212 break;
214 case SPEED_1000 + DUPLEX_HALF: /* not supported */ 213 case SPEED_1000 + DUPLEX_HALF: /* not supported */
215 default: 214 default:
216 ndev_err(adapter->netdev, "Unsupported Speed/Duplex " 215 e_err("Unsupported Speed/Duplex configuration\n");
217 "configuration\n");
218 return -EINVAL; 216 return -EINVAL;
219 } 217 }
220 return 0; 218 return 0;
@@ -231,8 +229,8 @@ static int e1000_set_settings(struct net_device *netdev,
231 * cannot be changed 229 * cannot be changed
232 */ 230 */
233 if (e1000_check_reset_block(hw)) { 231 if (e1000_check_reset_block(hw)) {
234 ndev_err(netdev, "Cannot change link " 232 e_err("Cannot change link characteristics when SoL/IDER is "
235 "characteristics when SoL/IDER is active.\n"); 233 "active.\n");
236 return -EINVAL; 234 return -EINVAL;
237 } 235 }
238 236
@@ -380,8 +378,7 @@ static int e1000_set_tso(struct net_device *netdev, u32 data)
380 netdev->features &= ~NETIF_F_TSO6; 378 netdev->features &= ~NETIF_F_TSO6;
381 } 379 }
382 380
383 ndev_info(netdev, "TSO is %s\n", 381 e_info("TSO is %s\n", data ? "Enabled" : "Disabled");
384 data ? "Enabled" : "Disabled");
385 adapter->flags |= FLAG_TSO_FORCE; 382 adapter->flags |= FLAG_TSO_FORCE;
386 return 0; 383 return 0;
387} 384}
@@ -722,10 +719,9 @@ static bool reg_pattern_test(struct e1000_adapter *adapter, u64 *data,
722 (test[pat] & write)); 719 (test[pat] & write));
723 val = E1000_READ_REG_ARRAY(&adapter->hw, reg, offset); 720 val = E1000_READ_REG_ARRAY(&adapter->hw, reg, offset);
724 if (val != (test[pat] & write & mask)) { 721 if (val != (test[pat] & write & mask)) {
725 ndev_err(adapter->netdev, "pattern test reg %04X " 722 e_err("pattern test reg %04X failed: got 0x%08X "
726 "failed: got 0x%08X expected 0x%08X\n", 723 "expected 0x%08X\n", reg + offset, val,
727 reg + offset, 724 (test[pat] & write & mask));
728 val, (test[pat] & write & mask));
729 *data = reg; 725 *data = reg;
730 return 1; 726 return 1;
731 } 727 }
@@ -740,9 +736,8 @@ static bool reg_set_and_check(struct e1000_adapter *adapter, u64 *data,
740 __ew32(&adapter->hw, reg, write & mask); 736 __ew32(&adapter->hw, reg, write & mask);
741 val = __er32(&adapter->hw, reg); 737 val = __er32(&adapter->hw, reg);
742 if ((write & mask) != (val & mask)) { 738 if ((write & mask) != (val & mask)) {
743 ndev_err(adapter->netdev, "set/check reg %04X test failed: " 739 e_err("set/check reg %04X test failed: got 0x%08X "
744 "got 0x%08X expected 0x%08X\n", reg, (val & mask), 740 "expected 0x%08X\n", reg, (val & mask), (write & mask));
745 (write & mask));
746 *data = reg; 741 *data = reg;
747 return 1; 742 return 1;
748 } 743 }
@@ -766,7 +761,6 @@ static int e1000_reg_test(struct e1000_adapter *adapter, u64 *data)
766{ 761{
767 struct e1000_hw *hw = &adapter->hw; 762 struct e1000_hw *hw = &adapter->hw;
768 struct e1000_mac_info *mac = &adapter->hw.mac; 763 struct e1000_mac_info *mac = &adapter->hw.mac;
769 struct net_device *netdev = adapter->netdev;
770 u32 value; 764 u32 value;
771 u32 before; 765 u32 before;
772 u32 after; 766 u32 after;
@@ -799,8 +793,8 @@ static int e1000_reg_test(struct e1000_adapter *adapter, u64 *data)
799 ew32(STATUS, toggle); 793 ew32(STATUS, toggle);
800 after = er32(STATUS) & toggle; 794 after = er32(STATUS) & toggle;
801 if (value != after) { 795 if (value != after) {
802 ndev_err(netdev, "failed STATUS register test got: " 796 e_err("failed STATUS register test got: 0x%08X expected: "
803 "0x%08X expected: 0x%08X\n", after, value); 797 "0x%08X\n", after, value);
804 *data = 1; 798 *data = 1;
805 return 1; 799 return 1;
806 } 800 }
@@ -903,8 +897,7 @@ static int e1000_intr_test(struct e1000_adapter *adapter, u64 *data)
903 *data = 1; 897 *data = 1;
904 return -1; 898 return -1;
905 } 899 }
906 ndev_info(netdev, "testing %s interrupt\n", 900 e_info("testing %s interrupt\n", (shared_int ? "shared" : "unshared"));
907 (shared_int ? "shared" : "unshared"));
908 901
909 /* Disable all the interrupts */ 902 /* Disable all the interrupts */
910 ew32(IMC, 0xFFFFFFFF); 903 ew32(IMC, 0xFFFFFFFF);
@@ -1526,8 +1519,7 @@ static int e1000_loopback_test(struct e1000_adapter *adapter, u64 *data)
1526 * sessions are active 1519 * sessions are active
1527 */ 1520 */
1528 if (e1000_check_reset_block(&adapter->hw)) { 1521 if (e1000_check_reset_block(&adapter->hw)) {
1529 ndev_err(adapter->netdev, "Cannot do PHY loopback test " 1522 e_err("Cannot do PHY loopback test when SoL/IDER is active.\n");
1530 "when SoL/IDER is active.\n");
1531 *data = 0; 1523 *data = 0;
1532 goto out; 1524 goto out;
1533 } 1525 }
@@ -1612,7 +1604,7 @@ static void e1000_diag_test(struct net_device *netdev,
1612 forced_speed_duplex = adapter->hw.mac.forced_speed_duplex; 1604 forced_speed_duplex = adapter->hw.mac.forced_speed_duplex;
1613 autoneg = adapter->hw.mac.autoneg; 1605 autoneg = adapter->hw.mac.autoneg;
1614 1606
1615 ndev_info(netdev, "offline testing starting\n"); 1607 e_info("offline testing starting\n");
1616 1608
1617 /* 1609 /*
1618 * Link test performed before hardware reset so autoneg doesn't 1610 * Link test performed before hardware reset so autoneg doesn't
@@ -1658,7 +1650,7 @@ static void e1000_diag_test(struct net_device *netdev,
1658 if (if_running) 1650 if (if_running)
1659 dev_open(netdev); 1651 dev_open(netdev);
1660 } else { 1652 } else {
1661 ndev_info(netdev, "online testing starting\n"); 1653 e_info("online testing starting\n");
1662 /* Online tests */ 1654 /* Online tests */
1663 if (e1000_link_test(adapter, &data[4])) 1655 if (e1000_link_test(adapter, &data[4]))
1664 eth_test->flags |= ETH_TEST_FL_FAILED; 1656 eth_test->flags |= ETH_TEST_FL_FAILED;
@@ -1694,8 +1686,8 @@ static void e1000_get_wol(struct net_device *netdev,
1694 wol->supported &= ~WAKE_UCAST; 1686 wol->supported &= ~WAKE_UCAST;
1695 1687
1696 if (adapter->wol & E1000_WUFC_EX) 1688 if (adapter->wol & E1000_WUFC_EX)
1697 ndev_err(netdev, "Interface does not support " 1689 e_err("Interface does not support directed (unicast) "
1698 "directed (unicast) frame wake-up packets\n"); 1690 "frame wake-up packets\n");
1699 } 1691 }
1700 1692
1701 if (adapter->wol & E1000_WUFC_EX) 1693 if (adapter->wol & E1000_WUFC_EX)
diff --git a/drivers/net/e1000e/netdev.c b/drivers/net/e1000e/netdev.c
index d13677899767..05b0b2f9c54b 100644
--- a/drivers/net/e1000e/netdev.c
+++ b/drivers/net/e1000e/netdev.c
@@ -484,8 +484,8 @@ static bool e1000_clean_rx_irq(struct e1000_adapter *adapter,
484 * packet, also make sure the frame isn't just CRC only */ 484 * packet, also make sure the frame isn't just CRC only */
485 if (!(status & E1000_RXD_STAT_EOP) || (length <= 4)) { 485 if (!(status & E1000_RXD_STAT_EOP) || (length <= 4)) {
486 /* All receives must fit into a single buffer */ 486 /* All receives must fit into a single buffer */
487 ndev_dbg(netdev, "%s: Receive packet consumed " 487 e_dbg("%s: Receive packet consumed multiple buffers\n",
488 "multiple buffers\n", netdev->name); 488 netdev->name);
489 /* recycle */ 489 /* recycle */
490 buffer_info->skb = skb; 490 buffer_info->skb = skb;
491 goto next_desc; 491 goto next_desc;
@@ -576,28 +576,26 @@ static void e1000_print_tx_hang(struct e1000_adapter *adapter)
576 unsigned int i = tx_ring->next_to_clean; 576 unsigned int i = tx_ring->next_to_clean;
577 unsigned int eop = tx_ring->buffer_info[i].next_to_watch; 577 unsigned int eop = tx_ring->buffer_info[i].next_to_watch;
578 struct e1000_tx_desc *eop_desc = E1000_TX_DESC(*tx_ring, eop); 578 struct e1000_tx_desc *eop_desc = E1000_TX_DESC(*tx_ring, eop);
579 struct net_device *netdev = adapter->netdev;
580 579
581 /* detected Tx unit hang */ 580 /* detected Tx unit hang */
582 ndev_err(netdev, 581 e_err("Detected Tx Unit Hang:\n"
583 "Detected Tx Unit Hang:\n" 582 " TDH <%x>\n"
584 " TDH <%x>\n" 583 " TDT <%x>\n"
585 " TDT <%x>\n" 584 " next_to_use <%x>\n"
586 " next_to_use <%x>\n" 585 " next_to_clean <%x>\n"
587 " next_to_clean <%x>\n" 586 "buffer_info[next_to_clean]:\n"
588 "buffer_info[next_to_clean]:\n" 587 " time_stamp <%lx>\n"
589 " time_stamp <%lx>\n" 588 " next_to_watch <%x>\n"
590 " next_to_watch <%x>\n" 589 " jiffies <%lx>\n"
591 " jiffies <%lx>\n" 590 " next_to_watch.status <%x>\n",
592 " next_to_watch.status <%x>\n", 591 readl(adapter->hw.hw_addr + tx_ring->head),
593 readl(adapter->hw.hw_addr + tx_ring->head), 592 readl(adapter->hw.hw_addr + tx_ring->tail),
594 readl(adapter->hw.hw_addr + tx_ring->tail), 593 tx_ring->next_to_use,
595 tx_ring->next_to_use, 594 tx_ring->next_to_clean,
596 tx_ring->next_to_clean, 595 tx_ring->buffer_info[eop].time_stamp,
597 tx_ring->buffer_info[eop].time_stamp, 596 eop,
598 eop, 597 jiffies,
599 jiffies, 598 eop_desc->upper.fields.status);
600 eop_desc->upper.fields.status);
601} 599}
602 600
603/** 601/**
@@ -747,8 +745,8 @@ static bool e1000_clean_rx_irq_ps(struct e1000_adapter *adapter,
747 buffer_info->dma = 0; 745 buffer_info->dma = 0;
748 746
749 if (!(staterr & E1000_RXD_STAT_EOP)) { 747 if (!(staterr & E1000_RXD_STAT_EOP)) {
750 ndev_dbg(netdev, "%s: Packet Split buffers didn't pick " 748 e_dbg("%s: Packet Split buffers didn't pick up the "
751 "up the full packet\n", netdev->name); 749 "full packet\n", netdev->name);
752 dev_kfree_skb_irq(skb); 750 dev_kfree_skb_irq(skb);
753 goto next_desc; 751 goto next_desc;
754 } 752 }
@@ -761,8 +759,8 @@ static bool e1000_clean_rx_irq_ps(struct e1000_adapter *adapter,
761 length = le16_to_cpu(rx_desc->wb.middle.length0); 759 length = le16_to_cpu(rx_desc->wb.middle.length0);
762 760
763 if (!length) { 761 if (!length) {
764 ndev_dbg(netdev, "%s: Last part of the packet spanning" 762 e_dbg("%s: Last part of the packet spanning multiple "
765 " multiple descriptors\n", netdev->name); 763 "descriptors\n", netdev->name);
766 dev_kfree_skb_irq(skb); 764 dev_kfree_skb_irq(skb);
767 goto next_desc; 765 goto next_desc;
768 } 766 }
@@ -1011,7 +1009,7 @@ static bool e1000_clean_jumbo_rx_irq(struct e1000_adapter *adapter,
1011 1009
1012 /* eth type trans needs skb->data to point to something */ 1010 /* eth type trans needs skb->data to point to something */
1013 if (!pskb_may_pull(skb, ETH_HLEN)) { 1011 if (!pskb_may_pull(skb, ETH_HLEN)) {
1014 ndev_err(netdev, "pskb_may_pull failed.\n"); 1012 e_err("pskb_may_pull failed.\n");
1015 dev_kfree_skb(skb); 1013 dev_kfree_skb(skb);
1016 goto next_desc; 1014 goto next_desc;
1017 } 1015 }
@@ -1251,10 +1249,8 @@ static int e1000_request_irq(struct e1000_adapter *adapter)
1251 err = request_irq(adapter->pdev->irq, handler, irq_flags, netdev->name, 1249 err = request_irq(adapter->pdev->irq, handler, irq_flags, netdev->name,
1252 netdev); 1250 netdev);
1253 if (err) { 1251 if (err) {
1254 ndev_err(netdev, 1252 e_err("Unable to allocate %s interrupt (return: %d)\n",
1255 "Unable to allocate %s interrupt (return: %d)\n", 1253 adapter->flags & FLAG_MSI_ENABLED ? "MSI":"INTx", err);
1256 adapter->flags & FLAG_MSI_ENABLED ? "MSI":"INTx",
1257 err);
1258 if (adapter->flags & FLAG_MSI_ENABLED) 1254 if (adapter->flags & FLAG_MSI_ENABLED)
1259 pci_disable_msi(adapter->pdev); 1255 pci_disable_msi(adapter->pdev);
1260 } 1256 }
@@ -1395,8 +1391,7 @@ int e1000e_setup_tx_resources(struct e1000_adapter *adapter)
1395 return 0; 1391 return 0;
1396err: 1392err:
1397 vfree(tx_ring->buffer_info); 1393 vfree(tx_ring->buffer_info);
1398 ndev_err(adapter->netdev, 1394 e_err("Unable to allocate memory for the transmit descriptor ring\n");
1399 "Unable to allocate memory for the transmit descriptor ring\n");
1400 return err; 1395 return err;
1401} 1396}
1402 1397
@@ -1450,8 +1445,7 @@ err_pages:
1450 } 1445 }
1451err: 1446err:
1452 vfree(rx_ring->buffer_info); 1447 vfree(rx_ring->buffer_info);
1453 ndev_err(adapter->netdev, 1448 e_err("Unable to allocate memory for the transmit descriptor ring\n");
1454 "Unable to allocate memory for the transmit descriptor ring\n");
1455 return err; 1449 return err;
1456} 1450}
1457 1451
@@ -2450,13 +2444,13 @@ void e1000e_reset(struct e1000_adapter *adapter)
2450 * For parts with AMT enabled, let the firmware know 2444 * For parts with AMT enabled, let the firmware know
2451 * that the network interface is in control 2445 * that the network interface is in control
2452 */ 2446 */
2453 if ((adapter->flags & FLAG_HAS_AMT) && e1000e_check_mng_mode(hw)) 2447 if (adapter->flags & FLAG_HAS_AMT)
2454 e1000_get_hw_control(adapter); 2448 e1000_get_hw_control(adapter);
2455 2449
2456 ew32(WUC, 0); 2450 ew32(WUC, 0);
2457 2451
2458 if (mac->ops.init_hw(hw)) 2452 if (mac->ops.init_hw(hw))
2459 ndev_err(adapter->netdev, "Hardware Error\n"); 2453 e_err("Hardware Error\n");
2460 2454
2461 e1000_update_mng_vlan(adapter); 2455 e1000_update_mng_vlan(adapter);
2462 2456
@@ -2591,7 +2585,7 @@ static int __devinit e1000_sw_init(struct e1000_adapter *adapter)
2591 return 0; 2585 return 0;
2592 2586
2593err: 2587err:
2594 ndev_err(netdev, "Unable to allocate memory for queues\n"); 2588 e_err("Unable to allocate memory for queues\n");
2595 kfree(adapter->rx_ring); 2589 kfree(adapter->rx_ring);
2596 kfree(adapter->tx_ring); 2590 kfree(adapter->tx_ring);
2597 return -ENOMEM; 2591 return -ENOMEM;
@@ -2640,8 +2634,7 @@ static int e1000_open(struct net_device *netdev)
2640 * If AMT is enabled, let the firmware know that the network 2634 * If AMT is enabled, let the firmware know that the network
2641 * interface is now open 2635 * interface is now open
2642 */ 2636 */
2643 if ((adapter->flags & FLAG_HAS_AMT) && 2637 if (adapter->flags & FLAG_HAS_AMT)
2644 e1000e_check_mng_mode(&adapter->hw))
2645 e1000_get_hw_control(adapter); 2638 e1000_get_hw_control(adapter);
2646 2639
2647 /* 2640 /*
@@ -2719,8 +2712,7 @@ static int e1000_close(struct net_device *netdev)
2719 * If AMT is enabled, let the firmware know that the network 2712 * If AMT is enabled, let the firmware know that the network
2720 * interface is now closed 2713 * interface is now closed
2721 */ 2714 */
2722 if ((adapter->flags & FLAG_HAS_AMT) && 2715 if (adapter->flags & FLAG_HAS_AMT)
2723 e1000e_check_mng_mode(&adapter->hw))
2724 e1000_release_hw_control(adapter); 2716 e1000_release_hw_control(adapter);
2725 2717
2726 return 0; 2718 return 0;
@@ -2917,8 +2909,7 @@ static void e1000_phy_read_status(struct e1000_adapter *adapter)
2917 ret_val |= e1e_rphy(hw, PHY_1000T_STATUS, &phy->stat1000); 2909 ret_val |= e1e_rphy(hw, PHY_1000T_STATUS, &phy->stat1000);
2918 ret_val |= e1e_rphy(hw, PHY_EXT_STATUS, &phy->estatus); 2910 ret_val |= e1e_rphy(hw, PHY_EXT_STATUS, &phy->estatus);
2919 if (ret_val) 2911 if (ret_val)
2920 ndev_warn(adapter->netdev, 2912 e_warn("Error reading PHY register\n");
2921 "Error reading PHY register\n");
2922 } else { 2913 } else {
2923 /* 2914 /*
2924 * Do not read PHY registers if link is not up 2915 * Do not read PHY registers if link is not up
@@ -2943,18 +2934,16 @@ static void e1000_phy_read_status(struct e1000_adapter *adapter)
2943static void e1000_print_link_info(struct e1000_adapter *adapter) 2934static void e1000_print_link_info(struct e1000_adapter *adapter)
2944{ 2935{
2945 struct e1000_hw *hw = &adapter->hw; 2936 struct e1000_hw *hw = &adapter->hw;
2946 struct net_device *netdev = adapter->netdev;
2947 u32 ctrl = er32(CTRL); 2937 u32 ctrl = er32(CTRL);
2948 2938
2949 ndev_info(netdev, 2939 e_info("Link is Up %d Mbps %s, Flow Control: %s\n",
2950 "Link is Up %d Mbps %s, Flow Control: %s\n", 2940 adapter->link_speed,
2951 adapter->link_speed, 2941 (adapter->link_duplex == FULL_DUPLEX) ?
2952 (adapter->link_duplex == FULL_DUPLEX) ? 2942 "Full Duplex" : "Half Duplex",
2953 "Full Duplex" : "Half Duplex", 2943 ((ctrl & E1000_CTRL_TFCE) && (ctrl & E1000_CTRL_RFCE)) ?
2954 ((ctrl & E1000_CTRL_TFCE) && (ctrl & E1000_CTRL_RFCE)) ? 2944 "RX/TX" :
2955 "RX/TX" : 2945 ((ctrl & E1000_CTRL_RFCE) ? "RX" :
2956 ((ctrl & E1000_CTRL_RFCE) ? "RX" : 2946 ((ctrl & E1000_CTRL_TFCE) ? "TX" : "None" )));
2957 ((ctrl & E1000_CTRL_TFCE) ? "TX" : "None" )));
2958} 2947}
2959 2948
2960static bool e1000_has_link(struct e1000_adapter *adapter) 2949static bool e1000_has_link(struct e1000_adapter *adapter)
@@ -2994,8 +2983,7 @@ static bool e1000_has_link(struct e1000_adapter *adapter)
2994 if ((ret_val == E1000_ERR_PHY) && (hw->phy.type == e1000_phy_igp_3) && 2983 if ((ret_val == E1000_ERR_PHY) && (hw->phy.type == e1000_phy_igp_3) &&
2995 (er32(CTRL) & E1000_PHY_CTRL_GBE_DISABLE)) { 2984 (er32(CTRL) & E1000_PHY_CTRL_GBE_DISABLE)) {
2996 /* See e1000_kmrn_lock_loss_workaround_ich8lan() */ 2985 /* See e1000_kmrn_lock_loss_workaround_ich8lan() */
2997 ndev_info(adapter->netdev, 2986 e_info("Gigabit has been disabled, downgrading speed\n");
2998 "Gigabit has been disabled, downgrading speed\n");
2999 } 2987 }
3000 2988
3001 return link_active; 2989 return link_active;
@@ -3096,8 +3084,7 @@ static void e1000_watchdog_task(struct work_struct *work)
3096 switch (adapter->link_speed) { 3084 switch (adapter->link_speed) {
3097 case SPEED_10: 3085 case SPEED_10:
3098 case SPEED_100: 3086 case SPEED_100:
3099 ndev_info(netdev, 3087 e_info("10/100 speed: disabling TSO\n");
3100 "10/100 speed: disabling TSO\n");
3101 netdev->features &= ~NETIF_F_TSO; 3088 netdev->features &= ~NETIF_F_TSO;
3102 netdev->features &= ~NETIF_F_TSO6; 3089 netdev->features &= ~NETIF_F_TSO6;
3103 break; 3090 break;
@@ -3130,7 +3117,7 @@ static void e1000_watchdog_task(struct work_struct *work)
3130 if (netif_carrier_ok(netdev)) { 3117 if (netif_carrier_ok(netdev)) {
3131 adapter->link_speed = 0; 3118 adapter->link_speed = 0;
3132 adapter->link_duplex = 0; 3119 adapter->link_duplex = 0;
3133 ndev_info(netdev, "Link is Down\n"); 3120 e_info("Link is Down\n");
3134 netif_carrier_off(netdev); 3121 netif_carrier_off(netdev);
3135 netif_tx_stop_all_queues(netdev); 3122 netif_tx_stop_all_queues(netdev);
3136 if (!test_bit(__E1000_DOWN, &adapter->state)) 3123 if (!test_bit(__E1000_DOWN, &adapter->state))
@@ -3604,8 +3591,7 @@ static int e1000_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
3604 3591
3605 pull_size = min((unsigned int)4, skb->data_len); 3592 pull_size = min((unsigned int)4, skb->data_len);
3606 if (!__pskb_pull_tail(skb, pull_size)) { 3593 if (!__pskb_pull_tail(skb, pull_size)) {
3607 ndev_err(netdev, 3594 e_err("__pskb_pull_tail failed.\n");
3608 "__pskb_pull_tail failed.\n");
3609 dev_kfree_skb_any(skb); 3595 dev_kfree_skb_any(skb);
3610 return NETDEV_TX_OK; 3596 return NETDEV_TX_OK;
3611 } 3597 }
@@ -3737,25 +3723,25 @@ static int e1000_change_mtu(struct net_device *netdev, int new_mtu)
3737 3723
3738 if ((max_frame < ETH_ZLEN + ETH_FCS_LEN) || 3724 if ((max_frame < ETH_ZLEN + ETH_FCS_LEN) ||
3739 (max_frame > MAX_JUMBO_FRAME_SIZE)) { 3725 (max_frame > MAX_JUMBO_FRAME_SIZE)) {
3740 ndev_err(netdev, "Invalid MTU setting\n"); 3726 e_err("Invalid MTU setting\n");
3741 return -EINVAL; 3727 return -EINVAL;
3742 } 3728 }
3743 3729
3744 /* Jumbo frame size limits */ 3730 /* Jumbo frame size limits */
3745 if (max_frame > ETH_FRAME_LEN + ETH_FCS_LEN) { 3731 if (max_frame > ETH_FRAME_LEN + ETH_FCS_LEN) {
3746 if (!(adapter->flags & FLAG_HAS_JUMBO_FRAMES)) { 3732 if (!(adapter->flags & FLAG_HAS_JUMBO_FRAMES)) {
3747 ndev_err(netdev, "Jumbo Frames not supported.\n"); 3733 e_err("Jumbo Frames not supported.\n");
3748 return -EINVAL; 3734 return -EINVAL;
3749 } 3735 }
3750 if (adapter->hw.phy.type == e1000_phy_ife) { 3736 if (adapter->hw.phy.type == e1000_phy_ife) {
3751 ndev_err(netdev, "Jumbo Frames not supported.\n"); 3737 e_err("Jumbo Frames not supported.\n");
3752 return -EINVAL; 3738 return -EINVAL;
3753 } 3739 }
3754 } 3740 }
3755 3741
3756#define MAX_STD_JUMBO_FRAME_SIZE 9234 3742#define MAX_STD_JUMBO_FRAME_SIZE 9234
3757 if (max_frame > MAX_STD_JUMBO_FRAME_SIZE) { 3743 if (max_frame > MAX_STD_JUMBO_FRAME_SIZE) {
3758 ndev_err(netdev, "MTU > 9216 not supported.\n"); 3744 e_err("MTU > 9216 not supported.\n");
3759 return -EINVAL; 3745 return -EINVAL;
3760 } 3746 }
3761 3747
@@ -3792,8 +3778,7 @@ static int e1000_change_mtu(struct net_device *netdev, int new_mtu)
3792 adapter->rx_buffer_len = ETH_FRAME_LEN + VLAN_HLEN 3778 adapter->rx_buffer_len = ETH_FRAME_LEN + VLAN_HLEN
3793 + ETH_FCS_LEN; 3779 + ETH_FCS_LEN;
3794 3780
3795 ndev_info(netdev, "changing MTU from %d to %d\n", 3781 e_info("changing MTU from %d to %d\n", netdev->mtu, new_mtu);
3796 netdev->mtu, new_mtu);
3797 netdev->mtu = new_mtu; 3782 netdev->mtu = new_mtu;
3798 3783
3799 if (netif_running(netdev)) 3784 if (netif_running(netdev))
@@ -4006,10 +3991,7 @@ static int e1000_resume(struct pci_dev *pdev)
4006 pci_restore_state(pdev); 3991 pci_restore_state(pdev);
4007 e1000e_disable_l1aspm(pdev); 3992 e1000e_disable_l1aspm(pdev);
4008 3993
4009 if (adapter->need_ioport) 3994 err = pci_enable_device_mem(pdev);
4010 err = pci_enable_device(pdev);
4011 else
4012 err = pci_enable_device_mem(pdev);
4013 if (err) { 3995 if (err) {
4014 dev_err(&pdev->dev, 3996 dev_err(&pdev->dev,
4015 "Cannot enable PCI device from suspend\n"); 3997 "Cannot enable PCI device from suspend\n");
@@ -4043,7 +4025,7 @@ static int e1000_resume(struct pci_dev *pdev)
4043 * is up. For all other cases, let the f/w know that the h/w is now 4025 * is up. For all other cases, let the f/w know that the h/w is now
4044 * under the control of the driver. 4026 * under the control of the driver.
4045 */ 4027 */
4046 if (!(adapter->flags & FLAG_HAS_AMT) || !e1000e_check_mng_mode(&adapter->hw)) 4028 if (!(adapter->flags & FLAG_HAS_AMT))
4047 e1000_get_hw_control(adapter); 4029 e1000_get_hw_control(adapter);
4048 4030
4049 return 0; 4031 return 0;
@@ -4111,10 +4093,7 @@ static pci_ers_result_t e1000_io_slot_reset(struct pci_dev *pdev)
4111 int err; 4093 int err;
4112 4094
4113 e1000e_disable_l1aspm(pdev); 4095 e1000e_disable_l1aspm(pdev);
4114 if (adapter->need_ioport) 4096 err = pci_enable_device_mem(pdev);
4115 err = pci_enable_device(pdev);
4116 else
4117 err = pci_enable_device_mem(pdev);
4118 if (err) { 4097 if (err) {
4119 dev_err(&pdev->dev, 4098 dev_err(&pdev->dev,
4120 "Cannot re-enable PCI device after reset.\n"); 4099 "Cannot re-enable PCI device after reset.\n");
@@ -4162,8 +4141,7 @@ static void e1000_io_resume(struct pci_dev *pdev)
4162 * is up. For all other cases, let the f/w know that the h/w is now 4141 * is up. For all other cases, let the f/w know that the h/w is now
4163 * under the control of the driver. 4142 * under the control of the driver.
4164 */ 4143 */
4165 if (!(adapter->flags & FLAG_HAS_AMT) || 4144 if (!(adapter->flags & FLAG_HAS_AMT))
4166 !e1000e_check_mng_mode(&adapter->hw))
4167 e1000_get_hw_control(adapter); 4145 e1000_get_hw_control(adapter);
4168 4146
4169} 4147}
@@ -4175,36 +4153,40 @@ static void e1000_print_device_info(struct e1000_adapter *adapter)
4175 u32 pba_num; 4153 u32 pba_num;
4176 4154
4177 /* print bus type/speed/width info */ 4155 /* print bus type/speed/width info */
4178 ndev_info(netdev, "(PCI Express:2.5GB/s:%s) " 4156 e_info("(PCI Express:2.5GB/s:%s) %02x:%02x:%02x:%02x:%02x:%02x\n",
4179 "%02x:%02x:%02x:%02x:%02x:%02x\n", 4157 /* bus width */
4180 /* bus width */ 4158 ((hw->bus.width == e1000_bus_width_pcie_x4) ? "Width x4" :
4181 ((hw->bus.width == e1000_bus_width_pcie_x4) ? "Width x4" : 4159 "Width x1"),
4182 "Width x1"), 4160 /* MAC address */
4183 /* MAC address */ 4161 netdev->dev_addr[0], netdev->dev_addr[1],
4184 netdev->dev_addr[0], netdev->dev_addr[1], 4162 netdev->dev_addr[2], netdev->dev_addr[3],
4185 netdev->dev_addr[2], netdev->dev_addr[3], 4163 netdev->dev_addr[4], netdev->dev_addr[5]);
4186 netdev->dev_addr[4], netdev->dev_addr[5]); 4164 e_info("Intel(R) PRO/%s Network Connection\n",
4187 ndev_info(netdev, "Intel(R) PRO/%s Network Connection\n", 4165 (hw->phy.type == e1000_phy_ife) ? "10/100" : "1000");
4188 (hw->phy.type == e1000_phy_ife)
4189 ? "10/100" : "1000");
4190 e1000e_read_pba_num(hw, &pba_num); 4166 e1000e_read_pba_num(hw, &pba_num);
4191 ndev_info(netdev, "MAC: %d, PHY: %d, PBA No: %06x-%03x\n", 4167 e_info("MAC: %d, PHY: %d, PBA No: %06x-%03x\n",
4192 hw->mac.type, hw->phy.type, 4168 hw->mac.type, hw->phy.type, (pba_num >> 8), (pba_num & 0xff));
4193 (pba_num >> 8), (pba_num & 0xff));
4194} 4169}
4195 4170
4196/** 4171static void e1000_eeprom_checks(struct e1000_adapter *adapter)
4197 * e1000e_is_need_ioport - determine if an adapter needs ioport resources or not
4198 * @pdev: PCI device information struct
4199 *
4200 * Returns true if an adapters needs ioport resources
4201 **/
4202static int e1000e_is_need_ioport(struct pci_dev *pdev)
4203{ 4172{
4204 switch (pdev->device) { 4173 struct e1000_hw *hw = &adapter->hw;
4205 /* Currently there are no adapters that need ioport resources */ 4174 int ret_val;
4206 default: 4175 u16 buf = 0;
4207 return false; 4176
4177 if (hw->mac.type != e1000_82573)
4178 return;
4179
4180 ret_val = e1000_read_nvm(hw, NVM_INIT_CONTROL2_REG, 1, &buf);
4181 if (!(le16_to_cpu(buf) & (1 << 0))) {
4182 /* Deep Smart Power Down (DSPD) */
4183 e_warn("Warning: detected DSPD enabled in EEPROM\n");
4184 }
4185
4186 ret_val = e1000_read_nvm(hw, NVM_INIT_3GIO_3, 1, &buf);
4187 if (le16_to_cpu(buf) & (3 << 2)) {
4188 /* ASPM enable */
4189 e_warn("Warning: detected ASPM enabled in EEPROM\n");
4208 } 4190 }
4209} 4191}
4210 4192
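Note: the new e1000_eeprom_checks() above reads two NVM words on 82573 parts and warns on their contents: the Deep Smart Power Down warning fires when bit 0 of NVM_INIT_CONTROL2_REG is clear, and the ASPM warning fires when either of bits 2-3 of NVM_INIT_3GIO_3 is set; the words come back little-endian, hence the le16_to_cpu() before testing. A tiny standalone version of the two mask tests, with made-up register values:

#include <stdio.h>

int main(void)
{
        unsigned short init_control2 = 0x0002;  /* made-up value: bit 0 clear   */
        unsigned short init_3gio_3   = 0x0008;  /* made-up value: bit 3 set     */

        /* Bit 0 clear -> Deep Smart Power Down warning. */
        if (!(init_control2 & (1 << 0)))
                printf("Warning: detected DSPD enabled in EEPROM\n");

        /* Either of bits 2-3 set (mask 3 << 2 == 0x0c) -> ASPM warning. */
        if (init_3gio_3 & (3 << 2))
                printf("Warning: detected ASPM enabled in EEPROM\n");

        return 0;
}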
@@ -4233,19 +4215,10 @@ static int __devinit e1000_probe(struct pci_dev *pdev,
4233 int i, err, pci_using_dac; 4215 int i, err, pci_using_dac;
4234 u16 eeprom_data = 0; 4216 u16 eeprom_data = 0;
4235 u16 eeprom_apme_mask = E1000_EEPROM_APME; 4217 u16 eeprom_apme_mask = E1000_EEPROM_APME;
4236 int bars, need_ioport;
4237 4218
4238 e1000e_disable_l1aspm(pdev); 4219 e1000e_disable_l1aspm(pdev);
4239 4220
4240 /* do not allocate ioport bars when not needed */ 4221 err = pci_enable_device_mem(pdev);
4241 need_ioport = e1000e_is_need_ioport(pdev);
4242 if (need_ioport) {
4243 bars = pci_select_bars(pdev, IORESOURCE_MEM | IORESOURCE_IO);
4244 err = pci_enable_device(pdev);
4245 } else {
4246 bars = pci_select_bars(pdev, IORESOURCE_MEM);
4247 err = pci_enable_device_mem(pdev);
4248 }
4249 if (err) 4222 if (err)
4250 return err; 4223 return err;
4251 4224
@@ -4268,7 +4241,9 @@ static int __devinit e1000_probe(struct pci_dev *pdev,
4268 } 4241 }
4269 } 4242 }
4270 4243
4271 err = pci_request_selected_regions(pdev, bars, e1000e_driver_name); 4244 err = pci_request_selected_regions(pdev,
4245 pci_select_bars(pdev, IORESOURCE_MEM),
4246 e1000e_driver_name);
4272 if (err) 4247 if (err)
4273 goto err_pci_reg; 4248 goto err_pci_reg;
4274 4249
@@ -4293,8 +4268,6 @@ static int __devinit e1000_probe(struct pci_dev *pdev,
4293 adapter->hw.adapter = adapter; 4268 adapter->hw.adapter = adapter;
4294 adapter->hw.mac.type = ei->mac; 4269 adapter->hw.mac.type = ei->mac;
4295 adapter->msg_enable = (1 << NETIF_MSG_DRV | NETIF_MSG_PROBE) - 1; 4270 adapter->msg_enable = (1 << NETIF_MSG_DRV | NETIF_MSG_PROBE) - 1;
4296 adapter->bars = bars;
4297 adapter->need_ioport = need_ioport;
4298 4271
4299 mmio_start = pci_resource_start(pdev, 0); 4272 mmio_start = pci_resource_start(pdev, 0);
4300 mmio_len = pci_resource_len(pdev, 0); 4273 mmio_len = pci_resource_len(pdev, 0);
@@ -4366,8 +4339,7 @@ static int __devinit e1000_probe(struct pci_dev *pdev,
4366 } 4339 }
4367 4340
4368 if (e1000_check_reset_block(&adapter->hw)) 4341 if (e1000_check_reset_block(&adapter->hw))
4369 ndev_info(netdev, 4342 e_info("PHY reset is blocked due to SOL/IDER session.\n");
4370 "PHY reset is blocked due to SOL/IDER session.\n");
4371 4343
4372 netdev->features = NETIF_F_SG | 4344 netdev->features = NETIF_F_SG |
4373 NETIF_F_HW_CSUM | 4345 NETIF_F_HW_CSUM |
@@ -4411,25 +4383,26 @@ static int __devinit e1000_probe(struct pci_dev *pdev,
4411 if (e1000_validate_nvm_checksum(&adapter->hw) >= 0) 4383 if (e1000_validate_nvm_checksum(&adapter->hw) >= 0)
4412 break; 4384 break;
4413 if (i == 2) { 4385 if (i == 2) {
4414 ndev_err(netdev, "The NVM Checksum Is Not Valid\n"); 4386 e_err("The NVM Checksum Is Not Valid\n");
4415 err = -EIO; 4387 err = -EIO;
4416 goto err_eeprom; 4388 goto err_eeprom;
4417 } 4389 }
4418 } 4390 }
4419 4391
4392 e1000_eeprom_checks(adapter);
4393
4420 /* copy the MAC address out of the NVM */ 4394 /* copy the MAC address out of the NVM */
4421 if (e1000e_read_mac_addr(&adapter->hw)) 4395 if (e1000e_read_mac_addr(&adapter->hw))
4422 ndev_err(netdev, "NVM Read Error while reading MAC address\n"); 4396 e_err("NVM Read Error while reading MAC address\n");
4423 4397
4424 memcpy(netdev->dev_addr, adapter->hw.mac.addr, netdev->addr_len); 4398 memcpy(netdev->dev_addr, adapter->hw.mac.addr, netdev->addr_len);
4425 memcpy(netdev->perm_addr, adapter->hw.mac.addr, netdev->addr_len); 4399 memcpy(netdev->perm_addr, adapter->hw.mac.addr, netdev->addr_len);
4426 4400
4427 if (!is_valid_ether_addr(netdev->perm_addr)) { 4401 if (!is_valid_ether_addr(netdev->perm_addr)) {
4428 ndev_err(netdev, "Invalid MAC Address: " 4402 e_err("Invalid MAC Address: %02x:%02x:%02x:%02x:%02x:%02x\n",
4429 "%02x:%02x:%02x:%02x:%02x:%02x\n", 4403 netdev->perm_addr[0], netdev->perm_addr[1],
4430 netdev->perm_addr[0], netdev->perm_addr[1], 4404 netdev->perm_addr[2], netdev->perm_addr[3],
4431 netdev->perm_addr[2], netdev->perm_addr[3], 4405 netdev->perm_addr[4], netdev->perm_addr[5]);
4432 netdev->perm_addr[4], netdev->perm_addr[5]);
4433 err = -EIO; 4406 err = -EIO;
4434 goto err_eeprom; 4407 goto err_eeprom;
4435 } 4408 }
@@ -4499,8 +4472,7 @@ static int __devinit e1000_probe(struct pci_dev *pdev,
4499 * is up. For all other cases, let the f/w know that the h/w is now 4472 * is up. For all other cases, let the f/w know that the h/w is now
4500 * under the control of the driver. 4473 * under the control of the driver.
4501 */ 4474 */
4502 if (!(adapter->flags & FLAG_HAS_AMT) || 4475 if (!(adapter->flags & FLAG_HAS_AMT))
4503 !e1000e_check_mng_mode(&adapter->hw))
4504 e1000_get_hw_control(adapter); 4476 e1000_get_hw_control(adapter);
4505 4477
4506 /* tell the stack to leave us alone until e1000_open() is called */ 4478 /* tell the stack to leave us alone until e1000_open() is called */
@@ -4517,24 +4489,25 @@ static int __devinit e1000_probe(struct pci_dev *pdev,
4517 return 0; 4489 return 0;
4518 4490
4519err_register: 4491err_register:
4520err_hw_init: 4492 if (!(adapter->flags & FLAG_HAS_AMT))
4521 e1000_release_hw_control(adapter); 4493 e1000_release_hw_control(adapter);
4522err_eeprom: 4494err_eeprom:
4523 if (!e1000_check_reset_block(&adapter->hw)) 4495 if (!e1000_check_reset_block(&adapter->hw))
4524 e1000_phy_hw_reset(&adapter->hw); 4496 e1000_phy_hw_reset(&adapter->hw);
4497err_hw_init:
4525 4498
4526 if (adapter->hw.flash_address)
4527 iounmap(adapter->hw.flash_address);
4528
4529err_flashmap:
4530 kfree(adapter->tx_ring); 4499 kfree(adapter->tx_ring);
4531 kfree(adapter->rx_ring); 4500 kfree(adapter->rx_ring);
4532err_sw_init: 4501err_sw_init:
4502 if (adapter->hw.flash_address)
4503 iounmap(adapter->hw.flash_address);
4504err_flashmap:
4533 iounmap(adapter->hw.hw_addr); 4505 iounmap(adapter->hw.hw_addr);
4534err_ioremap: 4506err_ioremap:
4535 free_netdev(netdev); 4507 free_netdev(netdev);
4536err_alloc_etherdev: 4508err_alloc_etherdev:
4537 pci_release_selected_regions(pdev, bars); 4509 pci_release_selected_regions(pdev,
4510 pci_select_bars(pdev, IORESOURCE_MEM));
4538err_pci_reg: 4511err_pci_reg:
4539err_dma: 4512err_dma:
4540 pci_disable_device(pdev); 4513 pci_disable_device(pdev);
@@ -4582,7 +4555,8 @@ static void __devexit e1000_remove(struct pci_dev *pdev)
4582 iounmap(adapter->hw.hw_addr); 4555 iounmap(adapter->hw.hw_addr);
4583 if (adapter->hw.flash_address) 4556 if (adapter->hw.flash_address)
4584 iounmap(adapter->hw.flash_address); 4557 iounmap(adapter->hw.flash_address);
4585 pci_release_selected_regions(pdev, adapter->bars); 4558 pci_release_selected_regions(pdev,
4559 pci_select_bars(pdev, IORESOURCE_MEM));
4586 4560
4587 free_netdev(netdev); 4561 free_netdev(netdev);
4588 4562
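Note: besides the e_err()/e_info() conversions, the netdev.c diff drops the need_ioport/bars bookkeeping in favour of pci_enable_device_mem() plus pci_select_bars(pdev, IORESOURCE_MEM) at each call site, and it reorders the probe error labels so that each label undoes only what had been set up when the failure occurred (the rings are freed before the flash mapping is unmapped, and hardware control is released only on the AMT path). The label ordering is the usual reverse-order goto unwind; a compact standalone sketch with made-up resources:

#include <stdio.h>
#include <stdlib.h>

/* Made-up resources standing in for the ioremap, ring allocation and
 * netdev registration steps of a probe routine; fail_at picks the step
 * that is simulated to fail. */
static int probe(int fail_at)
{
        char *mmio, *rings;

        mmio = (fail_at == 1) ? NULL : malloc(16);
        if (!mmio)
                goto err_ioremap;               /* nothing acquired yet */

        rings = (fail_at == 2) ? NULL : malloc(16);
        if (!rings)
                goto err_sw_init;               /* undo the mapping only */

        if (fail_at == 3)
                goto err_register;              /* undo rings, then the mapping */

        printf("probe ok\n");
        free(rings);                            /* demo cleanup; a real probe keeps these */
        free(mmio);
        return 0;

        /* Labels run in reverse order of acquisition, each releasing one resource. */
err_register:
        free(rings);
err_sw_init:
        free(mmio);
err_ioremap:
        return -1;
}

int main(void)
{
        probe(2);       /* fails after the mapping: only the mapping is released */
        probe(0);       /* succeeds */
        return 0;
}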
diff --git a/drivers/net/e1000e/param.c b/drivers/net/e1000e/param.c
index a66b92efcf80..8effc3107f9a 100644
--- a/drivers/net/e1000e/param.c
+++ b/drivers/net/e1000e/param.c
@@ -27,6 +27,7 @@
27*******************************************************************************/ 27*******************************************************************************/
28 28
29#include <linux/netdevice.h> 29#include <linux/netdevice.h>
30#include <linux/pci.h>
30 31
31#include "e1000.h" 32#include "e1000.h"
32 33
@@ -162,17 +163,16 @@ static int __devinit e1000_validate_option(unsigned int *value,
162 case enable_option: 163 case enable_option:
163 switch (*value) { 164 switch (*value) {
164 case OPTION_ENABLED: 165 case OPTION_ENABLED:
165 ndev_info(adapter->netdev, "%s Enabled\n", opt->name); 166 e_info("%s Enabled\n", opt->name);
166 return 0; 167 return 0;
167 case OPTION_DISABLED: 168 case OPTION_DISABLED:
168 ndev_info(adapter->netdev, "%s Disabled\n", opt->name); 169 e_info("%s Disabled\n", opt->name);
169 return 0; 170 return 0;
170 } 171 }
171 break; 172 break;
172 case range_option: 173 case range_option:
173 if (*value >= opt->arg.r.min && *value <= opt->arg.r.max) { 174 if (*value >= opt->arg.r.min && *value <= opt->arg.r.max) {
174 ndev_info(adapter->netdev, 175 e_info("%s set to %i\n", opt->name, *value);
175 "%s set to %i\n", opt->name, *value);
176 return 0; 176 return 0;
177 } 177 }
178 break; 178 break;
@@ -184,8 +184,7 @@ static int __devinit e1000_validate_option(unsigned int *value,
184 ent = &opt->arg.l.p[i]; 184 ent = &opt->arg.l.p[i];
185 if (*value == ent->i) { 185 if (*value == ent->i) {
186 if (ent->str[0] != '\0') 186 if (ent->str[0] != '\0')
187 ndev_info(adapter->netdev, "%s\n", 187 e_info("%s\n", ent->str);
188 ent->str);
189 return 0; 188 return 0;
190 } 189 }
191 } 190 }
@@ -195,8 +194,8 @@ static int __devinit e1000_validate_option(unsigned int *value,
195 BUG(); 194 BUG();
196 } 195 }
197 196
198 ndev_info(adapter->netdev, "Invalid %s value specified (%i) %s\n", 197 e_info("Invalid %s value specified (%i) %s\n", opt->name, *value,
199 opt->name, *value, opt->err); 198 opt->err);
200 *value = opt->def; 199 *value = opt->def;
201 return -1; 200 return -1;
202} 201}
@@ -213,13 +212,11 @@ static int __devinit e1000_validate_option(unsigned int *value,
213void __devinit e1000e_check_options(struct e1000_adapter *adapter) 212void __devinit e1000e_check_options(struct e1000_adapter *adapter)
214{ 213{
215 struct e1000_hw *hw = &adapter->hw; 214 struct e1000_hw *hw = &adapter->hw;
216 struct net_device *netdev = adapter->netdev;
217 int bd = adapter->bd_number; 215 int bd = adapter->bd_number;
218 216
219 if (bd >= E1000_MAX_NIC) { 217 if (bd >= E1000_MAX_NIC) {
220 ndev_notice(netdev, 218 e_notice("Warning: no configuration for board #%i\n", bd);
221 "Warning: no configuration for board #%i\n", bd); 219 e_notice("Using defaults for all values\n");
222 ndev_notice(netdev, "Using defaults for all values\n");
223 } 220 }
224 221
225 { /* Transmit Interrupt Delay */ 222 { /* Transmit Interrupt Delay */
@@ -313,19 +310,15 @@ void __devinit e1000e_check_options(struct e1000_adapter *adapter)
313 adapter->itr = InterruptThrottleRate[bd]; 310 adapter->itr = InterruptThrottleRate[bd];
314 switch (adapter->itr) { 311 switch (adapter->itr) {
315 case 0: 312 case 0:
316 ndev_info(netdev, "%s turned off\n", 313 e_info("%s turned off\n", opt.name);
317 opt.name);
318 break; 314 break;
319 case 1: 315 case 1:
320 ndev_info(netdev, 316 e_info("%s set to dynamic mode\n", opt.name);
321 "%s set to dynamic mode\n",
322 opt.name);
323 adapter->itr_setting = adapter->itr; 317 adapter->itr_setting = adapter->itr;
324 adapter->itr = 20000; 318 adapter->itr = 20000;
325 break; 319 break;
326 case 3: 320 case 3:
327 ndev_info(netdev, 321 e_info("%s set to dynamic conservative mode\n",
328 "%s set to dynamic conservative mode\n",
329 opt.name); 322 opt.name);
330 adapter->itr_setting = adapter->itr; 323 adapter->itr_setting = adapter->itr;
331 adapter->itr = 20000; 324 adapter->itr = 20000;
diff --git a/drivers/net/eepro.c b/drivers/net/eepro.c
index 56f50491a453..1f11350e16cf 100644
--- a/drivers/net/eepro.c
+++ b/drivers/net/eepro.c
@@ -1283,14 +1283,6 @@ set_multicast_list(struct net_device *dev)
1283 1283
1284 if (dev->flags&(IFF_ALLMULTI|IFF_PROMISC) || dev->mc_count > 63) 1284 if (dev->flags&(IFF_ALLMULTI|IFF_PROMISC) || dev->mc_count > 63)
1285 { 1285 {
1286 /*
1287 * We must make the kernel realise we had to move
1288 * into promisc mode or we start all out war on
1289 * the cable. If it was a promisc request the
1290 * flag is already set. If not we assert it.
1291 */
1292 dev->flags|=IFF_PROMISC;
1293
1294 eepro_sw2bank2(ioaddr); /* be CAREFUL, BANK 2 now */ 1286 eepro_sw2bank2(ioaddr); /* be CAREFUL, BANK 2 now */
1295 mode = inb(ioaddr + REG2); 1287 mode = inb(ioaddr + REG2);
1296 outb(mode | PRMSC_Mode, ioaddr + REG2); 1288 outb(mode | PRMSC_Mode, ioaddr + REG2);
diff --git a/drivers/net/ehea/ehea_main.c b/drivers/net/ehea/ehea_main.c
index 0920b796bd78..b70c5314f537 100644
--- a/drivers/net/ehea/ehea_main.c
+++ b/drivers/net/ehea/ehea_main.c
@@ -2937,9 +2937,9 @@ static void ehea_rereg_mrs(struct work_struct *work)
2937 } 2937 }
2938 } 2938 }
2939 } 2939 }
2940 mutex_unlock(&dlpar_mem_lock); 2940 ehea_info("re-initializing driver complete");
2941 ehea_info("re-initializing driver complete");
2942out: 2941out:
2942 mutex_unlock(&dlpar_mem_lock);
2943 return; 2943 return;
2944} 2944}
2945 2945
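Note: the ehea hunk above fixes an unlock imbalance in the memory-rereg work: the old code released dlpar_mem_lock only on the path that reached the completion message, so an early goto out could return with the mutex still held; moving the unlock after the out: label gives every exit path a single release point. A standalone illustration of the same shape, with a pthread mutex standing in for the kernel mutex:

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

static void rereg_work(int fail_early)
{
        pthread_mutex_lock(&lock);

        if (fail_early)
                goto out;       /* early exit still falls through the unlock */

        printf("re-initializing complete\n");
out:
        pthread_mutex_unlock(&lock);    /* single release point for every path */
}

int main(void)
{
        rereg_work(1);
        rereg_work(0);  /* would block here if the early exit had skipped the
                           unlock, as in the pre-patch code */
        return 0;
}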
diff --git a/drivers/net/enc28j60.c b/drivers/net/enc28j60.c
index c05cb159c772..aa0bf6e1c694 100644
--- a/drivers/net/enc28j60.c
+++ b/drivers/net/enc28j60.c
@@ -1547,8 +1547,10 @@ static int __devinit enc28j60_probe(struct spi_device *spi)
1547 random_ether_addr(dev->dev_addr); 1547 random_ether_addr(dev->dev_addr);
1548 enc28j60_set_hw_macaddr(dev); 1548 enc28j60_set_hw_macaddr(dev);
1549 1549
1550 ret = request_irq(spi->irq, enc28j60_irq, IRQF_TRIGGER_FALLING, 1550 /* Board setup must set the relevant edge trigger type;
1551 DRV_NAME, priv); 1551 * level triggers won't currently work.
1552 */
1553 ret = request_irq(spi->irq, enc28j60_irq, 0, DRV_NAME, priv);
1552 if (ret < 0) { 1554 if (ret < 0) {
1553 if (netif_msg_probe(priv)) 1555 if (netif_msg_probe(priv))
1554 dev_err(&spi->dev, DRV_NAME ": request irq %d failed " 1556 dev_err(&spi->dev, DRV_NAME ": request irq %d failed "
diff --git a/drivers/net/eth16i.c b/drivers/net/eth16i.c
index e3dd8b136908..bee8b3fbc565 100644
--- a/drivers/net/eth16i.c
+++ b/drivers/net/eth16i.c
@@ -1356,7 +1356,6 @@ static void eth16i_multicast(struct net_device *dev)
1356 1356
1357 if(dev->mc_count || dev->flags&(IFF_ALLMULTI|IFF_PROMISC)) 1357 if(dev->mc_count || dev->flags&(IFF_ALLMULTI|IFF_PROMISC))
1358 { 1358 {
1359 dev->flags|=IFF_PROMISC; /* Must do this */
1360 outb(3, ioaddr + RECEIVE_MODE_REG); 1359 outb(3, ioaddr + RECEIVE_MODE_REG);
1361 } else { 1360 } else {
1362 outb(2, ioaddr + RECEIVE_MODE_REG); 1361 outb(2, ioaddr + RECEIVE_MODE_REG);
diff --git a/drivers/net/forcedeth.c b/drivers/net/forcedeth.c
index 4ed89fa9ae46..053971e5fc94 100644
--- a/drivers/net/forcedeth.c
+++ b/drivers/net/forcedeth.c
@@ -77,26 +77,27 @@
77 * Hardware access: 77 * Hardware access:
78 */ 78 */
79 79
80#define DEV_NEED_TIMERIRQ 0x00001 /* set the timer irq flag in the irq mask */ 80#define DEV_NEED_TIMERIRQ 0x000001 /* set the timer irq flag in the irq mask */
81#define DEV_NEED_LINKTIMER 0x00002 /* poll link settings. Relies on the timer irq */ 81#define DEV_NEED_LINKTIMER 0x000002 /* poll link settings. Relies on the timer irq */
82#define DEV_HAS_LARGEDESC 0x00004 /* device supports jumbo frames and needs packet format 2 */ 82#define DEV_HAS_LARGEDESC 0x000004 /* device supports jumbo frames and needs packet format 2 */
83#define DEV_HAS_HIGH_DMA 0x00008 /* device supports 64bit dma */ 83#define DEV_HAS_HIGH_DMA 0x000008 /* device supports 64bit dma */
84#define DEV_HAS_CHECKSUM 0x00010 /* device supports tx and rx checksum offloads */ 84#define DEV_HAS_CHECKSUM 0x000010 /* device supports tx and rx checksum offloads */
85#define DEV_HAS_VLAN 0x00020 /* device supports vlan tagging and striping */ 85#define DEV_HAS_VLAN 0x000020 /* device supports vlan tagging and striping */
86#define DEV_HAS_MSI 0x00040 /* device supports MSI */ 86#define DEV_HAS_MSI 0x000040 /* device supports MSI */
87#define DEV_HAS_MSI_X 0x00080 /* device supports MSI-X */ 87#define DEV_HAS_MSI_X 0x000080 /* device supports MSI-X */
88#define DEV_HAS_POWER_CNTRL 0x00100 /* device supports power savings */ 88#define DEV_HAS_POWER_CNTRL 0x000100 /* device supports power savings */
89#define DEV_HAS_STATISTICS_V1 0x00200 /* device supports hw statistics version 1 */ 89#define DEV_HAS_STATISTICS_V1 0x000200 /* device supports hw statistics version 1 */
90#define DEV_HAS_STATISTICS_V2 0x00400 /* device supports hw statistics version 2 */ 90#define DEV_HAS_STATISTICS_V2 0x000400 /* device supports hw statistics version 2 */
91#define DEV_HAS_TEST_EXTENDED 0x00800 /* device supports extended diagnostic test */ 91#define DEV_HAS_STATISTICS_V3 0x000800 /* device supports hw statistics version 3 */
92#define DEV_HAS_MGMT_UNIT 0x01000 /* device supports management unit */ 92#define DEV_HAS_TEST_EXTENDED 0x001000 /* device supports extended diagnostic test */
93#define DEV_HAS_CORRECT_MACADDR 0x02000 /* device supports correct mac address order */ 93#define DEV_HAS_MGMT_UNIT 0x002000 /* device supports management unit */
94#define DEV_HAS_COLLISION_FIX 0x04000 /* device supports tx collision fix */ 94#define DEV_HAS_CORRECT_MACADDR 0x004000 /* device supports correct mac address order */
95#define DEV_HAS_PAUSEFRAME_TX_V1 0x08000 /* device supports tx pause frames version 1 */ 95#define DEV_HAS_COLLISION_FIX 0x008000 /* device supports tx collision fix */
96#define DEV_HAS_PAUSEFRAME_TX_V2 0x10000 /* device supports tx pause frames version 2 */ 96#define DEV_HAS_PAUSEFRAME_TX_V1 0x010000 /* device supports tx pause frames version 1 */
97#define DEV_HAS_PAUSEFRAME_TX_V3 0x20000 /* device supports tx pause frames version 3 */ 97#define DEV_HAS_PAUSEFRAME_TX_V2 0x020000 /* device supports tx pause frames version 2 */
98#define DEV_NEED_TX_LIMIT 0x40000 /* device needs to limit tx */ 98#define DEV_HAS_PAUSEFRAME_TX_V3 0x040000 /* device supports tx pause frames version 3 */
99#define DEV_HAS_GEAR_MODE 0x80000 /* device supports gear mode */ 99#define DEV_NEED_TX_LIMIT 0x080000 /* device needs to limit tx */
100#define DEV_HAS_GEAR_MODE 0x100000 /* device supports gear mode */
100 101
101enum { 102enum {
102 NvRegIrqStatus = 0x000, 103 NvRegIrqStatus = 0x000,
@@ -248,6 +249,8 @@ enum {
248#define NVREG_TX_PAUSEFRAME_ENABLE_V1 0x01800010 249#define NVREG_TX_PAUSEFRAME_ENABLE_V1 0x01800010
249#define NVREG_TX_PAUSEFRAME_ENABLE_V2 0x056003f0 250#define NVREG_TX_PAUSEFRAME_ENABLE_V2 0x056003f0
250#define NVREG_TX_PAUSEFRAME_ENABLE_V3 0x09f00880 251#define NVREG_TX_PAUSEFRAME_ENABLE_V3 0x09f00880
252 NvRegTxPauseFrameLimit = 0x174,
253#define NVREG_TX_PAUSEFRAMELIMIT_ENABLE 0x00010000
251 NvRegMIIStatus = 0x180, 254 NvRegMIIStatus = 0x180,
252#define NVREG_MIISTAT_ERROR 0x0001 255#define NVREG_MIISTAT_ERROR 0x0001
253#define NVREG_MIISTAT_LINKCHANGE 0x0008 256#define NVREG_MIISTAT_LINKCHANGE 0x0008
@@ -270,6 +273,9 @@ enum {
270#define NVREG_MIICTL_WRITE 0x00400 273#define NVREG_MIICTL_WRITE 0x00400
271#define NVREG_MIICTL_ADDRSHIFT 5 274#define NVREG_MIICTL_ADDRSHIFT 5
272 NvRegMIIData = 0x194, 275 NvRegMIIData = 0x194,
276 NvRegTxUnicast = 0x1a0,
277 NvRegTxMulticast = 0x1a4,
278 NvRegTxBroadcast = 0x1a8,
273 NvRegWakeUpFlags = 0x200, 279 NvRegWakeUpFlags = 0x200,
274#define NVREG_WAKEUPFLAGS_VAL 0x7770 280#define NVREG_WAKEUPFLAGS_VAL 0x7770
275#define NVREG_WAKEUPFLAGS_BUSYSHIFT 24 281#define NVREG_WAKEUPFLAGS_BUSYSHIFT 24
@@ -333,6 +339,7 @@ enum {
333 NvRegPowerState2 = 0x600, 339 NvRegPowerState2 = 0x600,
334#define NVREG_POWERSTATE2_POWERUP_MASK 0x0F11 340#define NVREG_POWERSTATE2_POWERUP_MASK 0x0F11
335#define NVREG_POWERSTATE2_POWERUP_REV_A3 0x0001 341#define NVREG_POWERSTATE2_POWERUP_REV_A3 0x0001
342#define NVREG_POWERSTATE2_PHY_RESET 0x0004
336}; 343};
337 344
338/* Big endian: should work, but is untested */ 345/* Big endian: should work, but is untested */
@@ -401,6 +408,7 @@ union ring_type {
401#define NV_RX_FRAMINGERR (1<<29) 408#define NV_RX_FRAMINGERR (1<<29)
402#define NV_RX_ERROR (1<<30) 409#define NV_RX_ERROR (1<<30)
403#define NV_RX_AVAIL (1<<31) 410#define NV_RX_AVAIL (1<<31)
411#define NV_RX_ERROR_MASK (NV_RX_ERROR1|NV_RX_ERROR2|NV_RX_ERROR3|NV_RX_ERROR4|NV_RX_CRCERR|NV_RX_OVERFLOW|NV_RX_FRAMINGERR)
404 412
405#define NV_RX2_CHECKSUMMASK (0x1C000000) 413#define NV_RX2_CHECKSUMMASK (0x1C000000)
406#define NV_RX2_CHECKSUM_IP (0x10000000) 414#define NV_RX2_CHECKSUM_IP (0x10000000)
@@ -418,6 +426,7 @@ union ring_type {
418/* error and avail are the same for both */ 426/* error and avail are the same for both */
419#define NV_RX2_ERROR (1<<30) 427#define NV_RX2_ERROR (1<<30)
420#define NV_RX2_AVAIL (1<<31) 428#define NV_RX2_AVAIL (1<<31)
429#define NV_RX2_ERROR_MASK (NV_RX2_ERROR1|NV_RX2_ERROR2|NV_RX2_ERROR3|NV_RX2_ERROR4|NV_RX2_CRCERR|NV_RX2_OVERFLOW|NV_RX2_FRAMINGERR)
421 430
422#define NV_RX3_VLAN_TAG_PRESENT (1<<16) 431#define NV_RX3_VLAN_TAG_PRESENT (1<<16)
423#define NV_RX3_VLAN_TAG_MASK (0x0000FFFF) 432#define NV_RX3_VLAN_TAG_MASK (0x0000FFFF)
@@ -529,6 +538,7 @@ union ring_type {
529#define PHY_REALTEK_INIT_REG4 0x14 538#define PHY_REALTEK_INIT_REG4 0x14
530#define PHY_REALTEK_INIT_REG5 0x18 539#define PHY_REALTEK_INIT_REG5 0x18
531#define PHY_REALTEK_INIT_REG6 0x11 540#define PHY_REALTEK_INIT_REG6 0x11
541#define PHY_REALTEK_INIT_REG7 0x01
532#define PHY_REALTEK_INIT1 0x0000 542#define PHY_REALTEK_INIT1 0x0000
533#define PHY_REALTEK_INIT2 0x8e00 543#define PHY_REALTEK_INIT2 0x8e00
534#define PHY_REALTEK_INIT3 0x0001 544#define PHY_REALTEK_INIT3 0x0001
@@ -537,6 +547,9 @@ union ring_type {
537#define PHY_REALTEK_INIT6 0xf5c7 547#define PHY_REALTEK_INIT6 0xf5c7
538#define PHY_REALTEK_INIT7 0x1000 548#define PHY_REALTEK_INIT7 0x1000
539#define PHY_REALTEK_INIT8 0x0003 549#define PHY_REALTEK_INIT8 0x0003
550#define PHY_REALTEK_INIT9 0x0008
551#define PHY_REALTEK_INIT10 0x0005
552#define PHY_REALTEK_INIT11 0x0200
540#define PHY_REALTEK_INIT_MSK1 0x0003 553#define PHY_REALTEK_INIT_MSK1 0x0003
541 554
542#define PHY_GIGABIT 0x0100 555#define PHY_GIGABIT 0x0100
@@ -611,7 +624,12 @@ static const struct nv_ethtool_str nv_estats_str[] = {
611 { "rx_bytes" }, 624 { "rx_bytes" },
612 { "tx_pause" }, 625 { "tx_pause" },
613 { "rx_pause" }, 626 { "rx_pause" },
614 { "rx_drop_frame" } 627 { "rx_drop_frame" },
628
629 /* version 3 stats */
630 { "tx_unicast" },
631 { "tx_multicast" },
632 { "tx_broadcast" }
615}; 633};
616 634
617struct nv_ethtool_stats { 635struct nv_ethtool_stats {
@@ -647,9 +665,15 @@ struct nv_ethtool_stats {
647 u64 tx_pause; 665 u64 tx_pause;
648 u64 rx_pause; 666 u64 rx_pause;
649 u64 rx_drop_frame; 667 u64 rx_drop_frame;
668
669 /* version 3 stats */
670 u64 tx_unicast;
671 u64 tx_multicast;
672 u64 tx_broadcast;
650}; 673};
651 674
652#define NV_DEV_STATISTICS_V2_COUNT (sizeof(struct nv_ethtool_stats)/sizeof(u64)) 675#define NV_DEV_STATISTICS_V3_COUNT (sizeof(struct nv_ethtool_stats)/sizeof(u64))
676#define NV_DEV_STATISTICS_V2_COUNT (NV_DEV_STATISTICS_V3_COUNT - 3)
653#define NV_DEV_STATISTICS_V1_COUNT (NV_DEV_STATISTICS_V2_COUNT - 6) 677#define NV_DEV_STATISTICS_V1_COUNT (NV_DEV_STATISTICS_V2_COUNT - 6)
654 678
655/* diagnostics */ 679/* diagnostics */
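Note: the forcedeth changes above add three u64 counters (tx_unicast, tx_multicast, tx_broadcast) to struct nv_ethtool_stats and re-derive the per-version counts from the struct itself: the v3 count is the number of u64 members, v2 is that minus the three new counters, and v1 is v2 minus six. The same sizeof arithmetic in standalone form, using a trimmed stand-in struct (so the absolute numbers differ from the driver's):

#include <stdio.h>

/* Trimmed stand-in for struct nv_ethtool_stats: a flat block of u64 counters. */
struct stats {
        unsigned long long tx_packets;
        unsigned long long rx_packets;
        unsigned long long tx_pause;
        unsigned long long rx_pause;
        unsigned long long rx_drop_frame;
        /* version 3 stats */
        unsigned long long tx_unicast;
        unsigned long long tx_multicast;
        unsigned long long tx_broadcast;
};

#define STATS_V3_COUNT (sizeof(struct stats) / sizeof(unsigned long long))
#define STATS_V2_COUNT (STATS_V3_COUNT - 3)     /* drop the three v3 counters */

int main(void)
{
        printf("v3=%zu v2=%zu\n", STATS_V3_COUNT, STATS_V2_COUNT);     /* v3=8 v2=5 */
        return 0;
}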
@@ -1149,6 +1173,42 @@ static int phy_init(struct net_device *dev)
1149 return PHY_ERROR; 1173 return PHY_ERROR;
1150 } 1174 }
1151 } 1175 }
1176 if (np->phy_model == PHY_MODEL_REALTEK_8211 &&
1177 np->phy_rev == PHY_REV_REALTEK_8211C) {
1178 u32 powerstate = readl(base + NvRegPowerState2);
1179
1180 /* need to perform hw phy reset */
1181 powerstate |= NVREG_POWERSTATE2_PHY_RESET;
1182 writel(powerstate, base + NvRegPowerState2);
1183 msleep(25);
1184
1185 powerstate &= ~NVREG_POWERSTATE2_PHY_RESET;
1186 writel(powerstate, base + NvRegPowerState2);
1187 msleep(25);
1188
1189 reg = mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG6, MII_READ);
1190 reg |= PHY_REALTEK_INIT9;
1191 if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG6, reg)) {
1192 printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
1193 return PHY_ERROR;
1194 }
1195 if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG1, PHY_REALTEK_INIT10)) {
1196 printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
1197 return PHY_ERROR;
1198 }
1199 reg = mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG7, MII_READ);
1200 if (!(reg & PHY_REALTEK_INIT11)) {
1201 reg |= PHY_REALTEK_INIT11;
1202 if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG7, reg)) {
1203 printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
1204 return PHY_ERROR;
1205 }
1206 }
1207 if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG1, PHY_REALTEK_INIT1)) {
1208 printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
1209 return PHY_ERROR;
1210 }
1211 }
1152 if (np->phy_model == PHY_MODEL_REALTEK_8201) { 1212 if (np->phy_model == PHY_MODEL_REALTEK_8201) {
1153 if (np->device_id == PCI_DEVICE_ID_NVIDIA_NVENET_32 || 1213 if (np->device_id == PCI_DEVICE_ID_NVIDIA_NVENET_32 ||
1154 np->device_id == PCI_DEVICE_ID_NVIDIA_NVENET_33 || 1214 np->device_id == PCI_DEVICE_ID_NVIDIA_NVENET_33 ||
@@ -1201,12 +1261,23 @@ static int phy_init(struct net_device *dev)
1201 mii_control = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ); 1261 mii_control = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ);
1202 mii_control |= BMCR_ANENABLE; 1262 mii_control |= BMCR_ANENABLE;
1203 1263
1204 /* reset the phy 1264 if (np->phy_oui == PHY_OUI_REALTEK &&
1205 * (certain phys need bmcr to be setup with reset) 1265 np->phy_model == PHY_MODEL_REALTEK_8211 &&
1206 */ 1266 np->phy_rev == PHY_REV_REALTEK_8211C) {
1207 if (phy_reset(dev, mii_control)) { 1267 /* start autoneg since we already performed hw reset above */
1208 printk(KERN_INFO "%s: phy reset failed\n", pci_name(np->pci_dev)); 1268 mii_control |= BMCR_ANRESTART;
1209 return PHY_ERROR; 1269 if (mii_rw(dev, np->phyaddr, MII_BMCR, mii_control)) {
1270 printk(KERN_INFO "%s: phy init failed\n", pci_name(np->pci_dev));
1271 return PHY_ERROR;
1272 }
1273 } else {
1274 /* reset the phy
1275 * (certain phys need bmcr to be setup with reset)
1276 */
1277 if (phy_reset(dev, mii_control)) {
1278 printk(KERN_INFO "%s: phy reset failed\n", pci_name(np->pci_dev));
1279 return PHY_ERROR;
1280 }
1210 } 1281 }
1211 1282
1212 /* phy vendor specific configuration */ 1283 /* phy vendor specific configuration */
@@ -1576,6 +1647,12 @@ static void nv_get_hw_stats(struct net_device *dev)
1576 np->estats.rx_pause += readl(base + NvRegRxPause); 1647 np->estats.rx_pause += readl(base + NvRegRxPause);
1577 np->estats.rx_drop_frame += readl(base + NvRegRxDropFrame); 1648 np->estats.rx_drop_frame += readl(base + NvRegRxDropFrame);
1578 } 1649 }
1650
1651 if (np->driver_data & DEV_HAS_STATISTICS_V3) {
1652 np->estats.tx_unicast += readl(base + NvRegTxUnicast);
1653 np->estats.tx_multicast += readl(base + NvRegTxMulticast);
1654 np->estats.tx_broadcast += readl(base + NvRegTxBroadcast);
1655 }
1579} 1656}
1580 1657
1581/* 1658/*
@@ -1589,7 +1666,7 @@ static struct net_device_stats *nv_get_stats(struct net_device *dev)
1589 struct fe_priv *np = netdev_priv(dev); 1666 struct fe_priv *np = netdev_priv(dev);
1590 1667
1591 /* If the nic supports hw counters then retrieve latest values */ 1668 /* If the nic supports hw counters then retrieve latest values */
1592 if (np->driver_data & (DEV_HAS_STATISTICS_V1|DEV_HAS_STATISTICS_V2)) { 1669 if (np->driver_data & (DEV_HAS_STATISTICS_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_STATISTICS_V3)) {
1593 nv_get_hw_stats(dev); 1670 nv_get_hw_stats(dev);
1594 1671
1595 /* copy to net_device stats */ 1672 /* copy to net_device stats */
@@ -2580,7 +2657,7 @@ static int nv_rx_process(struct net_device *dev, int limit)
2580 if (likely(flags & NV_RX_DESCRIPTORVALID)) { 2657 if (likely(flags & NV_RX_DESCRIPTORVALID)) {
2581 len = flags & LEN_MASK_V1; 2658 len = flags & LEN_MASK_V1;
2582 if (unlikely(flags & NV_RX_ERROR)) { 2659 if (unlikely(flags & NV_RX_ERROR)) {
2583 if (flags & NV_RX_ERROR4) { 2660 if ((flags & NV_RX_ERROR_MASK) == NV_RX_ERROR4) {
2584 len = nv_getlen(dev, skb->data, len); 2661 len = nv_getlen(dev, skb->data, len);
2585 if (len < 0) { 2662 if (len < 0) {
2586 dev->stats.rx_errors++; 2663 dev->stats.rx_errors++;
@@ -2589,7 +2666,7 @@ static int nv_rx_process(struct net_device *dev, int limit)
2589 } 2666 }
2590 } 2667 }
2591 /* framing errors are soft errors */ 2668 /* framing errors are soft errors */
2592 else if (flags & NV_RX_FRAMINGERR) { 2669 else if ((flags & NV_RX_ERROR_MASK) == NV_RX_FRAMINGERR) {
2593 if (flags & NV_RX_SUBSTRACT1) { 2670 if (flags & NV_RX_SUBSTRACT1) {
2594 len--; 2671 len--;
2595 } 2672 }
@@ -2615,7 +2692,7 @@ static int nv_rx_process(struct net_device *dev, int limit)
2615 if (likely(flags & NV_RX2_DESCRIPTORVALID)) { 2692 if (likely(flags & NV_RX2_DESCRIPTORVALID)) {
2616 len = flags & LEN_MASK_V2; 2693 len = flags & LEN_MASK_V2;
2617 if (unlikely(flags & NV_RX2_ERROR)) { 2694 if (unlikely(flags & NV_RX2_ERROR)) {
2618 if (flags & NV_RX2_ERROR4) { 2695 if ((flags & NV_RX2_ERROR_MASK) == NV_RX2_ERROR4) {
2619 len = nv_getlen(dev, skb->data, len); 2696 len = nv_getlen(dev, skb->data, len);
2620 if (len < 0) { 2697 if (len < 0) {
2621 dev->stats.rx_errors++; 2698 dev->stats.rx_errors++;
@@ -2624,7 +2701,7 @@ static int nv_rx_process(struct net_device *dev, int limit)
2624 } 2701 }
2625 } 2702 }
2626 /* framing errors are soft errors */ 2703 /* framing errors are soft errors */
2627 else if (flags & NV_RX2_FRAMINGERR) { 2704 else if ((flags & NV_RX2_ERROR_MASK) == NV_RX2_FRAMINGERR) {
2628 if (flags & NV_RX2_SUBSTRACT1) { 2705 if (flags & NV_RX2_SUBSTRACT1) {
2629 len--; 2706 len--;
2630 } 2707 }
@@ -2714,7 +2791,7 @@ static int nv_rx_process_optimized(struct net_device *dev, int limit)
2714 if (likely(flags & NV_RX2_DESCRIPTORVALID)) { 2791 if (likely(flags & NV_RX2_DESCRIPTORVALID)) {
2715 len = flags & LEN_MASK_V2; 2792 len = flags & LEN_MASK_V2;
2716 if (unlikely(flags & NV_RX2_ERROR)) { 2793 if (unlikely(flags & NV_RX2_ERROR)) {
2717 if (flags & NV_RX2_ERROR4) { 2794 if ((flags & NV_RX2_ERROR_MASK) == NV_RX2_ERROR4) {
2718 len = nv_getlen(dev, skb->data, len); 2795 len = nv_getlen(dev, skb->data, len);
2719 if (len < 0) { 2796 if (len < 0) {
2720 dev_kfree_skb(skb); 2797 dev_kfree_skb(skb);
@@ -2722,7 +2799,7 @@ static int nv_rx_process_optimized(struct net_device *dev, int limit)
2722 } 2799 }
2723 } 2800 }
2724 /* framing errors are soft errors */ 2801 /* framing errors are soft errors */
2725 else if (flags & NV_RX2_FRAMINGERR) { 2802 else if ((flags & NV_RX2_ERROR_MASK) == NV_RX2_FRAMINGERR) {
2726 if (flags & NV_RX2_SUBSTRACT1) { 2803 if (flags & NV_RX2_SUBSTRACT1) {
2727 len--; 2804 len--;
2728 } 2805 }
@@ -3001,8 +3078,11 @@ static void nv_update_pause(struct net_device *dev, u32 pause_flags)
3001 u32 pause_enable = NVREG_TX_PAUSEFRAME_ENABLE_V1; 3078 u32 pause_enable = NVREG_TX_PAUSEFRAME_ENABLE_V1;
3002 if (np->driver_data & DEV_HAS_PAUSEFRAME_TX_V2) 3079 if (np->driver_data & DEV_HAS_PAUSEFRAME_TX_V2)
3003 pause_enable = NVREG_TX_PAUSEFRAME_ENABLE_V2; 3080 pause_enable = NVREG_TX_PAUSEFRAME_ENABLE_V2;
3004 if (np->driver_data & DEV_HAS_PAUSEFRAME_TX_V3) 3081 if (np->driver_data & DEV_HAS_PAUSEFRAME_TX_V3) {
3005 pause_enable = NVREG_TX_PAUSEFRAME_ENABLE_V3; 3082 pause_enable = NVREG_TX_PAUSEFRAME_ENABLE_V3;
3083 /* limit the number of tx pause frames to a default of 8 */
3084 writel(readl(base + NvRegTxPauseFrameLimit)|NVREG_TX_PAUSEFRAMELIMIT_ENABLE, base + NvRegTxPauseFrameLimit);
3085 }
3006 writel(pause_enable, base + NvRegTxPauseFrame); 3086 writel(pause_enable, base + NvRegTxPauseFrame);
3007 writel(regmisc|NVREG_MISC1_PAUSE_TX, base + NvRegMisc1); 3087 writel(regmisc|NVREG_MISC1_PAUSE_TX, base + NvRegMisc1);
3008 np->pause_flags |= NV_PAUSEFRAME_TX_ENABLE; 3088 np->pause_flags |= NV_PAUSEFRAME_TX_ENABLE;
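
The V3 pause path above additionally caps the number of tx pause frames by OR-ing an enable bit into NvRegTxPauseFrameLimit. A stand-alone sketch of that read-modify-write pattern; the register is modelled as a plain pointer and the bit value is a placeholder, not the real NVREG_TX_PAUSEFRAMELIMIT_ENABLE:

#include <stdint.h>

#define LIMIT_ENABLE 0x80000000u	/* placeholder bit value */

/* emulate readl()/writel() on an ordinary pointer for illustration */
static inline uint32_t rd(volatile uint32_t *reg)                 { return *reg; }
static inline void     wr(uint32_t val, volatile uint32_t *reg)   { *reg = val; }

static void enable_pause_limit(volatile uint32_t *reg)
{
	/* preserve all other bits, set only the enable bit */
	wr(rd(reg) | LIMIT_ENABLE, reg);
}
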
@@ -4688,6 +4768,8 @@ static int nv_get_sset_count(struct net_device *dev, int sset)
4688 return NV_DEV_STATISTICS_V1_COUNT; 4768 return NV_DEV_STATISTICS_V1_COUNT;
4689 else if (np->driver_data & DEV_HAS_STATISTICS_V2) 4769 else if (np->driver_data & DEV_HAS_STATISTICS_V2)
4690 return NV_DEV_STATISTICS_V2_COUNT; 4770 return NV_DEV_STATISTICS_V2_COUNT;
4771 else if (np->driver_data & DEV_HAS_STATISTICS_V3)
4772 return NV_DEV_STATISTICS_V3_COUNT;
4691 else 4773 else
4692 return 0; 4774 return 0;
4693 default: 4775 default:
@@ -5272,7 +5354,7 @@ static int nv_open(struct net_device *dev)
5272 mod_timer(&np->oom_kick, jiffies + OOM_REFILL); 5354 mod_timer(&np->oom_kick, jiffies + OOM_REFILL);
5273 5355
5274 /* start statistics timer */ 5356 /* start statistics timer */
5275 if (np->driver_data & (DEV_HAS_STATISTICS_V1|DEV_HAS_STATISTICS_V2)) 5357 if (np->driver_data & (DEV_HAS_STATISTICS_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_STATISTICS_V3))
5276 mod_timer(&np->stats_poll, 5358 mod_timer(&np->stats_poll,
5277 round_jiffies(jiffies + STATS_INTERVAL)); 5359 round_jiffies(jiffies + STATS_INTERVAL));
5278 5360
@@ -5376,7 +5458,7 @@ static int __devinit nv_probe(struct pci_dev *pci_dev, const struct pci_device_i
5376 if (err < 0) 5458 if (err < 0)
5377 goto out_disable; 5459 goto out_disable;
5378 5460
5379 if (id->driver_data & (DEV_HAS_VLAN|DEV_HAS_MSI_X|DEV_HAS_POWER_CNTRL|DEV_HAS_STATISTICS_V2)) 5461 if (id->driver_data & (DEV_HAS_VLAN|DEV_HAS_MSI_X|DEV_HAS_POWER_CNTRL|DEV_HAS_STATISTICS_V2|DEV_HAS_STATISTICS_V3))
5380 np->register_size = NV_PCI_REGSZ_VER3; 5462 np->register_size = NV_PCI_REGSZ_VER3;
5381 else if (id->driver_data & DEV_HAS_STATISTICS_V1) 5463 else if (id->driver_data & DEV_HAS_STATISTICS_V1)
5382 np->register_size = NV_PCI_REGSZ_VER2; 5464 np->register_size = NV_PCI_REGSZ_VER2;
@@ -6031,35 +6113,35 @@ static struct pci_device_id pci_tbl[] = {
6031 }, 6113 },
6032 { /* MCP77 Ethernet Controller */ 6114 { /* MCP77 Ethernet Controller */
6033 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_32), 6115 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_32),
6034 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V2|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_NEED_TX_LIMIT|DEV_HAS_GEAR_MODE, 6116 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V2|DEV_HAS_STATISTICS_V3|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_NEED_TX_LIMIT|DEV_HAS_GEAR_MODE,
6035 }, 6117 },
6036 { /* MCP77 Ethernet Controller */ 6118 { /* MCP77 Ethernet Controller */
6037 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_33), 6119 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_33),
6038 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V2|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_NEED_TX_LIMIT|DEV_HAS_GEAR_MODE, 6120 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V2|DEV_HAS_STATISTICS_V3|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_NEED_TX_LIMIT|DEV_HAS_GEAR_MODE,
6039 }, 6121 },
6040 { /* MCP77 Ethernet Controller */ 6122 { /* MCP77 Ethernet Controller */
6041 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_34), 6123 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_34),
6042 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V2|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_NEED_TX_LIMIT|DEV_HAS_GEAR_MODE, 6124 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V2|DEV_HAS_STATISTICS_V3|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_NEED_TX_LIMIT|DEV_HAS_GEAR_MODE,
6043 }, 6125 },
6044 { /* MCP77 Ethernet Controller */ 6126 { /* MCP77 Ethernet Controller */
6045 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_35), 6127 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_35),
6046 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V2|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_NEED_TX_LIMIT|DEV_HAS_GEAR_MODE, 6128 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V2|DEV_HAS_STATISTICS_V3|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_NEED_TX_LIMIT|DEV_HAS_GEAR_MODE,
6047 }, 6129 },
6048 { /* MCP79 Ethernet Controller */ 6130 { /* MCP79 Ethernet Controller */
6049 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_36), 6131 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_36),
6050 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V3|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_NEED_TX_LIMIT|DEV_HAS_GEAR_MODE, 6132 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V3|DEV_HAS_STATISTICS_V3|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_NEED_TX_LIMIT|DEV_HAS_GEAR_MODE,
6051 }, 6133 },
6052 { /* MCP79 Ethernet Controller */ 6134 { /* MCP79 Ethernet Controller */
6053 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_37), 6135 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_37),
6054 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V3|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_NEED_TX_LIMIT|DEV_HAS_GEAR_MODE, 6136 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V3|DEV_HAS_STATISTICS_V3|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_NEED_TX_LIMIT|DEV_HAS_GEAR_MODE,
6055 }, 6137 },
6056 { /* MCP79 Ethernet Controller */ 6138 { /* MCP79 Ethernet Controller */
6057 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_38), 6139 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_38),
6058 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V3|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_NEED_TX_LIMIT|DEV_HAS_GEAR_MODE, 6140 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V3|DEV_HAS_STATISTICS_V3|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_NEED_TX_LIMIT|DEV_HAS_GEAR_MODE,
6059 }, 6141 },
6060 { /* MCP79 Ethernet Controller */ 6142 { /* MCP79 Ethernet Controller */
6061 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_39), 6143 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_39),
6062 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V3|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_NEED_TX_LIMIT|DEV_HAS_GEAR_MODE, 6144 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V3|DEV_HAS_STATISTICS_V3|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_NEED_TX_LIMIT|DEV_HAS_GEAR_MODE,
6063 }, 6145 },
6064 {0,}, 6146 {0,},
6065}; 6147};
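
The rx error handling above changes from testing a single error bit to comparing the whole error field against one code, so the length-recovery and framing-error branches now run only when that is the sole error flagged. A stand-alone sketch of the difference, using made-up flag values rather than the driver's real NV_RX_* masks:

#include <stdio.h>

#define RX_ERROR1      0x01
#define RX_ERROR4      0x08
#define RX_FRAMINGERR  0x10
#define RX_ERROR_MASK  (RX_ERROR1 | RX_ERROR4 | RX_FRAMINGERR)

int main(void)
{
	unsigned int flags = RX_ERROR4 | RX_ERROR1;	/* two error bits set */

	/* old style: true whenever the ERROR4 bit is present at all */
	printf("bit test:   %d\n", !!(flags & RX_ERROR4));			/* 1 */

	/* new style: true only when ERROR4 is the only error reported */
	printf("exact test: %d\n", (flags & RX_ERROR_MASK) == RX_ERROR4);	/* 0 */
	return 0;
}
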
diff --git a/drivers/net/fs_enet/mac-fcc.c b/drivers/net/fs_enet/mac-fcc.c
index 0a97fc2d97ec..1c7ef812a8e3 100644
--- a/drivers/net/fs_enet/mac-fcc.c
+++ b/drivers/net/fs_enet/mac-fcc.c
@@ -126,7 +126,7 @@ out:
126#define FCC_NAPI_RX_EVENT_MSK (FCC_ENET_RXF | FCC_ENET_RXB) 126#define FCC_NAPI_RX_EVENT_MSK (FCC_ENET_RXF | FCC_ENET_RXB)
127#define FCC_RX_EVENT (FCC_ENET_RXF) 127#define FCC_RX_EVENT (FCC_ENET_RXF)
128#define FCC_TX_EVENT (FCC_ENET_TXB) 128#define FCC_TX_EVENT (FCC_ENET_TXB)
129#define FCC_ERR_EVENT_MSK (FCC_ENET_TXE | FCC_ENET_BSY) 129#define FCC_ERR_EVENT_MSK (FCC_ENET_TXE)
130 130
131static int setup_data(struct net_device *dev) 131static int setup_data(struct net_device *dev)
132{ 132{
diff --git a/drivers/net/gianfar.c b/drivers/net/gianfar.c
index b8394cf134e8..ca6cf6ecb37b 100644
--- a/drivers/net/gianfar.c
+++ b/drivers/net/gianfar.c
@@ -414,9 +414,7 @@ static int gfar_suspend(struct platform_device *pdev, pm_message_t state)
414 spin_unlock(&priv->rxlock); 414 spin_unlock(&priv->rxlock);
415 spin_unlock_irqrestore(&priv->txlock, flags); 415 spin_unlock_irqrestore(&priv->txlock, flags);
416 416
417#ifdef CONFIG_GFAR_NAPI
418 napi_disable(&priv->napi); 417 napi_disable(&priv->napi);
419#endif
420 418
421 if (magic_packet) { 419 if (magic_packet) {
422 /* Enable interrupt on Magic Packet */ 420 /* Enable interrupt on Magic Packet */
@@ -469,9 +467,7 @@ static int gfar_resume(struct platform_device *pdev)
469 467
470 netif_device_attach(dev); 468 netif_device_attach(dev);
471 469
472#ifdef CONFIG_GFAR_NAPI
473 napi_enable(&priv->napi); 470 napi_enable(&priv->napi);
474#endif
475 471
476 return 0; 472 return 0;
477} 473}
diff --git a/drivers/net/hamradio/mkiss.c b/drivers/net/hamradio/mkiss.c
index 3249df5e0f17..b8e25c4624d2 100644
--- a/drivers/net/hamradio/mkiss.c
+++ b/drivers/net/hamradio/mkiss.c
@@ -548,7 +548,7 @@ static int ax_xmit(struct sk_buff *skb, struct net_device *dev)
548 } 548 }
549 549
550 printk(KERN_ERR "mkiss: %s: transmit timed out, %s?\n", dev->name, 550 printk(KERN_ERR "mkiss: %s: transmit timed out, %s?\n", dev->name,
551 (ax->tty->ops->chars_in_buffer(ax->tty) || ax->xleft) ? 551 (tty_chars_in_buffer(ax->tty) || ax->xleft) ?
552 "bad line quality" : "driver error"); 552 "bad line quality" : "driver error");
553 553
554 ax->xleft = 0; 554 ax->xleft = 0;
diff --git a/drivers/net/ifb.c b/drivers/net/ifb.c
index 0960e69b2da4..e4fbefc8c82f 100644
--- a/drivers/net/ifb.c
+++ b/drivers/net/ifb.c
@@ -69,18 +69,20 @@ static void ri_tasklet(unsigned long dev)
69 struct net_device *_dev = (struct net_device *)dev; 69 struct net_device *_dev = (struct net_device *)dev;
70 struct ifb_private *dp = netdev_priv(_dev); 70 struct ifb_private *dp = netdev_priv(_dev);
71 struct net_device_stats *stats = &_dev->stats; 71 struct net_device_stats *stats = &_dev->stats;
72 struct netdev_queue *txq;
72 struct sk_buff *skb; 73 struct sk_buff *skb;
73 74
75 txq = netdev_get_tx_queue(_dev, 0);
74 dp->st_task_enter++; 76 dp->st_task_enter++;
75 if ((skb = skb_peek(&dp->tq)) == NULL) { 77 if ((skb = skb_peek(&dp->tq)) == NULL) {
76 dp->st_txq_refl_try++; 78 dp->st_txq_refl_try++;
77 if (netif_tx_trylock(_dev)) { 79 if (__netif_tx_trylock(txq)) {
78 dp->st_rxq_enter++; 80 dp->st_rxq_enter++;
79 while ((skb = skb_dequeue(&dp->rq)) != NULL) { 81 while ((skb = skb_dequeue(&dp->rq)) != NULL) {
80 skb_queue_tail(&dp->tq, skb); 82 skb_queue_tail(&dp->tq, skb);
81 dp->st_rx2tx_tran++; 83 dp->st_rx2tx_tran++;
82 } 84 }
83 netif_tx_unlock(_dev); 85 __netif_tx_unlock(txq);
84 } else { 86 } else {
85 /* reschedule */ 87 /* reschedule */
86 dp->st_rxq_notenter++; 88 dp->st_rxq_notenter++;
@@ -115,7 +117,7 @@ static void ri_tasklet(unsigned long dev)
115 BUG(); 117 BUG();
116 } 118 }
117 119
118 if (netif_tx_trylock(_dev)) { 120 if (__netif_tx_trylock(txq)) {
119 dp->st_rxq_check++; 121 dp->st_rxq_check++;
120 if ((skb = skb_peek(&dp->rq)) == NULL) { 122 if ((skb = skb_peek(&dp->rq)) == NULL) {
121 dp->tasklet_pending = 0; 123 dp->tasklet_pending = 0;
@@ -123,10 +125,10 @@ static void ri_tasklet(unsigned long dev)
123 netif_wake_queue(_dev); 125 netif_wake_queue(_dev);
124 } else { 126 } else {
125 dp->st_rxq_rsch++; 127 dp->st_rxq_rsch++;
126 netif_tx_unlock(_dev); 128 __netif_tx_unlock(txq);
127 goto resched; 129 goto resched;
128 } 130 }
129 netif_tx_unlock(_dev); 131 __netif_tx_unlock(txq);
130 } else { 132 } else {
131resched: 133resched:
132 dp->tasklet_pending = 1; 134 dp->tasklet_pending = 1;
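
The ifb rework above drops the whole-device netif_tx_lock() in favour of locking the one tx queue the driver actually uses (queue 0). A kernel-only sketch of the pattern; not standalone-buildable, and the wrapper function name is made up:

#include <linux/netdevice.h>

static void example_reflect(struct net_device *dev)
{
	/* ifb is single-queue, so lock that queue directly instead of
	 * serialising against every tx queue on the device */
	struct netdev_queue *txq = netdev_get_tx_queue(dev, 0);

	if (__netif_tx_trylock(txq)) {
		/* ... move skbs from the rx queue to the tx queue ... */
		__netif_tx_unlock(txq);
	}
}
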
diff --git a/drivers/net/igb/e1000_82575.c b/drivers/net/igb/e1000_82575.c
index e098f234770f..bb823acc7443 100644
--- a/drivers/net/igb/e1000_82575.c
+++ b/drivers/net/igb/e1000_82575.c
@@ -850,7 +850,7 @@ void igb_update_mc_addr_list_82575(struct e1000_hw *hw,
850 for (; mc_addr_count > 0; mc_addr_count--) { 850 for (; mc_addr_count > 0; mc_addr_count--) {
851 hash_value = igb_hash_mc_addr(hw, mc_addr_list); 851 hash_value = igb_hash_mc_addr(hw, mc_addr_list);
852 hw_dbg("Hash value = 0x%03X\n", hash_value); 852 hw_dbg("Hash value = 0x%03X\n", hash_value);
853 hw->mac.ops.mta_set(hw, hash_value); 853 igb_mta_set(hw, hash_value);
854 mc_addr_list += ETH_ALEN; 854 mc_addr_list += ETH_ALEN;
855 } 855 }
856} 856}
@@ -1136,6 +1136,12 @@ static s32 igb_setup_fiber_serdes_link_82575(struct e1000_hw *hw)
1136 E1000_PCS_LCTL_FORCE_LINK; /* Force Link */ 1136 E1000_PCS_LCTL_FORCE_LINK; /* Force Link */
1137 hw_dbg("Configuring Forced Link; PCS_LCTL = 0x%08X\n", reg); 1137 hw_dbg("Configuring Forced Link; PCS_LCTL = 0x%08X\n", reg);
1138 } 1138 }
1139
1140 if (hw->mac.type == e1000_82576) {
1141 reg |= E1000_PCS_LCTL_FORCE_FCTRL;
1142 igb_force_mac_fc(hw);
1143 }
1144
1139 wr32(E1000_PCS_LCTL, reg); 1145 wr32(E1000_PCS_LCTL, reg);
1140 1146
1141 return 0; 1147 return 0;
@@ -1232,70 +1238,6 @@ out:
1232} 1238}
1233 1239
1234/** 1240/**
1235 * igb_translate_register_82576 - Translate the proper register offset
1236 * @reg: e1000 register to be read
1237 *
1238 * Registers in 82576 are located in different offsets than other adapters
1239 * even though they function in the same manner. This function takes in
1240 * the name of the register to read and returns the correct offset for
1241 * 82576 silicon.
1242 **/
1243u32 igb_translate_register_82576(u32 reg)
1244{
1245 /*
1246 * Some of the Kawela registers are located at different
1247 * offsets than they are in older adapters.
1248 * Despite the difference in location, the registers
1249 * function in the same manner.
1250 */
1251 switch (reg) {
1252 case E1000_TDBAL(0):
1253 reg = 0x0E000;
1254 break;
1255 case E1000_TDBAH(0):
1256 reg = 0x0E004;
1257 break;
1258 case E1000_TDLEN(0):
1259 reg = 0x0E008;
1260 break;
1261 case E1000_TDH(0):
1262 reg = 0x0E010;
1263 break;
1264 case E1000_TDT(0):
1265 reg = 0x0E018;
1266 break;
1267 case E1000_TXDCTL(0):
1268 reg = 0x0E028;
1269 break;
1270 case E1000_RDBAL(0):
1271 reg = 0x0C000;
1272 break;
1273 case E1000_RDBAH(0):
1274 reg = 0x0C004;
1275 break;
1276 case E1000_RDLEN(0):
1277 reg = 0x0C008;
1278 break;
1279 case E1000_RDH(0):
1280 reg = 0x0C010;
1281 break;
1282 case E1000_RDT(0):
1283 reg = 0x0C018;
1284 break;
1285 case E1000_RXDCTL(0):
1286 reg = 0x0C028;
1287 break;
1288 case E1000_SRRCTL(0):
1289 reg = 0x0C00C;
1290 break;
1291 default:
1292 break;
1293 }
1294
1295 return reg;
1296}
1297
1298/**
1299 * igb_reset_init_script_82575 - Inits HW defaults after reset 1241 * igb_reset_init_script_82575 - Inits HW defaults after reset
1300 * @hw: pointer to the HW structure 1242 * @hw: pointer to the HW structure
1301 * 1243 *
diff --git a/drivers/net/igb/e1000_82575.h b/drivers/net/igb/e1000_82575.h
index 2f848e578a24..c1928b5efe1f 100644
--- a/drivers/net/igb/e1000_82575.h
+++ b/drivers/net/igb/e1000_82575.h
@@ -28,7 +28,6 @@
28#ifndef _E1000_82575_H_ 28#ifndef _E1000_82575_H_
29#define _E1000_82575_H_ 29#define _E1000_82575_H_
30 30
31u32 igb_translate_register_82576(u32 reg);
32void igb_update_mc_addr_list_82575(struct e1000_hw*, u8*, u32, u32, u32); 31void igb_update_mc_addr_list_82575(struct e1000_hw*, u8*, u32, u32, u32);
33extern void igb_shutdown_fiber_serdes_link_82575(struct e1000_hw *hw); 32extern void igb_shutdown_fiber_serdes_link_82575(struct e1000_hw *hw);
34extern void igb_rx_fifo_flush_82575(struct e1000_hw *hw); 33extern void igb_rx_fifo_flush_82575(struct e1000_hw *hw);
diff --git a/drivers/net/igb/e1000_defines.h b/drivers/net/igb/e1000_defines.h
index afdba3c9073c..ce700689fb57 100644
--- a/drivers/net/igb/e1000_defines.h
+++ b/drivers/net/igb/e1000_defines.h
@@ -257,6 +257,7 @@
257#define E1000_PCS_LCTL_FDV_FULL 8 257#define E1000_PCS_LCTL_FDV_FULL 8
258#define E1000_PCS_LCTL_FSD 0x10 258#define E1000_PCS_LCTL_FSD 0x10
259#define E1000_PCS_LCTL_FORCE_LINK 0x20 259#define E1000_PCS_LCTL_FORCE_LINK 0x20
260#define E1000_PCS_LCTL_FORCE_FCTRL 0x80
260#define E1000_PCS_LCTL_AN_ENABLE 0x10000 261#define E1000_PCS_LCTL_AN_ENABLE 0x10000
261#define E1000_PCS_LCTL_AN_RESTART 0x20000 262#define E1000_PCS_LCTL_AN_RESTART 0x20000
262#define E1000_PCS_LCTL_AN_TIMEOUT 0x40000 263#define E1000_PCS_LCTL_AN_TIMEOUT 0x40000
diff --git a/drivers/net/igb/e1000_hw.h b/drivers/net/igb/e1000_hw.h
index 19fa4ee96f2e..a65ccc3095c3 100644
--- a/drivers/net/igb/e1000_hw.h
+++ b/drivers/net/igb/e1000_hw.h
@@ -420,7 +420,6 @@ struct e1000_mac_operations {
420 void (*rar_set)(struct e1000_hw *, u8 *, u32); 420 void (*rar_set)(struct e1000_hw *, u8 *, u32);
421 s32 (*read_mac_addr)(struct e1000_hw *); 421 s32 (*read_mac_addr)(struct e1000_hw *);
422 s32 (*get_speed_and_duplex)(struct e1000_hw *, u16 *, u16 *); 422 s32 (*get_speed_and_duplex)(struct e1000_hw *, u16 *, u16 *);
423 void (*mta_set)(struct e1000_hw *, u32);
424}; 423};
425 424
426struct e1000_phy_operations { 425struct e1000_phy_operations {
diff --git a/drivers/net/igb/e1000_mac.c b/drivers/net/igb/e1000_mac.c
index 20408aa1f916..e18747c70bec 100644
--- a/drivers/net/igb/e1000_mac.c
+++ b/drivers/net/igb/e1000_mac.c
@@ -144,34 +144,6 @@ void igb_write_vfta(struct e1000_hw *hw, u32 offset, u32 value)
144} 144}
145 145
146/** 146/**
147 * igb_init_rx_addrs - Initialize receive address's
148 * @hw: pointer to the HW structure
149 * @rar_count: receive address registers
150 *
151 * Setups the receive address registers by setting the base receive address
152 * register to the devices MAC address and clearing all the other receive
153 * address registers to 0.
154 **/
155void igb_init_rx_addrs(struct e1000_hw *hw, u16 rar_count)
156{
157 u32 i;
158
159 /* Setup the receive address */
160 hw_dbg("Programming MAC Address into RAR[0]\n");
161
162 hw->mac.ops.rar_set(hw, hw->mac.addr, 0);
163
164 /* Zero out the other (rar_entry_count - 1) receive addresses */
165 hw_dbg("Clearing RAR[1-%u]\n", rar_count-1);
166 for (i = 1; i < rar_count; i++) {
167 array_wr32(E1000_RA, (i << 1), 0);
168 wrfl();
169 array_wr32(E1000_RA, ((i << 1) + 1), 0);
170 wrfl();
171 }
172}
173
174/**
175 * igb_check_alt_mac_addr - Check for alternate MAC addr 147 * igb_check_alt_mac_addr - Check for alternate MAC addr
176 * @hw: pointer to the HW structure 148 * @hw: pointer to the HW structure
177 * 149 *
@@ -271,7 +243,7 @@ void igb_rar_set(struct e1000_hw *hw, u8 *addr, u32 index)
271 * current value is read, the new bit is OR'd in and the new value is 243 * current value is read, the new bit is OR'd in and the new value is
272 * written back into the register. 244 * written back into the register.
273 **/ 245 **/
274static void igb_mta_set(struct e1000_hw *hw, u32 hash_value) 246void igb_mta_set(struct e1000_hw *hw, u32 hash_value)
275{ 247{
276 u32 hash_bit, hash_reg, mta; 248 u32 hash_bit, hash_reg, mta;
277 249
@@ -297,60 +269,6 @@ static void igb_mta_set(struct e1000_hw *hw, u32 hash_value)
297} 269}
298 270
299/** 271/**
300 * igb_update_mc_addr_list - Update Multicast addresses
301 * @hw: pointer to the HW structure
302 * @mc_addr_list: array of multicast addresses to program
303 * @mc_addr_count: number of multicast addresses to program
304 * @rar_used_count: the first RAR register free to program
305 * @rar_count: total number of supported Receive Address Registers
306 *
307 * Updates the Receive Address Registers and Multicast Table Array.
308 * The caller must have a packed mc_addr_list of multicast addresses.
309 * The parameter rar_count will usually be hw->mac.rar_entry_count
310 * unless there are workarounds that change this.
311 **/
312void igb_update_mc_addr_list(struct e1000_hw *hw,
313 u8 *mc_addr_list, u32 mc_addr_count,
314 u32 rar_used_count, u32 rar_count)
315{
316 u32 hash_value;
317 u32 i;
318
319 /*
320 * Load the first set of multicast addresses into the exact
321 * filters (RAR). If there are not enough to fill the RAR
322 * array, clear the filters.
323 */
324 for (i = rar_used_count; i < rar_count; i++) {
325 if (mc_addr_count) {
326 hw->mac.ops.rar_set(hw, mc_addr_list, i);
327 mc_addr_count--;
328 mc_addr_list += ETH_ALEN;
329 } else {
330 array_wr32(E1000_RA, i << 1, 0);
331 wrfl();
332 array_wr32(E1000_RA, (i << 1) + 1, 0);
333 wrfl();
334 }
335 }
336
337 /* Clear the old settings from the MTA */
338 hw_dbg("Clearing MTA\n");
339 for (i = 0; i < hw->mac.mta_reg_count; i++) {
340 array_wr32(E1000_MTA, i, 0);
341 wrfl();
342 }
343
344 /* Load any remaining multicast addresses into the hash table. */
345 for (; mc_addr_count > 0; mc_addr_count--) {
346 hash_value = igb_hash_mc_addr(hw, mc_addr_list);
347 hw_dbg("Hash value = 0x%03X\n", hash_value);
348 igb_mta_set(hw, hash_value);
349 mc_addr_list += ETH_ALEN;
350 }
351}
352
353/**
354 * igb_hash_mc_addr - Generate a multicast hash value 272 * igb_hash_mc_addr - Generate a multicast hash value
355 * @hw: pointer to the HW structure 273 * @hw: pointer to the HW structure
356 * @mc_addr: pointer to a multicast address 274 * @mc_addr: pointer to a multicast address
diff --git a/drivers/net/igb/e1000_mac.h b/drivers/net/igb/e1000_mac.h
index dc2f8cce15e7..cbee6af7d912 100644
--- a/drivers/net/igb/e1000_mac.h
+++ b/drivers/net/igb/e1000_mac.h
@@ -51,9 +51,6 @@ s32 igb_get_speed_and_duplex_copper(struct e1000_hw *hw, u16 *speed,
51 u16 *duplex); 51 u16 *duplex);
52s32 igb_id_led_init(struct e1000_hw *hw); 52s32 igb_id_led_init(struct e1000_hw *hw);
53s32 igb_led_off(struct e1000_hw *hw); 53s32 igb_led_off(struct e1000_hw *hw);
54void igb_update_mc_addr_list(struct e1000_hw *hw,
55 u8 *mc_addr_list, u32 mc_addr_count,
56 u32 rar_used_count, u32 rar_count);
57s32 igb_setup_link(struct e1000_hw *hw); 54s32 igb_setup_link(struct e1000_hw *hw);
58s32 igb_validate_mdi_setting(struct e1000_hw *hw); 55s32 igb_validate_mdi_setting(struct e1000_hw *hw);
59s32 igb_write_8bit_ctrl_reg(struct e1000_hw *hw, u32 reg, 56s32 igb_write_8bit_ctrl_reg(struct e1000_hw *hw, u32 reg,
@@ -62,7 +59,7 @@ s32 igb_write_8bit_ctrl_reg(struct e1000_hw *hw, u32 reg,
62void igb_clear_hw_cntrs_base(struct e1000_hw *hw); 59void igb_clear_hw_cntrs_base(struct e1000_hw *hw);
63void igb_clear_vfta(struct e1000_hw *hw); 60void igb_clear_vfta(struct e1000_hw *hw);
64void igb_config_collision_dist(struct e1000_hw *hw); 61void igb_config_collision_dist(struct e1000_hw *hw);
65void igb_init_rx_addrs(struct e1000_hw *hw, u16 rar_count); 62void igb_mta_set(struct e1000_hw *hw, u32 hash_value);
66void igb_put_hw_semaphore(struct e1000_hw *hw); 63void igb_put_hw_semaphore(struct e1000_hw *hw);
67void igb_rar_set(struct e1000_hw *hw, u8 *addr, u32 index); 64void igb_rar_set(struct e1000_hw *hw, u8 *addr, u32 index);
68s32 igb_check_alt_mac_addr(struct e1000_hw *hw); 65s32 igb_check_alt_mac_addr(struct e1000_hw *hw);
diff --git a/drivers/net/igb/e1000_regs.h b/drivers/net/igb/e1000_regs.h
index b95093d24c09..95523af26056 100644
--- a/drivers/net/igb/e1000_regs.h
+++ b/drivers/net/igb/e1000_regs.h
@@ -262,9 +262,6 @@
262#define E1000_RETA(_i) (0x05C00 + ((_i) * 4)) 262#define E1000_RETA(_i) (0x05C00 + ((_i) * 4))
263#define E1000_RSSRK(_i) (0x05C80 + ((_i) * 4)) /* RSS Random Key - RW Array */ 263#define E1000_RSSRK(_i) (0x05C80 + ((_i) * 4)) /* RSS Random Key - RW Array */
264 264
265#define E1000_REGISTER(a, reg) (((a)->mac.type < e1000_82576) \
266 ? reg : e1000_translate_register_82576(reg))
267
268#define wr32(reg, value) (writel(value, hw->hw_addr + reg)) 265#define wr32(reg, value) (writel(value, hw->hw_addr + reg))
269#define rd32(reg) (readl(hw->hw_addr + reg)) 266#define rd32(reg) (readl(hw->hw_addr + reg))
270#define wrfl() ((void)rd32(E1000_STATUS)) 267#define wrfl() ((void)rd32(E1000_STATUS))
diff --git a/drivers/net/igb/igb_main.c b/drivers/net/igb/igb_main.c
index b602c4dd0d14..8f66e15ec8d6 100644
--- a/drivers/net/igb/igb_main.c
+++ b/drivers/net/igb/igb_main.c
@@ -311,7 +311,7 @@ static void igb_assign_vector(struct igb_adapter *adapter, int rx_queue,
311 array_wr32(E1000_MSIXBM(0), msix_vector, msixbm); 311 array_wr32(E1000_MSIXBM(0), msix_vector, msixbm);
312 break; 312 break;
313 case e1000_82576: 313 case e1000_82576:
314 /* Kawela uses a table-based method for assigning vectors. 314 /* The 82576 uses a table-based method for assigning vectors.
315 Each queue has a single entry in the table to which we write 315 Each queue has a single entry in the table to which we write
316 a vector number along with a "valid" bit. Sadly, the layout 316 a vector number along with a "valid" bit. Sadly, the layout
317 of the table is somewhat counterintuitive. */ 317 of the table is somewhat counterintuitive. */
@@ -720,28 +720,6 @@ static void igb_get_hw_control(struct igb_adapter *adapter)
720 ctrl_ext | E1000_CTRL_EXT_DRV_LOAD); 720 ctrl_ext | E1000_CTRL_EXT_DRV_LOAD);
721} 721}
722 722
723static void igb_init_manageability(struct igb_adapter *adapter)
724{
725 struct e1000_hw *hw = &adapter->hw;
726
727 if (adapter->en_mng_pt) {
728 u32 manc2h = rd32(E1000_MANC2H);
729 u32 manc = rd32(E1000_MANC);
730
731 /* enable receiving management packets to the host */
732 /* this will probably generate destination unreachable messages
733 * from the host OS, but the packets will be handled on SMBUS */
734 manc |= E1000_MANC_EN_MNG2HOST;
735#define E1000_MNG2HOST_PORT_623 (1 << 5)
736#define E1000_MNG2HOST_PORT_664 (1 << 6)
737 manc2h |= E1000_MNG2HOST_PORT_623;
738 manc2h |= E1000_MNG2HOST_PORT_664;
739 wr32(E1000_MANC2H, manc2h);
740
741 wr32(E1000_MANC, manc);
742 }
743}
744
745/** 723/**
746 * igb_configure - configure the hardware for RX and TX 724 * igb_configure - configure the hardware for RX and TX
747 * @adapter: private board structure 725 * @adapter: private board structure
@@ -755,7 +733,6 @@ static void igb_configure(struct igb_adapter *adapter)
755 igb_set_multi(netdev); 733 igb_set_multi(netdev);
756 734
757 igb_restore_vlan(adapter); 735 igb_restore_vlan(adapter);
758 igb_init_manageability(adapter);
759 736
760 igb_configure_tx(adapter); 737 igb_configure_tx(adapter);
761 igb_setup_rctl(adapter); 738 igb_setup_rctl(adapter);
@@ -1372,7 +1349,8 @@ static void __devexit igb_remove(struct pci_dev *pdev)
1372 1349
1373 unregister_netdev(netdev); 1350 unregister_netdev(netdev);
1374 1351
1375 if (!igb_check_reset_block(&adapter->hw)) 1352 if (adapter->hw.phy.ops.reset_phy &&
1353 !igb_check_reset_block(&adapter->hw))
1376 adapter->hw.phy.ops.reset_phy(&adapter->hw); 1354 adapter->hw.phy.ops.reset_phy(&adapter->hw);
1377 1355
1378 igb_remove_device(&adapter->hw); 1356 igb_remove_device(&adapter->hw);
@@ -4523,8 +4501,6 @@ static void igb_io_resume(struct pci_dev *pdev)
4523 struct net_device *netdev = pci_get_drvdata(pdev); 4501 struct net_device *netdev = pci_get_drvdata(pdev);
4524 struct igb_adapter *adapter = netdev_priv(netdev); 4502 struct igb_adapter *adapter = netdev_priv(netdev);
4525 4503
4526 igb_init_manageability(adapter);
4527
4528 if (netif_running(netdev)) { 4504 if (netif_running(netdev)) {
4529 if (igb_up(adapter)) { 4505 if (igb_up(adapter)) {
4530 dev_err(&pdev->dev, "igb_up failed after reset\n"); 4506 dev_err(&pdev->dev, "igb_up failed after reset\n");
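
igb_remove() now checks that the reset_phy hook has been filled in before calling through it. A small user-space illustration of guarding an optional ops callback; the struct and names here are placeholders, not the e1000 definitions:

#include <stdio.h>
#include <stddef.h>

struct phy_ops {
	void (*reset_phy)(void *hw);
};

static void do_reset(void *hw) { (void)hw; puts("reset"); }

int main(void)
{
	struct phy_ops ops = { .reset_phy = NULL };

	if (ops.reset_phy)		/* hook absent: skip the call */
		ops.reset_phy(NULL);

	ops.reset_phy = do_reset;
	if (ops.reset_phy)		/* hook present: prints "reset" */
		ops.reset_phy(NULL);

	return 0;
}
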
diff --git a/drivers/net/irda/act200l-sir.c b/drivers/net/irda/act200l-sir.c
index d8b89c74aabd..37ab8c855719 100644
--- a/drivers/net/irda/act200l-sir.c
+++ b/drivers/net/irda/act200l-sir.c
@@ -107,7 +107,7 @@ static int act200l_open(struct sir_dev *dev)
107{ 107{
108 struct qos_info *qos = &dev->qos; 108 struct qos_info *qos = &dev->qos;
109 109
110 IRDA_DEBUG(2, "%s()\n", __FUNCTION__ ); 110 IRDA_DEBUG(2, "%s()\n", __func__ );
111 111
112 /* Power on the dongle */ 112 /* Power on the dongle */
113 sirdev_set_dtr_rts(dev, TRUE, TRUE); 113 sirdev_set_dtr_rts(dev, TRUE, TRUE);
@@ -124,7 +124,7 @@ static int act200l_open(struct sir_dev *dev)
124 124
125static int act200l_close(struct sir_dev *dev) 125static int act200l_close(struct sir_dev *dev)
126{ 126{
127 IRDA_DEBUG(2, "%s()\n", __FUNCTION__ ); 127 IRDA_DEBUG(2, "%s()\n", __func__ );
128 128
129 /* Power off the dongle */ 129 /* Power off the dongle */
130 sirdev_set_dtr_rts(dev, FALSE, FALSE); 130 sirdev_set_dtr_rts(dev, FALSE, FALSE);
@@ -143,7 +143,7 @@ static int act200l_change_speed(struct sir_dev *dev, unsigned speed)
143 u8 control[3]; 143 u8 control[3];
144 int ret = 0; 144 int ret = 0;
145 145
146 IRDA_DEBUG(2, "%s()\n", __FUNCTION__ ); 146 IRDA_DEBUG(2, "%s()\n", __func__ );
147 147
148 /* Clear DTR and set RTS to enter command mode */ 148 /* Clear DTR and set RTS to enter command mode */
149 sirdev_set_dtr_rts(dev, FALSE, TRUE); 149 sirdev_set_dtr_rts(dev, FALSE, TRUE);
@@ -212,7 +212,7 @@ static int act200l_reset(struct sir_dev *dev)
212 }; 212 };
213 int ret = 0; 213 int ret = 0;
214 214
215 IRDA_DEBUG(2, "%s()\n", __FUNCTION__ ); 215 IRDA_DEBUG(2, "%s()\n", __func__ );
216 216
217 switch (state) { 217 switch (state) {
218 case SIRDEV_STATE_DONGLE_RESET: 218 case SIRDEV_STATE_DONGLE_RESET:
@@ -240,7 +240,7 @@ static int act200l_reset(struct sir_dev *dev)
240 dev->speed = 9600; 240 dev->speed = 9600;
241 break; 241 break;
242 default: 242 default:
243 IRDA_ERROR("%s(), unknown state %d\n", __FUNCTION__, state); 243 IRDA_ERROR("%s(), unknown state %d\n", __func__, state);
244 ret = -1; 244 ret = -1;
245 break; 245 break;
246 } 246 }
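
The __FUNCTION__ to __func__ replacements in this and the following IrDA drivers switch from the GCC-specific spelling to the C99 standard one; both expand to the enclosing function's name. Minimal example:

#include <stdio.h>

static void show(void)
{
	/* __func__ is standard C99; __FUNCTION__ is a GCC alias kept only
	 * for backwards compatibility */
	printf("%s()\n", __func__);	/* prints "show()" */
}

int main(void)
{
	show();
	return 0;
}
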
diff --git a/drivers/net/irda/actisys-sir.c b/drivers/net/irda/actisys-sir.c
index 736d2473b7e1..50b2141a6103 100644
--- a/drivers/net/irda/actisys-sir.c
+++ b/drivers/net/irda/actisys-sir.c
@@ -165,7 +165,7 @@ static int actisys_change_speed(struct sir_dev *dev, unsigned speed)
165 int ret = 0; 165 int ret = 0;
166 int i = 0; 166 int i = 0;
167 167
168 IRDA_DEBUG(4, "%s(), speed=%d (was %d)\n", __FUNCTION__, 168 IRDA_DEBUG(4, "%s(), speed=%d (was %d)\n", __func__,
169 speed, dev->speed); 169 speed, dev->speed);
170 170
171 /* dongle was already resetted from irda_request state machine, 171 /* dongle was already resetted from irda_request state machine,
diff --git a/drivers/net/irda/ali-ircc.c b/drivers/net/irda/ali-ircc.c
index 083b0dd70fef..2ff181861d2d 100644
--- a/drivers/net/irda/ali-ircc.c
+++ b/drivers/net/irda/ali-ircc.c
@@ -152,7 +152,7 @@ static int __init ali_ircc_init(void)
152 int reg, revision; 152 int reg, revision;
153 int i = 0; 153 int i = 0;
154 154
155 IRDA_DEBUG(2, "%s(), ---------------- Start ----------------\n", __FUNCTION__); 155 IRDA_DEBUG(2, "%s(), ---------------- Start ----------------\n", __func__);
156 156
157 ret = platform_driver_register(&ali_ircc_driver); 157 ret = platform_driver_register(&ali_ircc_driver);
158 if (ret) { 158 if (ret) {
@@ -166,7 +166,7 @@ static int __init ali_ircc_init(void)
166 /* Probe for all the ALi chipsets we know about */ 166 /* Probe for all the ALi chipsets we know about */
167 for (chip= chips; chip->name; chip++, i++) 167 for (chip= chips; chip->name; chip++, i++)
168 { 168 {
169 IRDA_DEBUG(2, "%s(), Probing for %s ...\n", __FUNCTION__, chip->name); 169 IRDA_DEBUG(2, "%s(), Probing for %s ...\n", __func__, chip->name);
170 170
171 /* Try all config registers for this chip */ 171 /* Try all config registers for this chip */
172 for (cfg=0; cfg<2; cfg++) 172 for (cfg=0; cfg<2; cfg++)
@@ -196,11 +196,11 @@ static int __init ali_ircc_init(void)
196 196
197 if (reg == chip->cid_value) 197 if (reg == chip->cid_value)
198 { 198 {
199 IRDA_DEBUG(2, "%s(), Chip found at 0x%03x\n", __FUNCTION__, cfg_base); 199 IRDA_DEBUG(2, "%s(), Chip found at 0x%03x\n", __func__, cfg_base);
200 200
201 outb(0x1F, cfg_base); 201 outb(0x1F, cfg_base);
202 revision = inb(cfg_base+1); 202 revision = inb(cfg_base+1);
203 IRDA_DEBUG(2, "%s(), Found %s chip, revision=%d\n", __FUNCTION__, 203 IRDA_DEBUG(2, "%s(), Found %s chip, revision=%d\n", __func__,
204 chip->name, revision); 204 chip->name, revision);
205 205
206 /* 206 /*
@@ -223,14 +223,14 @@ static int __init ali_ircc_init(void)
223 } 223 }
224 else 224 else
225 { 225 {
226 IRDA_DEBUG(2, "%s(), No %s chip at 0x%03x\n", __FUNCTION__, chip->name, cfg_base); 226 IRDA_DEBUG(2, "%s(), No %s chip at 0x%03x\n", __func__, chip->name, cfg_base);
227 } 227 }
228 /* Exit configuration */ 228 /* Exit configuration */
229 outb(0xbb, cfg_base); 229 outb(0xbb, cfg_base);
230 } 230 }
231 } 231 }
232 232
233 IRDA_DEBUG(2, "%s(), ----------------- End -----------------\n", __FUNCTION__); 233 IRDA_DEBUG(2, "%s(), ----------------- End -----------------\n", __func__);
234 234
235 if (ret) 235 if (ret)
236 platform_driver_unregister(&ali_ircc_driver); 236 platform_driver_unregister(&ali_ircc_driver);
@@ -248,7 +248,7 @@ static void __exit ali_ircc_cleanup(void)
248{ 248{
249 int i; 249 int i;
250 250
251 IRDA_DEBUG(2, "%s(), ---------------- Start ----------------\n", __FUNCTION__); 251 IRDA_DEBUG(2, "%s(), ---------------- Start ----------------\n", __func__);
252 252
253 for (i=0; i < ARRAY_SIZE(dev_self); i++) { 253 for (i=0; i < ARRAY_SIZE(dev_self); i++) {
254 if (dev_self[i]) 254 if (dev_self[i])
@@ -257,7 +257,7 @@ static void __exit ali_ircc_cleanup(void)
257 257
258 platform_driver_unregister(&ali_ircc_driver); 258 platform_driver_unregister(&ali_ircc_driver);
259 259
260 IRDA_DEBUG(2, "%s(), ----------------- End -----------------\n", __FUNCTION__); 260 IRDA_DEBUG(2, "%s(), ----------------- End -----------------\n", __func__);
261} 261}
262 262
263/* 263/*
@@ -273,11 +273,11 @@ static int ali_ircc_open(int i, chipio_t *info)
273 int dongle_id; 273 int dongle_id;
274 int err; 274 int err;
275 275
276 IRDA_DEBUG(2, "%s(), ---------------- Start ----------------\n", __FUNCTION__); 276 IRDA_DEBUG(2, "%s(), ---------------- Start ----------------\n", __func__);
277 277
278 if (i >= ARRAY_SIZE(dev_self)) { 278 if (i >= ARRAY_SIZE(dev_self)) {
279 IRDA_ERROR("%s(), maximum number of supported chips reached!\n", 279 IRDA_ERROR("%s(), maximum number of supported chips reached!\n",
280 __FUNCTION__); 280 __func__);
281 return -ENOMEM; 281 return -ENOMEM;
282 } 282 }
283 283
@@ -288,7 +288,7 @@ static int ali_ircc_open(int i, chipio_t *info)
288 dev = alloc_irdadev(sizeof(*self)); 288 dev = alloc_irdadev(sizeof(*self));
289 if (dev == NULL) { 289 if (dev == NULL) {
290 IRDA_ERROR("%s(), can't allocate memory for control block!\n", 290 IRDA_ERROR("%s(), can't allocate memory for control block!\n",
291 __FUNCTION__); 291 __func__);
292 return -ENOMEM; 292 return -ENOMEM;
293 } 293 }
294 294
@@ -312,7 +312,7 @@ static int ali_ircc_open(int i, chipio_t *info)
312 /* Reserve the ioports that we need */ 312 /* Reserve the ioports that we need */
313 if (!request_region(self->io.fir_base, self->io.fir_ext, 313 if (!request_region(self->io.fir_base, self->io.fir_ext,
314 ALI_IRCC_DRIVER_NAME)) { 314 ALI_IRCC_DRIVER_NAME)) {
315 IRDA_WARNING("%s(), can't get iobase of 0x%03x\n", __FUNCTION__, 315 IRDA_WARNING("%s(), can't get iobase of 0x%03x\n", __func__,
316 self->io.fir_base); 316 self->io.fir_base);
317 err = -ENODEV; 317 err = -ENODEV;
318 goto err_out1; 318 goto err_out1;
@@ -370,19 +370,19 @@ static int ali_ircc_open(int i, chipio_t *info)
370 370
371 err = register_netdev(dev); 371 err = register_netdev(dev);
372 if (err) { 372 if (err) {
373 IRDA_ERROR("%s(), register_netdev() failed!\n", __FUNCTION__); 373 IRDA_ERROR("%s(), register_netdev() failed!\n", __func__);
374 goto err_out4; 374 goto err_out4;
375 } 375 }
376 IRDA_MESSAGE("IrDA: Registered device %s\n", dev->name); 376 IRDA_MESSAGE("IrDA: Registered device %s\n", dev->name);
377 377
378 /* Check dongle id */ 378 /* Check dongle id */
379 dongle_id = ali_ircc_read_dongle_id(i, info); 379 dongle_id = ali_ircc_read_dongle_id(i, info);
380 IRDA_MESSAGE("%s(), %s, Found dongle: %s\n", __FUNCTION__, 380 IRDA_MESSAGE("%s(), %s, Found dongle: %s\n", __func__,
381 ALI_IRCC_DRIVER_NAME, dongle_types[dongle_id]); 381 ALI_IRCC_DRIVER_NAME, dongle_types[dongle_id]);
382 382
383 self->io.dongle_id = dongle_id; 383 self->io.dongle_id = dongle_id;
384 384
385 IRDA_DEBUG(2, "%s(), ----------------- End -----------------\n", __FUNCTION__); 385 IRDA_DEBUG(2, "%s(), ----------------- End -----------------\n", __func__);
386 386
387 return 0; 387 return 0;
388 388
@@ -411,7 +411,7 @@ static int __exit ali_ircc_close(struct ali_ircc_cb *self)
411{ 411{
412 int iobase; 412 int iobase;
413 413
414 IRDA_DEBUG(4, "%s(), ---------------- Start ----------------\n", __FUNCTION__); 414 IRDA_DEBUG(4, "%s(), ---------------- Start ----------------\n", __func__);
415 415
416 IRDA_ASSERT(self != NULL, return -1;); 416 IRDA_ASSERT(self != NULL, return -1;);
417 417
@@ -421,7 +421,7 @@ static int __exit ali_ircc_close(struct ali_ircc_cb *self)
421 unregister_netdev(self->netdev); 421 unregister_netdev(self->netdev);
422 422
423 /* Release the PORT that this driver is using */ 423 /* Release the PORT that this driver is using */
424 IRDA_DEBUG(4, "%s(), Releasing Region %03x\n", __FUNCTION__, self->io.fir_base); 424 IRDA_DEBUG(4, "%s(), Releasing Region %03x\n", __func__, self->io.fir_base);
425 release_region(self->io.fir_base, self->io.fir_ext); 425 release_region(self->io.fir_base, self->io.fir_ext);
426 426
427 if (self->tx_buff.head) 427 if (self->tx_buff.head)
@@ -435,7 +435,7 @@ static int __exit ali_ircc_close(struct ali_ircc_cb *self)
435 dev_self[self->index] = NULL; 435 dev_self[self->index] = NULL;
436 free_netdev(self->netdev); 436 free_netdev(self->netdev);
437 437
438 IRDA_DEBUG(2, "%s(), ----------------- End -----------------\n", __FUNCTION__); 438 IRDA_DEBUG(2, "%s(), ----------------- End -----------------\n", __func__);
439 439
440 return 0; 440 return 0;
441} 441}
@@ -478,7 +478,7 @@ static int ali_ircc_probe_53(ali_chip_t *chip, chipio_t *info)
478 int cfg_base = info->cfg_base; 478 int cfg_base = info->cfg_base;
479 int hi, low, reg; 479 int hi, low, reg;
480 480
481 IRDA_DEBUG(2, "%s(), ---------------- Start ----------------\n", __FUNCTION__); 481 IRDA_DEBUG(2, "%s(), ---------------- Start ----------------\n", __func__);
482 482
483 /* Enter Configuration */ 483 /* Enter Configuration */
484 outb(chip->entr1, cfg_base); 484 outb(chip->entr1, cfg_base);
@@ -497,13 +497,13 @@ static int ali_ircc_probe_53(ali_chip_t *chip, chipio_t *info)
497 497
498 info->sir_base = info->fir_base; 498 info->sir_base = info->fir_base;
499 499
500 IRDA_DEBUG(2, "%s(), probing fir_base=0x%03x\n", __FUNCTION__, info->fir_base); 500 IRDA_DEBUG(2, "%s(), probing fir_base=0x%03x\n", __func__, info->fir_base);
501 501
502 /* Read IRQ control register */ 502 /* Read IRQ control register */
503 outb(0x70, cfg_base); 503 outb(0x70, cfg_base);
504 reg = inb(cfg_base+1); 504 reg = inb(cfg_base+1);
505 info->irq = reg & 0x0f; 505 info->irq = reg & 0x0f;
506 IRDA_DEBUG(2, "%s(), probing irq=%d\n", __FUNCTION__, info->irq); 506 IRDA_DEBUG(2, "%s(), probing irq=%d\n", __func__, info->irq);
507 507
508 /* Read DMA channel */ 508 /* Read DMA channel */
509 outb(0x74, cfg_base); 509 outb(0x74, cfg_base);
@@ -511,26 +511,26 @@ static int ali_ircc_probe_53(ali_chip_t *chip, chipio_t *info)
511 info->dma = reg & 0x07; 511 info->dma = reg & 0x07;
512 512
513 if(info->dma == 0x04) 513 if(info->dma == 0x04)
514 IRDA_WARNING("%s(), No DMA channel assigned !\n", __FUNCTION__); 514 IRDA_WARNING("%s(), No DMA channel assigned !\n", __func__);
515 else 515 else
516 IRDA_DEBUG(2, "%s(), probing dma=%d\n", __FUNCTION__, info->dma); 516 IRDA_DEBUG(2, "%s(), probing dma=%d\n", __func__, info->dma);
517 517
518 /* Read Enabled Status */ 518 /* Read Enabled Status */
519 outb(0x30, cfg_base); 519 outb(0x30, cfg_base);
520 reg = inb(cfg_base+1); 520 reg = inb(cfg_base+1);
521 info->enabled = (reg & 0x80) && (reg & 0x01); 521 info->enabled = (reg & 0x80) && (reg & 0x01);
522 IRDA_DEBUG(2, "%s(), probing enabled=%d\n", __FUNCTION__, info->enabled); 522 IRDA_DEBUG(2, "%s(), probing enabled=%d\n", __func__, info->enabled);
523 523
524 /* Read Power Status */ 524 /* Read Power Status */
525 outb(0x22, cfg_base); 525 outb(0x22, cfg_base);
526 reg = inb(cfg_base+1); 526 reg = inb(cfg_base+1);
527 info->suspended = (reg & 0x20); 527 info->suspended = (reg & 0x20);
528 IRDA_DEBUG(2, "%s(), probing suspended=%d\n", __FUNCTION__, info->suspended); 528 IRDA_DEBUG(2, "%s(), probing suspended=%d\n", __func__, info->suspended);
529 529
530 /* Exit configuration */ 530 /* Exit configuration */
531 outb(0xbb, cfg_base); 531 outb(0xbb, cfg_base);
532 532
533 IRDA_DEBUG(2, "%s(), ----------------- End -----------------\n", __FUNCTION__); 533 IRDA_DEBUG(2, "%s(), ----------------- End -----------------\n", __func__);
534 534
535 return 0; 535 return 0;
536} 536}
@@ -548,7 +548,7 @@ static int ali_ircc_setup(chipio_t *info)
548 int version; 548 int version;
549 int iobase = info->fir_base; 549 int iobase = info->fir_base;
550 550
551 IRDA_DEBUG(2, "%s(), ---------------- Start ----------------\n", __FUNCTION__); 551 IRDA_DEBUG(2, "%s(), ---------------- Start ----------------\n", __func__);
552 552
553 /* Locking comments : 553 /* Locking comments :
554 * Most operations here need to be protected. We are called before 554 * Most operations here need to be protected. We are called before
@@ -609,7 +609,7 @@ static int ali_ircc_setup(chipio_t *info)
609 // outb(UART_IER_RDI, iobase+UART_IER); //benjamin 2000/11/23 01:25PM 609 // outb(UART_IER_RDI, iobase+UART_IER); //benjamin 2000/11/23 01:25PM
610 // Turn on the interrupts in ali_ircc_net_open 610 // Turn on the interrupts in ali_ircc_net_open
611 611
612 IRDA_DEBUG(2, "%s(), ----------------- End ------------------\n", __FUNCTION__); 612 IRDA_DEBUG(2, "%s(), ----------------- End ------------------\n", __func__);
613 613
614 return 0; 614 return 0;
615} 615}
@@ -626,7 +626,7 @@ static int ali_ircc_read_dongle_id (int i, chipio_t *info)
626 int dongle_id, reg; 626 int dongle_id, reg;
627 int cfg_base = info->cfg_base; 627 int cfg_base = info->cfg_base;
628 628
629 IRDA_DEBUG(2, "%s(), ---------------- Start ----------------\n", __FUNCTION__); 629 IRDA_DEBUG(2, "%s(), ---------------- Start ----------------\n", __func__);
630 630
631 /* Enter Configuration */ 631 /* Enter Configuration */
632 outb(chips[i].entr1, cfg_base); 632 outb(chips[i].entr1, cfg_base);
@@ -640,13 +640,13 @@ static int ali_ircc_read_dongle_id (int i, chipio_t *info)
640 outb(0xf0, cfg_base); 640 outb(0xf0, cfg_base);
641 reg = inb(cfg_base+1); 641 reg = inb(cfg_base+1);
642 dongle_id = ((reg>>6)&0x02) | ((reg>>5)&0x01); 642 dongle_id = ((reg>>6)&0x02) | ((reg>>5)&0x01);
643 IRDA_DEBUG(2, "%s(), probing dongle_id=%d, dongle_types=%s\n", __FUNCTION__, 643 IRDA_DEBUG(2, "%s(), probing dongle_id=%d, dongle_types=%s\n", __func__,
644 dongle_id, dongle_types[dongle_id]); 644 dongle_id, dongle_types[dongle_id]);
645 645
646 /* Exit configuration */ 646 /* Exit configuration */
647 outb(0xbb, cfg_base); 647 outb(0xbb, cfg_base);
648 648
649 IRDA_DEBUG(2, "%s(), ----------------- End ------------------\n", __FUNCTION__); 649 IRDA_DEBUG(2, "%s(), ----------------- End ------------------\n", __func__);
650 650
651 return dongle_id; 651 return dongle_id;
652} 652}
@@ -663,7 +663,7 @@ static irqreturn_t ali_ircc_interrupt(int irq, void *dev_id)
663 struct ali_ircc_cb *self; 663 struct ali_ircc_cb *self;
664 int ret; 664 int ret;
665 665
666 IRDA_DEBUG(2, "%s(), ---------------- Start ----------------\n", __FUNCTION__); 666 IRDA_DEBUG(2, "%s(), ---------------- Start ----------------\n", __func__);
667 667
668 self = dev->priv; 668 self = dev->priv;
669 669
@@ -677,7 +677,7 @@ static irqreturn_t ali_ircc_interrupt(int irq, void *dev_id)
677 677
678 spin_unlock(&self->lock); 678 spin_unlock(&self->lock);
679 679
680 IRDA_DEBUG(2, "%s(), ----------------- End ------------------\n", __FUNCTION__); 680 IRDA_DEBUG(2, "%s(), ----------------- End ------------------\n", __func__);
681 return ret; 681 return ret;
682} 682}
683/* 683/*
@@ -691,7 +691,7 @@ static irqreturn_t ali_ircc_fir_interrupt(struct ali_ircc_cb *self)
691 __u8 eir, OldMessageCount; 691 __u8 eir, OldMessageCount;
692 int iobase, tmp; 692 int iobase, tmp;
693 693
694 IRDA_DEBUG(1, "%s(), ---------------- Start ----------------\n", __FUNCTION__); 694 IRDA_DEBUG(1, "%s(), ---------------- Start ----------------\n", __func__);
695 695
696 iobase = self->io.fir_base; 696 iobase = self->io.fir_base;
697 697
@@ -704,10 +704,10 @@ static irqreturn_t ali_ircc_fir_interrupt(struct ali_ircc_cb *self)
704 //self->ier = inb(iobase+FIR_IER); 2000/12/1 04:32PM 704 //self->ier = inb(iobase+FIR_IER); 2000/12/1 04:32PM
705 eir = self->InterruptID & self->ier; /* Mask out the interesting ones */ 705 eir = self->InterruptID & self->ier; /* Mask out the interesting ones */
706 706
707 IRDA_DEBUG(1, "%s(), self->InterruptID = %x\n", __FUNCTION__,self->InterruptID); 707 IRDA_DEBUG(1, "%s(), self->InterruptID = %x\n", __func__,self->InterruptID);
708 IRDA_DEBUG(1, "%s(), self->LineStatus = %x\n", __FUNCTION__,self->LineStatus); 708 IRDA_DEBUG(1, "%s(), self->LineStatus = %x\n", __func__,self->LineStatus);
709 IRDA_DEBUG(1, "%s(), self->ier = %x\n", __FUNCTION__,self->ier); 709 IRDA_DEBUG(1, "%s(), self->ier = %x\n", __func__,self->ier);
710 IRDA_DEBUG(1, "%s(), eir = %x\n", __FUNCTION__,eir); 710 IRDA_DEBUG(1, "%s(), eir = %x\n", __func__,eir);
711 711
712 /* Disable interrupts */ 712 /* Disable interrupts */
713 SetCOMInterrupts(self, FALSE); 713 SetCOMInterrupts(self, FALSE);
@@ -718,7 +718,7 @@ static irqreturn_t ali_ircc_fir_interrupt(struct ali_ircc_cb *self)
718 { 718 {
719 if (self->io.direction == IO_XMIT) /* TX */ 719 if (self->io.direction == IO_XMIT) /* TX */
720 { 720 {
721 IRDA_DEBUG(1, "%s(), ******* IIR_EOM (Tx) *******\n", __FUNCTION__); 721 IRDA_DEBUG(1, "%s(), ******* IIR_EOM (Tx) *******\n", __func__);
722 722
723 if(ali_ircc_dma_xmit_complete(self)) 723 if(ali_ircc_dma_xmit_complete(self))
724 { 724 {
@@ -737,23 +737,23 @@ static irqreturn_t ali_ircc_fir_interrupt(struct ali_ircc_cb *self)
737 } 737 }
738 else /* RX */ 738 else /* RX */
739 { 739 {
740 IRDA_DEBUG(1, "%s(), ******* IIR_EOM (Rx) *******\n", __FUNCTION__); 740 IRDA_DEBUG(1, "%s(), ******* IIR_EOM (Rx) *******\n", __func__);
741 741
742 if(OldMessageCount > ((self->LineStatus+1) & 0x07)) 742 if(OldMessageCount > ((self->LineStatus+1) & 0x07))
743 { 743 {
744 self->rcvFramesOverflow = TRUE; 744 self->rcvFramesOverflow = TRUE;
745 IRDA_DEBUG(1, "%s(), ******* self->rcvFramesOverflow = TRUE ******** \n", __FUNCTION__); 745 IRDA_DEBUG(1, "%s(), ******* self->rcvFramesOverflow = TRUE ******** \n", __func__);
746 } 746 }
747 747
748 if (ali_ircc_dma_receive_complete(self)) 748 if (ali_ircc_dma_receive_complete(self))
749 { 749 {
750 IRDA_DEBUG(1, "%s(), ******* receive complete ******** \n", __FUNCTION__); 750 IRDA_DEBUG(1, "%s(), ******* receive complete ******** \n", __func__);
751 751
752 self->ier = IER_EOM; 752 self->ier = IER_EOM;
753 } 753 }
754 else 754 else
755 { 755 {
756 IRDA_DEBUG(1, "%s(), ******* Not receive complete ******** \n", __FUNCTION__); 756 IRDA_DEBUG(1, "%s(), ******* Not receive complete ******** \n", __func__);
757 757
758 self->ier = IER_EOM | IER_TIMER; 758 self->ier = IER_EOM | IER_TIMER;
759 } 759 }
@@ -766,7 +766,7 @@ static irqreturn_t ali_ircc_fir_interrupt(struct ali_ircc_cb *self)
766 if(OldMessageCount > ((self->LineStatus+1) & 0x07)) 766 if(OldMessageCount > ((self->LineStatus+1) & 0x07))
767 { 767 {
768 self->rcvFramesOverflow = TRUE; 768 self->rcvFramesOverflow = TRUE;
769 IRDA_DEBUG(1, "%s(), ******* self->rcvFramesOverflow = TRUE ******* \n", __FUNCTION__); 769 IRDA_DEBUG(1, "%s(), ******* self->rcvFramesOverflow = TRUE ******* \n", __func__);
770 } 770 }
771 /* Disable Timer */ 771 /* Disable Timer */
772 switch_bank(iobase, BANK1); 772 switch_bank(iobase, BANK1);
@@ -798,7 +798,7 @@ static irqreturn_t ali_ircc_fir_interrupt(struct ali_ircc_cb *self)
798 /* Restore Interrupt */ 798 /* Restore Interrupt */
799 SetCOMInterrupts(self, TRUE); 799 SetCOMInterrupts(self, TRUE);
800 800
801 IRDA_DEBUG(1, "%s(), ----------------- End ---------------\n", __FUNCTION__); 801 IRDA_DEBUG(1, "%s(), ----------------- End ---------------\n", __func__);
802 return IRQ_RETVAL(eir); 802 return IRQ_RETVAL(eir);
803} 803}
804 804
@@ -813,7 +813,7 @@ static irqreturn_t ali_ircc_sir_interrupt(struct ali_ircc_cb *self)
813 int iobase; 813 int iobase;
814 int iir, lsr; 814 int iir, lsr;
815 815
816 IRDA_DEBUG(2, "%s(), ---------------- Start ----------------\n", __FUNCTION__); 816 IRDA_DEBUG(2, "%s(), ---------------- Start ----------------\n", __func__);
817 817
818 iobase = self->io.sir_base; 818 iobase = self->io.sir_base;
819 819
@@ -822,13 +822,13 @@ static irqreturn_t ali_ircc_sir_interrupt(struct ali_ircc_cb *self)
822 /* Clear interrupt */ 822 /* Clear interrupt */
823 lsr = inb(iobase+UART_LSR); 823 lsr = inb(iobase+UART_LSR);
824 824
825 IRDA_DEBUG(4, "%s(), iir=%02x, lsr=%02x, iobase=%#x\n", __FUNCTION__, 825 IRDA_DEBUG(4, "%s(), iir=%02x, lsr=%02x, iobase=%#x\n", __func__,
826 iir, lsr, iobase); 826 iir, lsr, iobase);
827 827
828 switch (iir) 828 switch (iir)
829 { 829 {
830 case UART_IIR_RLSI: 830 case UART_IIR_RLSI:
831 IRDA_DEBUG(2, "%s(), RLSI\n", __FUNCTION__); 831 IRDA_DEBUG(2, "%s(), RLSI\n", __func__);
832 break; 832 break;
833 case UART_IIR_RDI: 833 case UART_IIR_RDI:
834 /* Receive interrupt */ 834 /* Receive interrupt */
@@ -842,14 +842,14 @@ static irqreturn_t ali_ircc_sir_interrupt(struct ali_ircc_cb *self)
842 } 842 }
843 break; 843 break;
844 default: 844 default:
845 IRDA_DEBUG(0, "%s(), unhandled IIR=%#x\n", __FUNCTION__, iir); 845 IRDA_DEBUG(0, "%s(), unhandled IIR=%#x\n", __func__, iir);
846 break; 846 break;
847 } 847 }
848 848
849 } 849 }
850 850
851 851
852 IRDA_DEBUG(2, "%s(), ----------------- End ------------------\n", __FUNCTION__); 852 IRDA_DEBUG(2, "%s(), ----------------- End ------------------\n", __func__);
853 853
854 return IRQ_RETVAL(iir); 854 return IRQ_RETVAL(iir);
855} 855}
@@ -866,7 +866,7 @@ static void ali_ircc_sir_receive(struct ali_ircc_cb *self)
866 int boguscount = 0; 866 int boguscount = 0;
867 int iobase; 867 int iobase;
868 868
869 IRDA_DEBUG(2, "%s(), ---------------- Start ----------------\n", __FUNCTION__); 869 IRDA_DEBUG(2, "%s(), ---------------- Start ----------------\n", __func__);
870 IRDA_ASSERT(self != NULL, return;); 870 IRDA_ASSERT(self != NULL, return;);
871 871
872 iobase = self->io.sir_base; 872 iobase = self->io.sir_base;
@@ -881,12 +881,12 @@ static void ali_ircc_sir_receive(struct ali_ircc_cb *self)
881 881
882 /* Make sure we don't stay here too long */ 882 /* Make sure we don't stay here too long */
883 if (boguscount++ > 32) { 883 if (boguscount++ > 32) {
884 IRDA_DEBUG(2,"%s(), breaking!\n", __FUNCTION__); 884 IRDA_DEBUG(2,"%s(), breaking!\n", __func__);
885 break; 885 break;
886 } 886 }
887 } while (inb(iobase+UART_LSR) & UART_LSR_DR); 887 } while (inb(iobase+UART_LSR) & UART_LSR_DR);
888 888
889 IRDA_DEBUG(2, "%s(), ----------------- End ------------------\n", __FUNCTION__ ); 889 IRDA_DEBUG(2, "%s(), ----------------- End ------------------\n", __func__ );
890} 890}
891 891
892/* 892/*
@@ -903,7 +903,7 @@ static void ali_ircc_sir_write_wakeup(struct ali_ircc_cb *self)
903 903
904 IRDA_ASSERT(self != NULL, return;); 904 IRDA_ASSERT(self != NULL, return;);
905 905
906 IRDA_DEBUG(2, "%s(), ---------------- Start ----------------\n", __FUNCTION__ ); 906 IRDA_DEBUG(2, "%s(), ---------------- Start ----------------\n", __func__ );
907 907
908 iobase = self->io.sir_base; 908 iobase = self->io.sir_base;
909 909
@@ -922,16 +922,16 @@ static void ali_ircc_sir_write_wakeup(struct ali_ircc_cb *self)
922 { 922 {
923 /* We must wait until all data are gone */ 923 /* We must wait until all data are gone */
924 while(!(inb(iobase+UART_LSR) & UART_LSR_TEMT)) 924 while(!(inb(iobase+UART_LSR) & UART_LSR_TEMT))
925 IRDA_DEBUG(1, "%s(), UART_LSR_THRE\n", __FUNCTION__ ); 925 IRDA_DEBUG(1, "%s(), UART_LSR_THRE\n", __func__ );
926 926
927 IRDA_DEBUG(1, "%s(), Changing speed! self->new_speed = %d\n", __FUNCTION__ , self->new_speed); 927 IRDA_DEBUG(1, "%s(), Changing speed! self->new_speed = %d\n", __func__ , self->new_speed);
928 ali_ircc_change_speed(self, self->new_speed); 928 ali_ircc_change_speed(self, self->new_speed);
929 self->new_speed = 0; 929 self->new_speed = 0;
930 930
931 // benjamin 2000/11/10 06:32PM 931 // benjamin 2000/11/10 06:32PM
932 if (self->io.speed > 115200) 932 if (self->io.speed > 115200)
933 { 933 {
934 IRDA_DEBUG(2, "%s(), ali_ircc_change_speed from UART_LSR_TEMT \n", __FUNCTION__ ); 934 IRDA_DEBUG(2, "%s(), ali_ircc_change_speed from UART_LSR_TEMT \n", __func__ );
935 935
936 self->ier = IER_EOM; 936 self->ier = IER_EOM;
937 // SetCOMInterrupts(self, TRUE); 937 // SetCOMInterrupts(self, TRUE);
@@ -949,7 +949,7 @@ static void ali_ircc_sir_write_wakeup(struct ali_ircc_cb *self)
949 outb(UART_IER_RDI, iobase+UART_IER); 949 outb(UART_IER_RDI, iobase+UART_IER);
950 } 950 }
951 951
952 IRDA_DEBUG(2, "%s(), ----------------- End ------------------\n", __FUNCTION__ ); 952 IRDA_DEBUG(2, "%s(), ----------------- End ------------------\n", __func__ );
953} 953}
954 954
955static void ali_ircc_change_speed(struct ali_ircc_cb *self, __u32 baud) 955static void ali_ircc_change_speed(struct ali_ircc_cb *self, __u32 baud)
@@ -957,9 +957,9 @@ static void ali_ircc_change_speed(struct ali_ircc_cb *self, __u32 baud)
957 struct net_device *dev = self->netdev; 957 struct net_device *dev = self->netdev;
958 int iobase; 958 int iobase;
959 959
960 IRDA_DEBUG(1, "%s(), ---------------- Start ----------------\n", __FUNCTION__ ); 960 IRDA_DEBUG(1, "%s(), ---------------- Start ----------------\n", __func__ );
961 961
962 IRDA_DEBUG(2, "%s(), setting speed = %d \n", __FUNCTION__ , baud); 962 IRDA_DEBUG(2, "%s(), setting speed = %d \n", __func__ , baud);
963 963
964 /* This function *must* be called with irq off and spin-lock. 964 /* This function *must* be called with irq off and spin-lock.
965 * - Jean II */ 965 * - Jean II */
@@ -998,7 +998,7 @@ static void ali_ircc_change_speed(struct ali_ircc_cb *self, __u32 baud)
998 998
999 netif_wake_queue(self->netdev); 999 netif_wake_queue(self->netdev);
1000 1000
1001 IRDA_DEBUG(2, "%s(), ----------------- End ------------------\n", __FUNCTION__ ); 1001 IRDA_DEBUG(2, "%s(), ----------------- End ------------------\n", __func__ );
1002} 1002}
1003 1003
1004static void ali_ircc_fir_change_speed(struct ali_ircc_cb *priv, __u32 baud) 1004static void ali_ircc_fir_change_speed(struct ali_ircc_cb *priv, __u32 baud)
@@ -1008,14 +1008,14 @@ static void ali_ircc_fir_change_speed(struct ali_ircc_cb *priv, __u32 baud)
1008 struct ali_ircc_cb *self = (struct ali_ircc_cb *) priv; 1008 struct ali_ircc_cb *self = (struct ali_ircc_cb *) priv;
1009 struct net_device *dev; 1009 struct net_device *dev;
1010 1010
1011 IRDA_DEBUG(1, "%s(), ---------------- Start ----------------\n", __FUNCTION__ ); 1011 IRDA_DEBUG(1, "%s(), ---------------- Start ----------------\n", __func__ );
1012 1012
1013 IRDA_ASSERT(self != NULL, return;); 1013 IRDA_ASSERT(self != NULL, return;);
1014 1014
1015 dev = self->netdev; 1015 dev = self->netdev;
1016 iobase = self->io.fir_base; 1016 iobase = self->io.fir_base;
1017 1017
1018 IRDA_DEBUG(1, "%s(), self->io.speed = %d, change to speed = %d\n", __FUNCTION__ ,self->io.speed,baud); 1018 IRDA_DEBUG(1, "%s(), self->io.speed = %d, change to speed = %d\n", __func__ ,self->io.speed,baud);
1019 1019
1020 /* Come from SIR speed */ 1020 /* Come from SIR speed */
1021 if(self->io.speed <=115200) 1021 if(self->io.speed <=115200)
@@ -1029,7 +1029,7 @@ static void ali_ircc_fir_change_speed(struct ali_ircc_cb *priv, __u32 baud)
1029 // Set Dongle Speed mode 1029 // Set Dongle Speed mode
1030 ali_ircc_change_dongle_speed(self, baud); 1030 ali_ircc_change_dongle_speed(self, baud);
1031 1031
1032 IRDA_DEBUG(1, "%s(), ----------------- End ------------------\n", __FUNCTION__ ); 1032 IRDA_DEBUG(1, "%s(), ----------------- End ------------------\n", __func__ );
1033} 1033}
1034 1034
1035/* 1035/*
@@ -1047,9 +1047,9 @@ static void ali_ircc_sir_change_speed(struct ali_ircc_cb *priv, __u32 speed)
1047 int lcr; /* Line control reg */ 1047 int lcr; /* Line control reg */
1048 int divisor; 1048 int divisor;
1049 1049
1050 IRDA_DEBUG(1, "%s(), ---------------- Start ----------------\n", __FUNCTION__ ); 1050 IRDA_DEBUG(1, "%s(), ---------------- Start ----------------\n", __func__ );
1051 1051
1052 IRDA_DEBUG(1, "%s(), Setting speed to: %d\n", __FUNCTION__ , speed); 1052 IRDA_DEBUG(1, "%s(), Setting speed to: %d\n", __func__ , speed);
1053 1053
1054 IRDA_ASSERT(self != NULL, return;); 1054 IRDA_ASSERT(self != NULL, return;);
1055 1055
@@ -1103,7 +1103,7 @@ static void ali_ircc_sir_change_speed(struct ali_ircc_cb *priv, __u32 speed)
1103 1103
1104 spin_unlock_irqrestore(&self->lock, flags); 1104 spin_unlock_irqrestore(&self->lock, flags);
1105 1105
1106 IRDA_DEBUG(1, "%s(), ----------------- End ------------------\n", __FUNCTION__ ); 1106 IRDA_DEBUG(1, "%s(), ----------------- End ------------------\n", __func__ );
1107} 1107}
1108 1108
1109static void ali_ircc_change_dongle_speed(struct ali_ircc_cb *priv, int speed) 1109static void ali_ircc_change_dongle_speed(struct ali_ircc_cb *priv, int speed)
@@ -1113,14 +1113,14 @@ static void ali_ircc_change_dongle_speed(struct ali_ircc_cb *priv, int speed)
1113 int iobase,dongle_id; 1113 int iobase,dongle_id;
1114 int tmp = 0; 1114 int tmp = 0;
1115 1115
1116 IRDA_DEBUG(1, "%s(), ---------------- Start ----------------\n", __FUNCTION__ ); 1116 IRDA_DEBUG(1, "%s(), ---------------- Start ----------------\n", __func__ );
1117 1117
1118 iobase = self->io.fir_base; /* or iobase = self->io.sir_base; */ 1118 iobase = self->io.fir_base; /* or iobase = self->io.sir_base; */
1119 dongle_id = self->io.dongle_id; 1119 dongle_id = self->io.dongle_id;
1120 1120
1121 /* We are already locked, no need to do it again */ 1121 /* We are already locked, no need to do it again */
1122 1122
1123 IRDA_DEBUG(1, "%s(), Set Speed for %s , Speed = %d\n", __FUNCTION__ , dongle_types[dongle_id], speed); 1123 IRDA_DEBUG(1, "%s(), Set Speed for %s , Speed = %d\n", __func__ , dongle_types[dongle_id], speed);
1124 1124
1125 switch_bank(iobase, BANK2); 1125 switch_bank(iobase, BANK2);
1126 tmp = inb(iobase+FIR_IRDA_CR); 1126 tmp = inb(iobase+FIR_IRDA_CR);
@@ -1284,7 +1284,7 @@ static void ali_ircc_change_dongle_speed(struct ali_ircc_cb *priv, int speed)
1284 1284
1285 switch_bank(iobase, BANK0); 1285 switch_bank(iobase, BANK0);
1286 1286
1287 IRDA_DEBUG(1, "%s(), ----------------- End ------------------\n", __FUNCTION__ ); 1287 IRDA_DEBUG(1, "%s(), ----------------- End ------------------\n", __func__ );
1288} 1288}
1289 1289
1290/* 1290/*
@@ -1297,11 +1297,11 @@ static int ali_ircc_sir_write(int iobase, int fifo_size, __u8 *buf, int len)
1297{ 1297{
1298 int actual = 0; 1298 int actual = 0;
1299 1299
1300 IRDA_DEBUG(2, "%s(), ---------------- Start ----------------\n", __FUNCTION__ ); 1300 IRDA_DEBUG(2, "%s(), ---------------- Start ----------------\n", __func__ );
1301 1301
1302 /* Tx FIFO should be empty! */ 1302 /* Tx FIFO should be empty! */
1303 if (!(inb(iobase+UART_LSR) & UART_LSR_THRE)) { 1303 if (!(inb(iobase+UART_LSR) & UART_LSR_THRE)) {
1304 IRDA_DEBUG(0, "%s(), failed, fifo not empty!\n", __FUNCTION__ ); 1304 IRDA_DEBUG(0, "%s(), failed, fifo not empty!\n", __func__ );
1305 return 0; 1305 return 0;
1306 } 1306 }
1307 1307
@@ -1313,7 +1313,7 @@ static int ali_ircc_sir_write(int iobase, int fifo_size, __u8 *buf, int len)
1313 actual++; 1313 actual++;
1314 } 1314 }
1315 1315
1316 IRDA_DEBUG(2, "%s(), ----------------- End ------------------\n", __FUNCTION__ ); 1316 IRDA_DEBUG(2, "%s(), ----------------- End ------------------\n", __func__ );
1317 return actual; 1317 return actual;
1318} 1318}
1319 1319
@@ -1329,7 +1329,7 @@ static int ali_ircc_net_open(struct net_device *dev)
1329 int iobase; 1329 int iobase;
1330 char hwname[32]; 1330 char hwname[32];
1331 1331
1332 IRDA_DEBUG(2, "%s(), ---------------- Start ----------------\n", __FUNCTION__ ); 1332 IRDA_DEBUG(2, "%s(), ---------------- Start ----------------\n", __func__ );
1333 1333
1334 IRDA_ASSERT(dev != NULL, return -1;); 1334 IRDA_ASSERT(dev != NULL, return -1;);
1335 1335
@@ -1375,7 +1375,7 @@ static int ali_ircc_net_open(struct net_device *dev)
1375 */ 1375 */
1376 self->irlap = irlap_open(dev, &self->qos, hwname); 1376 self->irlap = irlap_open(dev, &self->qos, hwname);
1377 1377
1378 IRDA_DEBUG(2, "%s(), ----------------- End ------------------\n", __FUNCTION__ ); 1378 IRDA_DEBUG(2, "%s(), ----------------- End ------------------\n", __func__ );
1379 1379
1380 return 0; 1380 return 0;
1381} 1381}
@@ -1392,7 +1392,7 @@ static int ali_ircc_net_close(struct net_device *dev)
1392 struct ali_ircc_cb *self; 1392 struct ali_ircc_cb *self;
1393 //int iobase; 1393 //int iobase;
1394 1394
1395 IRDA_DEBUG(4, "%s(), ---------------- Start ----------------\n", __FUNCTION__ ); 1395 IRDA_DEBUG(4, "%s(), ---------------- Start ----------------\n", __func__ );
1396 1396
1397 IRDA_ASSERT(dev != NULL, return -1;); 1397 IRDA_ASSERT(dev != NULL, return -1;);
1398 1398
@@ -1415,7 +1415,7 @@ static int ali_ircc_net_close(struct net_device *dev)
1415 free_irq(self->io.irq, dev); 1415 free_irq(self->io.irq, dev);
1416 free_dma(self->io.dma); 1416 free_dma(self->io.dma);
1417 1417
1418 IRDA_DEBUG(2, "%s(), ----------------- End ------------------\n", __FUNCTION__ ); 1418 IRDA_DEBUG(2, "%s(), ----------------- End ------------------\n", __func__ );
1419 1419
1420 return 0; 1420 return 0;
1421} 1421}
@@ -1434,7 +1434,7 @@ static int ali_ircc_fir_hard_xmit(struct sk_buff *skb, struct net_device *dev)
1434 __u32 speed; 1434 __u32 speed;
1435 int mtt, diff; 1435 int mtt, diff;
1436 1436
1437 IRDA_DEBUG(1, "%s(), ---------------- Start -----------------\n", __FUNCTION__ ); 1437 IRDA_DEBUG(1, "%s(), ---------------- Start -----------------\n", __func__ );
1438 1438
1439 self = (struct ali_ircc_cb *) dev->priv; 1439 self = (struct ali_ircc_cb *) dev->priv;
1440 iobase = self->io.fir_base; 1440 iobase = self->io.fir_base;
@@ -1488,7 +1488,7 @@ static int ali_ircc_fir_hard_xmit(struct sk_buff *skb, struct net_device *dev)
1488 diff = self->now.tv_usec - self->stamp.tv_usec; 1488 diff = self->now.tv_usec - self->stamp.tv_usec;
1489 /* self->stamp is set from ali_ircc_dma_receive_complete() */ 1489 /* self->stamp is set from ali_ircc_dma_receive_complete() */
1490 1490
1491 IRDA_DEBUG(1, "%s(), ******* diff = %d ******* \n", __FUNCTION__ , diff); 1491 IRDA_DEBUG(1, "%s(), ******* diff = %d ******* \n", __func__ , diff);
1492 1492
1493 if (diff < 0) 1493 if (diff < 0)
1494 diff += 1000000; 1494 diff += 1000000;
@@ -1510,7 +1510,7 @@ static int ali_ircc_fir_hard_xmit(struct sk_buff *skb, struct net_device *dev)
1510 /* Adjust for timer resolution */ 1510 /* Adjust for timer resolution */
1511 mtt = (mtt+250) / 500; /* 4 discard, 5 get advanced, Let's round off */ 1511 mtt = (mtt+250) / 500; /* 4 discard, 5 get advanced, Let's round off */
1512 1512
1513 IRDA_DEBUG(1, "%s(), ************** mtt = %d ***********\n", __FUNCTION__ , mtt); 1513 IRDA_DEBUG(1, "%s(), ************** mtt = %d ***********\n", __func__ , mtt);
1514 1514
1515 /* Setup timer */ 1515 /* Setup timer */
1516 if (mtt == 1) /* 500 us */ 1516 if (mtt == 1) /* 500 us */
@@ -1567,7 +1567,7 @@ static int ali_ircc_fir_hard_xmit(struct sk_buff *skb, struct net_device *dev)
1567 spin_unlock_irqrestore(&self->lock, flags); 1567 spin_unlock_irqrestore(&self->lock, flags);
1568 dev_kfree_skb(skb); 1568 dev_kfree_skb(skb);
1569 1569
1570 IRDA_DEBUG(1, "%s(), ----------------- End ------------------\n", __FUNCTION__ ); 1570 IRDA_DEBUG(1, "%s(), ----------------- End ------------------\n", __func__ );
1571 return 0; 1571 return 0;
1572} 1572}
1573 1573
@@ -1578,7 +1578,7 @@ static void ali_ircc_dma_xmit(struct ali_ircc_cb *self)
1578 unsigned char FIFO_OPTI, Hi, Lo; 1578 unsigned char FIFO_OPTI, Hi, Lo;
1579 1579
1580 1580
1581 IRDA_DEBUG(1, "%s(), ---------------- Start -----------------\n", __FUNCTION__ ); 1581 IRDA_DEBUG(1, "%s(), ---------------- Start -----------------\n", __func__ );
1582 1582
1583 iobase = self->io.fir_base; 1583 iobase = self->io.fir_base;
1584 1584
@@ -1629,7 +1629,7 @@ static void ali_ircc_dma_xmit(struct ali_ircc_cb *self)
1629 tmp = inb(iobase+FIR_LCR_B); 1629 tmp = inb(iobase+FIR_LCR_B);
1630 tmp &= ~0x20; // Disable SIP 1630 tmp &= ~0x20; // Disable SIP
1631 outb(((unsigned char)(tmp & 0x3f) | LCR_B_TX_MODE) & ~LCR_B_BW, iobase+FIR_LCR_B); 1631 outb(((unsigned char)(tmp & 0x3f) | LCR_B_TX_MODE) & ~LCR_B_BW, iobase+FIR_LCR_B);
1632 IRDA_DEBUG(1, "%s(), ******* Change to TX mode: FIR_LCR_B = 0x%x ******* \n", __FUNCTION__ , inb(iobase+FIR_LCR_B)); 1632 IRDA_DEBUG(1, "%s(), ******* Change to TX mode: FIR_LCR_B = 0x%x ******* \n", __func__ , inb(iobase+FIR_LCR_B));
1633 1633
1634 outb(0, iobase+FIR_LSR); 1634 outb(0, iobase+FIR_LSR);
1635 1635
@@ -1639,7 +1639,7 @@ static void ali_ircc_dma_xmit(struct ali_ircc_cb *self)
1639 1639
1640 switch_bank(iobase, BANK0); 1640 switch_bank(iobase, BANK0);
1641 1641
1642 IRDA_DEBUG(1, "%s(), ----------------- End ------------------\n", __FUNCTION__ ); 1642 IRDA_DEBUG(1, "%s(), ----------------- End ------------------\n", __func__ );
1643} 1643}
1644 1644
1645static int ali_ircc_dma_xmit_complete(struct ali_ircc_cb *self) 1645static int ali_ircc_dma_xmit_complete(struct ali_ircc_cb *self)
@@ -1647,7 +1647,7 @@ static int ali_ircc_dma_xmit_complete(struct ali_ircc_cb *self)
1647 int iobase; 1647 int iobase;
1648 int ret = TRUE; 1648 int ret = TRUE;
1649 1649
1650 IRDA_DEBUG(1, "%s(), ---------------- Start -----------------\n", __FUNCTION__ ); 1650 IRDA_DEBUG(1, "%s(), ---------------- Start -----------------\n", __func__ );
1651 1651
1652 iobase = self->io.fir_base; 1652 iobase = self->io.fir_base;
1653 1653
@@ -1660,7 +1660,7 @@ static int ali_ircc_dma_xmit_complete(struct ali_ircc_cb *self)
1660 if((inb(iobase+FIR_LSR) & LSR_FRAME_ABORT) == LSR_FRAME_ABORT) 1660 if((inb(iobase+FIR_LSR) & LSR_FRAME_ABORT) == LSR_FRAME_ABORT)
1661 1661
1662 { 1662 {
1663 IRDA_ERROR("%s(), ********* LSR_FRAME_ABORT *********\n", __FUNCTION__); 1663 IRDA_ERROR("%s(), ********* LSR_FRAME_ABORT *********\n", __func__);
1664 self->stats.tx_errors++; 1664 self->stats.tx_errors++;
1665 self->stats.tx_fifo_errors++; 1665 self->stats.tx_fifo_errors++;
1666 } 1666 }
@@ -1703,7 +1703,7 @@ static int ali_ircc_dma_xmit_complete(struct ali_ircc_cb *self)
1703 1703
1704 switch_bank(iobase, BANK0); 1704 switch_bank(iobase, BANK0);
1705 1705
1706 IRDA_DEBUG(1, "%s(), ----------------- End ------------------\n", __FUNCTION__ ); 1706 IRDA_DEBUG(1, "%s(), ----------------- End ------------------\n", __func__ );
1707 return ret; 1707 return ret;
1708} 1708}
1709 1709
@@ -1718,7 +1718,7 @@ static int ali_ircc_dma_receive(struct ali_ircc_cb *self)
1718{ 1718{
1719 int iobase, tmp; 1719 int iobase, tmp;
1720 1720
1721 IRDA_DEBUG(1, "%s(), ---------------- Start -----------------\n", __FUNCTION__ ); 1721 IRDA_DEBUG(1, "%s(), ---------------- Start -----------------\n", __func__ );
1722 1722
1723 iobase = self->io.fir_base; 1723 iobase = self->io.fir_base;
1724 1724
@@ -1756,7 +1756,7 @@ static int ali_ircc_dma_receive(struct ali_ircc_cb *self)
1756 //switch_bank(iobase, BANK0); 1756 //switch_bank(iobase, BANK0);
1757 tmp = inb(iobase+FIR_LCR_B); 1757 tmp = inb(iobase+FIR_LCR_B);
1758 outb((unsigned char)(tmp &0x3f) | LCR_B_RX_MODE | LCR_B_BW , iobase + FIR_LCR_B); // 2000/12/1 05:16PM 1758 outb((unsigned char)(tmp &0x3f) | LCR_B_RX_MODE | LCR_B_BW , iobase + FIR_LCR_B); // 2000/12/1 05:16PM
1759 IRDA_DEBUG(1, "%s(), *** Change To RX mode: FIR_LCR_B = 0x%x *** \n", __FUNCTION__ , inb(iobase+FIR_LCR_B)); 1759 IRDA_DEBUG(1, "%s(), *** Change To RX mode: FIR_LCR_B = 0x%x *** \n", __func__ , inb(iobase+FIR_LCR_B));
1760 1760
1761 /* Set Rx Threshold */ 1761 /* Set Rx Threshold */
1762 switch_bank(iobase, BANK1); 1762 switch_bank(iobase, BANK1);
@@ -1768,7 +1768,7 @@ static int ali_ircc_dma_receive(struct ali_ircc_cb *self)
1768 outb(CR_DMA_EN | CR_DMA_BURST, iobase+FIR_CR); 1768 outb(CR_DMA_EN | CR_DMA_BURST, iobase+FIR_CR);
1769 1769
1770 switch_bank(iobase, BANK0); 1770 switch_bank(iobase, BANK0);
1771 IRDA_DEBUG(1, "%s(), ----------------- End ------------------\n", __FUNCTION__ ); 1771 IRDA_DEBUG(1, "%s(), ----------------- End ------------------\n", __func__ );
1772 return 0; 1772 return 0;
1773} 1773}
1774 1774
@@ -1779,7 +1779,7 @@ static int ali_ircc_dma_receive_complete(struct ali_ircc_cb *self)
1779 __u8 status, MessageCount; 1779 __u8 status, MessageCount;
1780 int len, i, iobase, val; 1780 int len, i, iobase, val;
1781 1781
1782 IRDA_DEBUG(1, "%s(), ---------------- Start -----------------\n", __FUNCTION__ ); 1782 IRDA_DEBUG(1, "%s(), ---------------- Start -----------------\n", __func__ );
1783 1783
1784 st_fifo = &self->st_fifo; 1784 st_fifo = &self->st_fifo;
1785 iobase = self->io.fir_base; 1785 iobase = self->io.fir_base;
@@ -1788,7 +1788,7 @@ static int ali_ircc_dma_receive_complete(struct ali_ircc_cb *self)
1788 MessageCount = inb(iobase+ FIR_LSR)&0x07; 1788 MessageCount = inb(iobase+ FIR_LSR)&0x07;
1789 1789
1790 if (MessageCount > 0) 1790 if (MessageCount > 0)
1791 IRDA_DEBUG(0, "%s(), Messsage count = %d,\n", __FUNCTION__ , MessageCount); 1791 IRDA_DEBUG(0, "%s(), Messsage count = %d,\n", __func__ , MessageCount);
1792 1792
1793 for (i=0; i<=MessageCount; i++) 1793 for (i=0; i<=MessageCount; i++)
1794 { 1794 {
@@ -1801,11 +1801,11 @@ static int ali_ircc_dma_receive_complete(struct ali_ircc_cb *self)
1801 len = len << 8; 1801 len = len << 8;
1802 len |= inb(iobase+FIR_RX_DSR_LO); 1802 len |= inb(iobase+FIR_RX_DSR_LO);
1803 1803
1804 IRDA_DEBUG(1, "%s(), RX Length = 0x%.2x,\n", __FUNCTION__ , len); 1804 IRDA_DEBUG(1, "%s(), RX Length = 0x%.2x,\n", __func__ , len);
1805 IRDA_DEBUG(1, "%s(), RX Status = 0x%.2x,\n", __FUNCTION__ , status); 1805 IRDA_DEBUG(1, "%s(), RX Status = 0x%.2x,\n", __func__ , status);
1806 1806
1807 if (st_fifo->tail >= MAX_RX_WINDOW) { 1807 if (st_fifo->tail >= MAX_RX_WINDOW) {
1808 IRDA_DEBUG(0, "%s(), window is full!\n", __FUNCTION__ ); 1808 IRDA_DEBUG(0, "%s(), window is full!\n", __func__ );
1809 continue; 1809 continue;
1810 } 1810 }
1811 1811
@@ -1828,7 +1828,7 @@ static int ali_ircc_dma_receive_complete(struct ali_ircc_cb *self)
1828 /* Check for errors */ 1828 /* Check for errors */
1829 if ((status & 0xd8) || self->rcvFramesOverflow || (len==0)) 1829 if ((status & 0xd8) || self->rcvFramesOverflow || (len==0))
1830 { 1830 {
1831 IRDA_DEBUG(0,"%s(), ************* RX Errors ************ \n", __FUNCTION__ ); 1831 IRDA_DEBUG(0,"%s(), ************* RX Errors ************ \n", __func__ );
1832 1832
1833 /* Skip frame */ 1833 /* Skip frame */
1834 self->stats.rx_errors++; 1834 self->stats.rx_errors++;
@@ -1838,29 +1838,29 @@ static int ali_ircc_dma_receive_complete(struct ali_ircc_cb *self)
1838 if (status & LSR_FIFO_UR) 1838 if (status & LSR_FIFO_UR)
1839 { 1839 {
1840 self->stats.rx_frame_errors++; 1840 self->stats.rx_frame_errors++;
1841 IRDA_DEBUG(0,"%s(), ************* FIFO Errors ************ \n", __FUNCTION__ ); 1841 IRDA_DEBUG(0,"%s(), ************* FIFO Errors ************ \n", __func__ );
1842 } 1842 }
1843 if (status & LSR_FRAME_ERROR) 1843 if (status & LSR_FRAME_ERROR)
1844 { 1844 {
1845 self->stats.rx_frame_errors++; 1845 self->stats.rx_frame_errors++;
1846 IRDA_DEBUG(0,"%s(), ************* FRAME Errors ************ \n", __FUNCTION__ ); 1846 IRDA_DEBUG(0,"%s(), ************* FRAME Errors ************ \n", __func__ );
1847 } 1847 }
1848 1848
1849 if (status & LSR_CRC_ERROR) 1849 if (status & LSR_CRC_ERROR)
1850 { 1850 {
1851 self->stats.rx_crc_errors++; 1851 self->stats.rx_crc_errors++;
1852 IRDA_DEBUG(0,"%s(), ************* CRC Errors ************ \n", __FUNCTION__ ); 1852 IRDA_DEBUG(0,"%s(), ************* CRC Errors ************ \n", __func__ );
1853 } 1853 }
1854 1854
1855 if(self->rcvFramesOverflow) 1855 if(self->rcvFramesOverflow)
1856 { 1856 {
1857 self->stats.rx_frame_errors++; 1857 self->stats.rx_frame_errors++;
1858 IRDA_DEBUG(0,"%s(), ************* Overran DMA buffer ************ \n", __FUNCTION__ ); 1858 IRDA_DEBUG(0,"%s(), ************* Overran DMA buffer ************ \n", __func__ );
1859 } 1859 }
1860 if(len == 0) 1860 if(len == 0)
1861 { 1861 {
1862 self->stats.rx_frame_errors++; 1862 self->stats.rx_frame_errors++;
1863 IRDA_DEBUG(0,"%s(), ********** Receive Frame Size = 0 ********* \n", __FUNCTION__ ); 1863 IRDA_DEBUG(0,"%s(), ********** Receive Frame Size = 0 ********* \n", __func__ );
1864 } 1864 }
1865 } 1865 }
1866 else 1866 else
@@ -1872,7 +1872,7 @@ static int ali_ircc_dma_receive_complete(struct ali_ircc_cb *self)
1872 val = inb(iobase+FIR_BSR); 1872 val = inb(iobase+FIR_BSR);
1873 if ((val& BSR_FIFO_NOT_EMPTY)== 0x80) 1873 if ((val& BSR_FIFO_NOT_EMPTY)== 0x80)
1874 { 1874 {
1875 IRDA_DEBUG(0, "%s(), ************* BSR_FIFO_NOT_EMPTY ************ \n", __FUNCTION__ ); 1875 IRDA_DEBUG(0, "%s(), ************* BSR_FIFO_NOT_EMPTY ************ \n", __func__ );
1876 1876
1877 /* Put this entry back in fifo */ 1877 /* Put this entry back in fifo */
1878 st_fifo->head--; 1878 st_fifo->head--;
@@ -1909,7 +1909,7 @@ static int ali_ircc_dma_receive_complete(struct ali_ircc_cb *self)
1909 { 1909 {
1910 IRDA_WARNING("%s(), memory squeeze, " 1910 IRDA_WARNING("%s(), memory squeeze, "
1911 "dropping frame.\n", 1911 "dropping frame.\n",
1912 __FUNCTION__); 1912 __func__);
1913 self->stats.rx_dropped++; 1913 self->stats.rx_dropped++;
1914 1914
1915 return FALSE; 1915 return FALSE;
@@ -1937,7 +1937,7 @@ static int ali_ircc_dma_receive_complete(struct ali_ircc_cb *self)
1937 1937
1938 switch_bank(iobase, BANK0); 1938 switch_bank(iobase, BANK0);
1939 1939
1940 IRDA_DEBUG(1, "%s(), ----------------- End ------------------\n", __FUNCTION__ ); 1940 IRDA_DEBUG(1, "%s(), ----------------- End ------------------\n", __func__ );
1941 return TRUE; 1941 return TRUE;
1942} 1942}
1943 1943
@@ -1956,7 +1956,7 @@ static int ali_ircc_sir_hard_xmit(struct sk_buff *skb, struct net_device *dev)
1956 int iobase; 1956 int iobase;
1957 __u32 speed; 1957 __u32 speed;
1958 1958
1959 IRDA_DEBUG(2, "%s(), ---------------- Start ----------------\n", __FUNCTION__ ); 1959 IRDA_DEBUG(2, "%s(), ---------------- Start ----------------\n", __func__ );
1960 1960
1961 IRDA_ASSERT(dev != NULL, return 0;); 1961 IRDA_ASSERT(dev != NULL, return 0;);
1962 1962
@@ -2005,7 +2005,7 @@ static int ali_ircc_sir_hard_xmit(struct sk_buff *skb, struct net_device *dev)
2005 2005
2006 dev_kfree_skb(skb); 2006 dev_kfree_skb(skb);
2007 2007
2008 IRDA_DEBUG(2, "%s(), ----------------- End ------------------\n", __FUNCTION__ ); 2008 IRDA_DEBUG(2, "%s(), ----------------- End ------------------\n", __func__ );
2009 2009
2010 return 0; 2010 return 0;
2011} 2011}
@@ -2024,7 +2024,7 @@ static int ali_ircc_net_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
2024 unsigned long flags; 2024 unsigned long flags;
2025 int ret = 0; 2025 int ret = 0;
2026 2026
2027 IRDA_DEBUG(2, "%s(), ---------------- Start ----------------\n", __FUNCTION__ ); 2027 IRDA_DEBUG(2, "%s(), ---------------- Start ----------------\n", __func__ );
2028 2028
2029 IRDA_ASSERT(dev != NULL, return -1;); 2029 IRDA_ASSERT(dev != NULL, return -1;);
2030 2030
@@ -2032,11 +2032,11 @@ static int ali_ircc_net_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
2032 2032
2033 IRDA_ASSERT(self != NULL, return -1;); 2033 IRDA_ASSERT(self != NULL, return -1;);
2034 2034
2035 IRDA_DEBUG(2, "%s(), %s, (cmd=0x%X)\n", __FUNCTION__ , dev->name, cmd); 2035 IRDA_DEBUG(2, "%s(), %s, (cmd=0x%X)\n", __func__ , dev->name, cmd);
2036 2036
2037 switch (cmd) { 2037 switch (cmd) {
2038 case SIOCSBANDWIDTH: /* Set bandwidth */ 2038 case SIOCSBANDWIDTH: /* Set bandwidth */
2039 IRDA_DEBUG(1, "%s(), SIOCSBANDWIDTH\n", __FUNCTION__ ); 2039 IRDA_DEBUG(1, "%s(), SIOCSBANDWIDTH\n", __func__ );
2040 /* 2040 /*
2041 * This function will also be used by IrLAP to change the 2041 * This function will also be used by IrLAP to change the
2042 * speed, so we still must allow for speed change within 2042 * speed, so we still must allow for speed change within
@@ -2050,13 +2050,13 @@ static int ali_ircc_net_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
2050 spin_unlock_irqrestore(&self->lock, flags); 2050 spin_unlock_irqrestore(&self->lock, flags);
2051 break; 2051 break;
2052 case SIOCSMEDIABUSY: /* Set media busy */ 2052 case SIOCSMEDIABUSY: /* Set media busy */
2053 IRDA_DEBUG(1, "%s(), SIOCSMEDIABUSY\n", __FUNCTION__ ); 2053 IRDA_DEBUG(1, "%s(), SIOCSMEDIABUSY\n", __func__ );
2054 if (!capable(CAP_NET_ADMIN)) 2054 if (!capable(CAP_NET_ADMIN))
2055 return -EPERM; 2055 return -EPERM;
2056 irda_device_set_media_busy(self->netdev, TRUE); 2056 irda_device_set_media_busy(self->netdev, TRUE);
2057 break; 2057 break;
2058 case SIOCGRECEIVING: /* Check if we are receiving right now */ 2058 case SIOCGRECEIVING: /* Check if we are receiving right now */
2059 IRDA_DEBUG(2, "%s(), SIOCGRECEIVING\n", __FUNCTION__ ); 2059 IRDA_DEBUG(2, "%s(), SIOCGRECEIVING\n", __func__ );
2060 /* This is protected */ 2060 /* This is protected */
2061 irq->ifr_receiving = ali_ircc_is_receiving(self); 2061 irq->ifr_receiving = ali_ircc_is_receiving(self);
2062 break; 2062 break;
@@ -2064,7 +2064,7 @@ static int ali_ircc_net_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
2064 ret = -EOPNOTSUPP; 2064 ret = -EOPNOTSUPP;
2065 } 2065 }
2066 2066
2067 IRDA_DEBUG(2, "%s(), ----------------- End ------------------\n", __FUNCTION__ ); 2067 IRDA_DEBUG(2, "%s(), ----------------- End ------------------\n", __func__ );
2068 2068
2069 return ret; 2069 return ret;
2070} 2070}
@@ -2081,7 +2081,7 @@ static int ali_ircc_is_receiving(struct ali_ircc_cb *self)
2081 int status = FALSE; 2081 int status = FALSE;
2082 int iobase; 2082 int iobase;
2083 2083
2084 IRDA_DEBUG(2, "%s(), ---------------- Start -----------------\n", __FUNCTION__ ); 2084 IRDA_DEBUG(2, "%s(), ---------------- Start -----------------\n", __func__ );
2085 2085
2086 IRDA_ASSERT(self != NULL, return FALSE;); 2086 IRDA_ASSERT(self != NULL, return FALSE;);
2087 2087
@@ -2095,7 +2095,7 @@ static int ali_ircc_is_receiving(struct ali_ircc_cb *self)
2095 if((inb(iobase+FIR_FIFO_FR) & 0x3f) != 0) 2095 if((inb(iobase+FIR_FIFO_FR) & 0x3f) != 0)
2096 { 2096 {
2097 /* We are receiving something */ 2097 /* We are receiving something */
2098 IRDA_DEBUG(1, "%s(), We are receiving something\n", __FUNCTION__ ); 2098 IRDA_DEBUG(1, "%s(), We are receiving something\n", __func__ );
2099 status = TRUE; 2099 status = TRUE;
2100 } 2100 }
2101 switch_bank(iobase, BANK0); 2101 switch_bank(iobase, BANK0);
@@ -2107,7 +2107,7 @@ static int ali_ircc_is_receiving(struct ali_ircc_cb *self)
2107 2107
2108 spin_unlock_irqrestore(&self->lock, flags); 2108 spin_unlock_irqrestore(&self->lock, flags);
2109 2109
2110 IRDA_DEBUG(2, "%s(), ----------------- End ------------------\n", __FUNCTION__ ); 2110 IRDA_DEBUG(2, "%s(), ----------------- End ------------------\n", __func__ );
2111 2111
2112 return status; 2112 return status;
2113} 2113}
@@ -2116,9 +2116,9 @@ static struct net_device_stats *ali_ircc_net_get_stats(struct net_device *dev)
2116{ 2116{
2117 struct ali_ircc_cb *self = (struct ali_ircc_cb *) dev->priv; 2117 struct ali_ircc_cb *self = (struct ali_ircc_cb *) dev->priv;
2118 2118
2119 IRDA_DEBUG(2, "%s(), ---------------- Start ----------------\n", __FUNCTION__ ); 2119 IRDA_DEBUG(2, "%s(), ---------------- Start ----------------\n", __func__ );
2120 2120
2121 IRDA_DEBUG(2, "%s(), ----------------- End ------------------\n", __FUNCTION__ ); 2121 IRDA_DEBUG(2, "%s(), ----------------- End ------------------\n", __func__ );
2122 2122
2123 return &self->stats; 2123 return &self->stats;
2124} 2124}
@@ -2164,7 +2164,7 @@ static void SetCOMInterrupts(struct ali_ircc_cb *self , unsigned char enable)
2164 2164
2165 int iobase = self->io.fir_base; /* or sir_base */ 2165 int iobase = self->io.fir_base; /* or sir_base */
2166 2166
2167 IRDA_DEBUG(2, "%s(), -------- Start -------- ( Enable = %d )\n", __FUNCTION__ , enable); 2167 IRDA_DEBUG(2, "%s(), -------- Start -------- ( Enable = %d )\n", __func__ , enable);
2168 2168
2169 /* Enable the interrupt which we wish to */ 2169 /* Enable the interrupt which we wish to */
2170 if (enable){ 2170 if (enable){
@@ -2205,14 +2205,14 @@ static void SetCOMInterrupts(struct ali_ircc_cb *self , unsigned char enable)
2205 else 2205 else
2206 outb(newMask, iobase+UART_IER); 2206 outb(newMask, iobase+UART_IER);
2207 2207
2208 IRDA_DEBUG(2, "%s(), ----------------- End ------------------\n", __FUNCTION__ ); 2208 IRDA_DEBUG(2, "%s(), ----------------- End ------------------\n", __func__ );
2209} 2209}
2210 2210
2211static void SIR2FIR(int iobase) 2211static void SIR2FIR(int iobase)
2212{ 2212{
2213 //unsigned char tmp; 2213 //unsigned char tmp;
2214 2214
2215 IRDA_DEBUG(1, "%s(), ---------------- Start ----------------\n", __FUNCTION__ ); 2215 IRDA_DEBUG(1, "%s(), ---------------- Start ----------------\n", __func__ );
2216 2216
2217 /* Already protected (change_speed() or setup()), no need to lock. 2217 /* Already protected (change_speed() or setup()), no need to lock.
2218 * Jean II */ 2218 * Jean II */
@@ -2228,14 +2228,14 @@ static void SIR2FIR(int iobase)
2228 //tmp |= 0x20; 2228 //tmp |= 0x20;
2229 //outb(tmp, iobase+FIR_LCR_B); 2229 //outb(tmp, iobase+FIR_LCR_B);
2230 2230
2231 IRDA_DEBUG(1, "%s(), ----------------- End ------------------\n", __FUNCTION__ ); 2231 IRDA_DEBUG(1, "%s(), ----------------- End ------------------\n", __func__ );
2232} 2232}
2233 2233
2234static void FIR2SIR(int iobase) 2234static void FIR2SIR(int iobase)
2235{ 2235{
2236 unsigned char val; 2236 unsigned char val;
2237 2237
2238 IRDA_DEBUG(1, "%s(), ---------------- Start ----------------\n", __FUNCTION__ ); 2238 IRDA_DEBUG(1, "%s(), ---------------- Start ----------------\n", __func__ );
2239 2239
2240 /* Already protected (change_speed() or setup()), no need to lock. 2240 /* Already protected (change_speed() or setup()), no need to lock.
2241 * Jean II */ 2241 * Jean II */
@@ -2251,7 +2251,7 @@ static void FIR2SIR(int iobase)
2251 val = inb(iobase+UART_LSR); 2251 val = inb(iobase+UART_LSR);
2252 val = inb(iobase+UART_MSR); 2252 val = inb(iobase+UART_MSR);
2253 2253
2254 IRDA_DEBUG(1, "%s(), ----------------- End ------------------\n", __FUNCTION__ ); 2254 IRDA_DEBUG(1, "%s(), ----------------- End ------------------\n", __func__ );
2255} 2255}
2256 2256
2257MODULE_AUTHOR("Benjamin Kong <benjamin_kong@ali.com.tw>"); 2257MODULE_AUTHOR("Benjamin Kong <benjamin_kong@ali.com.tw>");
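The ali-ircc.c hunks above are a mechanical substitution: the GCC-specific __FUNCTION__ identifier used in the driver's IRDA_DEBUG/IRDA_ERROR/IRDA_WARNING messages is replaced by the standard C99 predefined identifier __func__. A minimal, self-contained sketch (illustration only, not driver code) showing that both spellings name the enclosing function under gcc, __func__ being the portable one:

/* Standalone demo; build with: gcc -std=gnu99 -Wall demo.c */
#include <stdio.h>

static void show_name(void)
{
	/* __func__ is required by C99 to hold the enclosing function's name;
	 * __FUNCTION__ is the older GCC-only spelling this patch replaces. */
	printf("standard : %s\n", __func__);
#ifdef __GNUC__
	printf("gcc alias: %s\n", __FUNCTION__);
#endif
}

int main(void)
{
	show_name();	/* prints "show_name" on both lines when built with gcc */
	return 0;
}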
diff --git a/drivers/net/irda/donauboe.c b/drivers/net/irda/donauboe.c
index 34ad189fff67..69d16b30323b 100644
--- a/drivers/net/irda/donauboe.c
+++ b/drivers/net/irda/donauboe.c
@@ -245,7 +245,7 @@ toshoboe_dumpregs (struct toshoboe_cb *self)
245{ 245{
246 __u32 ringbase; 246 __u32 ringbase;
247 247
248 IRDA_DEBUG (4, "%s()\n", __FUNCTION__); 248 IRDA_DEBUG (4, "%s()\n", __func__);
249 249
250 ringbase = INB (OBOE_RING_BASE0) << 10; 250 ringbase = INB (OBOE_RING_BASE0) << 10;
251 ringbase |= INB (OBOE_RING_BASE1) << 18; 251 ringbase |= INB (OBOE_RING_BASE1) << 18;
@@ -293,7 +293,7 @@ static void
293toshoboe_disablebm (struct toshoboe_cb *self) 293toshoboe_disablebm (struct toshoboe_cb *self)
294{ 294{
295 __u8 command; 295 __u8 command;
296 IRDA_DEBUG (4, "%s()\n", __FUNCTION__); 296 IRDA_DEBUG (4, "%s()\n", __func__);
297 297
298 pci_read_config_byte (self->pdev, PCI_COMMAND, &command); 298 pci_read_config_byte (self->pdev, PCI_COMMAND, &command);
299 command &= ~PCI_COMMAND_MASTER; 299 command &= ~PCI_COMMAND_MASTER;
@@ -305,7 +305,7 @@ toshoboe_disablebm (struct toshoboe_cb *self)
305static void 305static void
306toshoboe_stopchip (struct toshoboe_cb *self) 306toshoboe_stopchip (struct toshoboe_cb *self)
307{ 307{
308 IRDA_DEBUG (4, "%s()\n", __FUNCTION__); 308 IRDA_DEBUG (4, "%s()\n", __func__);
309 309
310 /*Disable interrupts */ 310 /*Disable interrupts */
311 OUTB (0x0, OBOE_IER); 311 OUTB (0x0, OBOE_IER);
@@ -350,7 +350,7 @@ toshoboe_setbaud (struct toshoboe_cb *self)
350 __u16 pconfig = 0; 350 __u16 pconfig = 0;
351 __u8 config0l = 0; 351 __u8 config0l = 0;
352 352
353 IRDA_DEBUG (2, "%s(%d/%d)\n", __FUNCTION__, self->speed, self->io.speed); 353 IRDA_DEBUG (2, "%s(%d/%d)\n", __func__, self->speed, self->io.speed);
354 354
355 switch (self->speed) 355 switch (self->speed)
356 { 356 {
@@ -482,7 +482,7 @@ toshoboe_setbaud (struct toshoboe_cb *self)
482static void 482static void
483toshoboe_enablebm (struct toshoboe_cb *self) 483toshoboe_enablebm (struct toshoboe_cb *self)
484{ 484{
485 IRDA_DEBUG (4, "%s()\n", __FUNCTION__); 485 IRDA_DEBUG (4, "%s()\n", __func__);
486 pci_set_master (self->pdev); 486 pci_set_master (self->pdev);
487} 487}
488 488
@@ -492,7 +492,7 @@ toshoboe_initring (struct toshoboe_cb *self)
492{ 492{
493 int i; 493 int i;
494 494
495 IRDA_DEBUG (4, "%s()\n", __FUNCTION__); 495 IRDA_DEBUG (4, "%s()\n", __func__);
496 496
497 for (i = 0; i < TX_SLOTS; ++i) 497 for (i = 0; i < TX_SLOTS; ++i)
498 { 498 {
@@ -550,7 +550,7 @@ toshoboe_startchip (struct toshoboe_cb *self)
550{ 550{
551 __u32 physaddr; 551 __u32 physaddr;
552 552
553 IRDA_DEBUG (4, "%s()\n", __FUNCTION__); 553 IRDA_DEBUG (4, "%s()\n", __func__);
554 554
555 toshoboe_initring (self); 555 toshoboe_initring (self);
556 toshoboe_enablebm (self); 556 toshoboe_enablebm (self);
@@ -824,7 +824,7 @@ toshoboe_probe (struct toshoboe_cb *self)
824#endif 824#endif
825 unsigned long flags; 825 unsigned long flags;
826 826
827 IRDA_DEBUG (4, "%s()\n", __FUNCTION__); 827 IRDA_DEBUG (4, "%s()\n", __func__);
828 828
829 if (request_irq (self->io.irq, toshoboe_probeinterrupt, 829 if (request_irq (self->io.irq, toshoboe_probeinterrupt,
830 self->io.irqflags, "toshoboe", (void *) self)) 830 self->io.irqflags, "toshoboe", (void *) self))
@@ -983,10 +983,10 @@ toshoboe_hard_xmit (struct sk_buff *skb, struct net_device *dev)
983 983
984 IRDA_ASSERT (self != NULL, return 0; ); 984 IRDA_ASSERT (self != NULL, return 0; );
985 985
986 IRDA_DEBUG (1, "%s.tx:%x(%x)%x\n", __FUNCTION__ 986 IRDA_DEBUG (1, "%s.tx:%x(%x)%x\n", __func__
987 ,skb->len,self->txpending,INB (OBOE_ENABLEH)); 987 ,skb->len,self->txpending,INB (OBOE_ENABLEH));
988 if (!cb->magic) { 988 if (!cb->magic) {
989 IRDA_DEBUG (2, "%s.Not IrLAP:%x\n", __FUNCTION__, cb->magic); 989 IRDA_DEBUG (2, "%s.Not IrLAP:%x\n", __func__, cb->magic);
990#ifdef DUMP_PACKETS 990#ifdef DUMP_PACKETS
991 _dumpbufs(skb->data,skb->len,'>'); 991 _dumpbufs(skb->data,skb->len,'>');
992#endif 992#endif
@@ -1015,7 +1015,7 @@ toshoboe_hard_xmit (struct sk_buff *skb, struct net_device *dev)
1015 { 1015 {
1016 self->new_speed = speed; 1016 self->new_speed = speed;
1017 IRDA_DEBUG (1, "%s: Queued TxDone scheduled speed change %d\n" , 1017 IRDA_DEBUG (1, "%s: Queued TxDone scheduled speed change %d\n" ,
1018 __FUNCTION__, speed); 1018 __func__, speed);
1019 /* if no data, that's all! */ 1019 /* if no data, that's all! */
1020 if (!skb->len) 1020 if (!skb->len)
1021 { 1021 {
@@ -1057,7 +1057,7 @@ toshoboe_hard_xmit (struct sk_buff *skb, struct net_device *dev)
1057 /* which we will add a wrong checksum to */ 1057 /* which we will add a wrong checksum to */
1058 1058
1059 mtt = toshoboe_makemttpacket (self, self->tx_bufs[self->txs], mtt); 1059 mtt = toshoboe_makemttpacket (self, self->tx_bufs[self->txs], mtt);
1060 IRDA_DEBUG (1, "%s.mtt:%x(%x)%d\n", __FUNCTION__ 1060 IRDA_DEBUG (1, "%s.mtt:%x(%x)%d\n", __func__
1061 ,skb->len,mtt,self->txpending); 1061 ,skb->len,mtt,self->txpending);
1062 if (mtt) 1062 if (mtt)
1063 { 1063 {
@@ -1101,7 +1101,7 @@ dumpbufs(skb->data,skb->len,'>');
1101 1101
1102 if (self->ring->tx[self->txs].control & OBOE_CTL_TX_HW_OWNS) 1102 if (self->ring->tx[self->txs].control & OBOE_CTL_TX_HW_OWNS)
1103 { 1103 {
1104 IRDA_DEBUG (0, "%s.ful:%x(%x)%x\n", __FUNCTION__ 1104 IRDA_DEBUG (0, "%s.ful:%x(%x)%x\n", __func__
1105 ,skb->len, self->ring->tx[self->txs].control, self->txpending); 1105 ,skb->len, self->ring->tx[self->txs].control, self->txpending);
1106 toshoboe_start_DMA(self, OBOE_CONFIG0H_ENTX); 1106 toshoboe_start_DMA(self, OBOE_CONFIG0H_ENTX);
1107 spin_unlock_irqrestore(&self->spinlock, flags); 1107 spin_unlock_irqrestore(&self->spinlock, flags);
@@ -1179,7 +1179,7 @@ toshoboe_interrupt (int irq, void *dev_id)
1179 if (self->ring->tx[i].control & OBOE_CTL_TX_HW_OWNS) 1179 if (self->ring->tx[i].control & OBOE_CTL_TX_HW_OWNS)
1180 self->txpending++; 1180 self->txpending++;
1181 } 1181 }
1182 IRDA_DEBUG (1, "%s.txd(%x)%x/%x\n", __FUNCTION__ 1182 IRDA_DEBUG (1, "%s.txd(%x)%x/%x\n", __func__
1183 ,irqstat,txp,self->txpending); 1183 ,irqstat,txp,self->txpending);
1184 1184
1185 txp = INB (OBOE_TXSLOT) & OBOE_SLOT_MASK; 1185 txp = INB (OBOE_TXSLOT) & OBOE_SLOT_MASK;
@@ -1209,7 +1209,7 @@ toshoboe_interrupt (int irq, void *dev_id)
1209 { 1209 {
1210 self->speed = self->new_speed; 1210 self->speed = self->new_speed;
1211 IRDA_DEBUG (1, "%s: Executed TxDone scheduled speed change %d\n", 1211 IRDA_DEBUG (1, "%s: Executed TxDone scheduled speed change %d\n",
1212 __FUNCTION__, self->speed); 1212 __func__, self->speed);
1213 toshoboe_setbaud (self); 1213 toshoboe_setbaud (self);
1214 } 1214 }
1215 1215
@@ -1224,7 +1224,7 @@ toshoboe_interrupt (int irq, void *dev_id)
1224 { 1224 {
1225 int len = self->ring->rx[self->rxs].len; 1225 int len = self->ring->rx[self->rxs].len;
1226 skb = NULL; 1226 skb = NULL;
1227 IRDA_DEBUG (3, "%s.rcv:%x(%x)\n", __FUNCTION__ 1227 IRDA_DEBUG (3, "%s.rcv:%x(%x)\n", __func__
1228 ,len,self->ring->rx[self->rxs].control); 1228 ,len,self->ring->rx[self->rxs].control);
1229 1229
1230#ifdef DUMP_PACKETS 1230#ifdef DUMP_PACKETS
@@ -1246,7 +1246,7 @@ dumpbufs(self->rx_bufs[self->rxs],len,'<');
1246 len -= 2; 1246 len -= 2;
1247 else 1247 else
1248 len = 0; 1248 len = 0;
1249 IRDA_DEBUG (1, "%s.SIR:%x(%x)\n", __FUNCTION__, len,enable); 1249 IRDA_DEBUG (1, "%s.SIR:%x(%x)\n", __func__, len,enable);
1250 } 1250 }
1251 1251
1252#ifdef USE_MIR 1252#ifdef USE_MIR
@@ -1256,7 +1256,7 @@ dumpbufs(self->rx_bufs[self->rxs],len,'<');
1256 len -= 2; 1256 len -= 2;
1257 else 1257 else
1258 len = 0; 1258 len = 0;
1259 IRDA_DEBUG (2, "%s.MIR:%x(%x)\n", __FUNCTION__, len,enable); 1259 IRDA_DEBUG (2, "%s.MIR:%x(%x)\n", __func__, len,enable);
1260 } 1260 }
1261#endif 1261#endif
1262 else if (enable & OBOE_ENABLEH_FIRON) 1262 else if (enable & OBOE_ENABLEH_FIRON)
@@ -1265,10 +1265,10 @@ dumpbufs(self->rx_bufs[self->rxs],len,'<');
1265 len -= 4; /*FIXME: check this */ 1265 len -= 4; /*FIXME: check this */
1266 else 1266 else
1267 len = 0; 1267 len = 0;
1268 IRDA_DEBUG (1, "%s.FIR:%x(%x)\n", __FUNCTION__, len,enable); 1268 IRDA_DEBUG (1, "%s.FIR:%x(%x)\n", __func__, len,enable);
1269 } 1269 }
1270 else 1270 else
1271 IRDA_DEBUG (0, "%s.?IR:%x(%x)\n", __FUNCTION__, len,enable); 1271 IRDA_DEBUG (0, "%s.?IR:%x(%x)\n", __func__, len,enable);
1272 1272
1273 if (len) 1273 if (len)
1274 { 1274 {
@@ -1289,7 +1289,7 @@ dumpbufs(self->rx_bufs[self->rxs],len,'<');
1289 { 1289 {
1290 printk (KERN_INFO 1290 printk (KERN_INFO
1291 "%s(), memory squeeze, dropping frame.\n", 1291 "%s(), memory squeeze, dropping frame.\n",
1292 __FUNCTION__); 1292 __func__);
1293 } 1293 }
1294 } 1294 }
1295 } 1295 }
@@ -1301,7 +1301,7 @@ dumpbufs(self->rx_bufs[self->rxs],len,'<');
1301 /* (SIR) data is splitted in several slots. */ 1301 /* (SIR) data is splitted in several slots. */
1302 /* we have to join all the received buffers received */ 1302 /* we have to join all the received buffers received */
1303 /*in a large buffer before checking CRC. */ 1303 /*in a large buffer before checking CRC. */
1304 IRDA_DEBUG (0, "%s.err:%x(%x)\n", __FUNCTION__ 1304 IRDA_DEBUG (0, "%s.err:%x(%x)\n", __func__
1305 ,len,self->ring->rx[self->rxs].control); 1305 ,len,self->ring->rx[self->rxs].control);
1306 } 1306 }
1307 1307
@@ -1329,7 +1329,7 @@ dumpbufs(self->rx_bufs[self->rxs],len,'<');
1329 if (irqstat & OBOE_INT_SIP) 1329 if (irqstat & OBOE_INT_SIP)
1330 { 1330 {
1331 self->int_sip++; 1331 self->int_sip++;
1332 IRDA_DEBUG (1, "%s.sip:%x(%x)%x\n", __FUNCTION__ 1332 IRDA_DEBUG (1, "%s.sip:%x(%x)%x\n", __func__
1333 ,self->int_sip,irqstat,self->txpending); 1333 ,self->int_sip,irqstat,self->txpending);
1334 } 1334 }
1335 return IRQ_HANDLED; 1335 return IRQ_HANDLED;
@@ -1343,7 +1343,7 @@ toshoboe_net_open (struct net_device *dev)
1343 unsigned long flags; 1343 unsigned long flags;
1344 int rc; 1344 int rc;
1345 1345
1346 IRDA_DEBUG (4, "%s()\n", __FUNCTION__); 1346 IRDA_DEBUG (4, "%s()\n", __func__);
1347 1347
1348 self = netdev_priv(dev); 1348 self = netdev_priv(dev);
1349 1349
@@ -1381,7 +1381,7 @@ toshoboe_net_close (struct net_device *dev)
1381{ 1381{
1382 struct toshoboe_cb *self; 1382 struct toshoboe_cb *self;
1383 1383
1384 IRDA_DEBUG (4, "%s()\n", __FUNCTION__); 1384 IRDA_DEBUG (4, "%s()\n", __func__);
1385 1385
1386 IRDA_ASSERT (dev != NULL, return -1; ); 1386 IRDA_ASSERT (dev != NULL, return -1; );
1387 self = (struct toshoboe_cb *) dev->priv; 1387 self = (struct toshoboe_cb *) dev->priv;
@@ -1426,7 +1426,7 @@ toshoboe_net_ioctl (struct net_device *dev, struct ifreq *rq, int cmd)
1426 1426
1427 IRDA_ASSERT (self != NULL, return -1; ); 1427 IRDA_ASSERT (self != NULL, return -1; );
1428 1428
1429 IRDA_DEBUG (5, "%s(), %s, (cmd=0x%X)\n", __FUNCTION__, dev->name, cmd); 1429 IRDA_DEBUG (5, "%s(), %s, (cmd=0x%X)\n", __func__, dev->name, cmd);
1430 1430
1431 /* Disable interrupts & save flags */ 1431 /* Disable interrupts & save flags */
1432 spin_lock_irqsave(&self->spinlock, flags); 1432 spin_lock_irqsave(&self->spinlock, flags);
@@ -1438,7 +1438,7 @@ toshoboe_net_ioctl (struct net_device *dev, struct ifreq *rq, int cmd)
1438 * speed, so we still must allow for speed change within 1438 * speed, so we still must allow for speed change within
1439 * interrupt context. 1439 * interrupt context.
1440 */ 1440 */
1441 IRDA_DEBUG (1, "%s(BANDWIDTH), %s, (%X/%ld\n", __FUNCTION__ 1441 IRDA_DEBUG (1, "%s(BANDWIDTH), %s, (%X/%ld\n", __func__
1442 ,dev->name, INB (OBOE_STATUS), irq->ifr_baudrate ); 1442 ,dev->name, INB (OBOE_STATUS), irq->ifr_baudrate );
1443 if (!in_interrupt () && !capable (CAP_NET_ADMIN)) { 1443 if (!in_interrupt () && !capable (CAP_NET_ADMIN)) {
1444 ret = -EPERM; 1444 ret = -EPERM;
@@ -1451,7 +1451,7 @@ toshoboe_net_ioctl (struct net_device *dev, struct ifreq *rq, int cmd)
1451 self->new_speed = irq->ifr_baudrate; 1451 self->new_speed = irq->ifr_baudrate;
1452 break; 1452 break;
1453 case SIOCSMEDIABUSY: /* Set media busy */ 1453 case SIOCSMEDIABUSY: /* Set media busy */
1454 IRDA_DEBUG (1, "%s(MEDIABUSY), %s, (%X/%x)\n", __FUNCTION__ 1454 IRDA_DEBUG (1, "%s(MEDIABUSY), %s, (%X/%x)\n", __func__
1455 ,dev->name, INB (OBOE_STATUS), capable (CAP_NET_ADMIN) ); 1455 ,dev->name, INB (OBOE_STATUS), capable (CAP_NET_ADMIN) );
1456 if (!capable (CAP_NET_ADMIN)) { 1456 if (!capable (CAP_NET_ADMIN)) {
1457 ret = -EPERM; 1457 ret = -EPERM;
@@ -1461,11 +1461,11 @@ toshoboe_net_ioctl (struct net_device *dev, struct ifreq *rq, int cmd)
1461 break; 1461 break;
1462 case SIOCGRECEIVING: /* Check if we are receiving right now */ 1462 case SIOCGRECEIVING: /* Check if we are receiving right now */
1463 irq->ifr_receiving = (INB (OBOE_STATUS) & OBOE_STATUS_RXBUSY) ? 1 : 0; 1463 irq->ifr_receiving = (INB (OBOE_STATUS) & OBOE_STATUS_RXBUSY) ? 1 : 0;
1464 IRDA_DEBUG (3, "%s(RECEIVING), %s, (%X/%x)\n", __FUNCTION__ 1464 IRDA_DEBUG (3, "%s(RECEIVING), %s, (%X/%x)\n", __func__
1465 ,dev->name, INB (OBOE_STATUS), irq->ifr_receiving ); 1465 ,dev->name, INB (OBOE_STATUS), irq->ifr_receiving );
1466 break; 1466 break;
1467 default: 1467 default:
1468 IRDA_DEBUG (1, "%s(?), %s, (cmd=0x%X)\n", __FUNCTION__, dev->name, cmd); 1468 IRDA_DEBUG (1, "%s(?), %s, (cmd=0x%X)\n", __func__, dev->name, cmd);
1469 ret = -EOPNOTSUPP; 1469 ret = -EOPNOTSUPP;
1470 } 1470 }
1471out: 1471out:
@@ -1492,7 +1492,7 @@ toshoboe_close (struct pci_dev *pci_dev)
1492 int i; 1492 int i;
1493 struct toshoboe_cb *self = (struct toshoboe_cb*)pci_get_drvdata(pci_dev); 1493 struct toshoboe_cb *self = (struct toshoboe_cb*)pci_get_drvdata(pci_dev);
1494 1494
1495 IRDA_DEBUG (4, "%s()\n", __FUNCTION__); 1495 IRDA_DEBUG (4, "%s()\n", __func__);
1496 1496
1497 IRDA_ASSERT (self != NULL, return; ); 1497 IRDA_ASSERT (self != NULL, return; );
1498 1498
@@ -1533,7 +1533,7 @@ toshoboe_open (struct pci_dev *pci_dev, const struct pci_device_id *pdid)
1533 int ok = 0; 1533 int ok = 0;
1534 int err; 1534 int err;
1535 1535
1536 IRDA_DEBUG (4, "%s()\n", __FUNCTION__); 1536 IRDA_DEBUG (4, "%s()\n", __func__);
1537 1537
1538 if ((err=pci_enable_device(pci_dev))) 1538 if ((err=pci_enable_device(pci_dev)))
1539 return err; 1539 return err;
@@ -1700,7 +1700,7 @@ toshoboe_gotosleep (struct pci_dev *pci_dev, pm_message_t crap)
1700 unsigned long flags; 1700 unsigned long flags;
1701 int i = 10; 1701 int i = 10;
1702 1702
1703 IRDA_DEBUG (4, "%s()\n", __FUNCTION__); 1703 IRDA_DEBUG (4, "%s()\n", __func__);
1704 1704
1705 if (!self || self->stopped) 1705 if (!self || self->stopped)
1706 return 0; 1706 return 0;
@@ -1728,7 +1728,7 @@ toshoboe_wakeup (struct pci_dev *pci_dev)
1728 struct toshoboe_cb *self = (struct toshoboe_cb*)pci_get_drvdata(pci_dev); 1728 struct toshoboe_cb *self = (struct toshoboe_cb*)pci_get_drvdata(pci_dev);
1729 unsigned long flags; 1729 unsigned long flags;
1730 1730
1731 IRDA_DEBUG (4, "%s()\n", __FUNCTION__); 1731 IRDA_DEBUG (4, "%s()\n", __func__);
1732 1732
1733 if (!self || !self->stopped) 1733 if (!self || !self->stopped)
1734 return 0; 1734 return 0;
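Most of the changed lines in these IrDA drivers follow the same call pattern: the enclosing function's name is passed to a level-gated debug macro as an ordinary "%s" argument. A hypothetical stand-in for that pattern is sketched below; the DEMO_DEBUG name, the demo_debug_level knob, and the threshold are assumptions for illustration, not the kernel's IRDA_DEBUG definition (which lives in include/net/irda/irda.h):

/* Hypothetical sketch of the call pattern seen in these hunks; illustration only. */
#include <stdio.h>

static int demo_debug_level = 2;	/* assumed runtime knob, analogous to irda_debug */

#define DEMO_DEBUG(n, fmt, arg) \
	do { if ((n) <= demo_debug_level) fprintf(stderr, fmt, arg); } while (0)

static int demo_net_open(void)
{
	/* __func__ expands at the call site, so the message names demo_net_open */
	DEMO_DEBUG(2, "%s(), ---------------- Start ----------------\n", __func__);
	return 0;
}

int main(void)
{
	return demo_net_open();
}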
diff --git a/drivers/net/irda/ep7211-sir.c b/drivers/net/irda/ep7211-sir.c
index 831572429bb9..f83c5b881d2d 100644
--- a/drivers/net/irda/ep7211-sir.c
+++ b/drivers/net/irda/ep7211-sir.c
@@ -14,7 +14,7 @@
14#include <net/irda/irda_device.h> 14#include <net/irda/irda_device.h>
15 15
16#include <asm/io.h> 16#include <asm/io.h>
17#include <asm/hardware.h> 17#include <mach/hardware.h>
18 18
19#include "sir-dev.h" 19#include "sir-dev.h"
20 20
diff --git a/drivers/net/irda/girbil-sir.c b/drivers/net/irda/girbil-sir.c
index 738531b16bd3..a31b8fa8aaa9 100644
--- a/drivers/net/irda/girbil-sir.c
+++ b/drivers/net/irda/girbil-sir.c
@@ -86,7 +86,7 @@ static int girbil_open(struct sir_dev *dev)
86{ 86{
87 struct qos_info *qos = &dev->qos; 87 struct qos_info *qos = &dev->qos;
88 88
89 IRDA_DEBUG(2, "%s()\n", __FUNCTION__); 89 IRDA_DEBUG(2, "%s()\n", __func__);
90 90
91 /* Power on dongle */ 91 /* Power on dongle */
92 sirdev_set_dtr_rts(dev, TRUE, TRUE); 92 sirdev_set_dtr_rts(dev, TRUE, TRUE);
@@ -102,7 +102,7 @@ static int girbil_open(struct sir_dev *dev)
102 102
103static int girbil_close(struct sir_dev *dev) 103static int girbil_close(struct sir_dev *dev)
104{ 104{
105 IRDA_DEBUG(2, "%s()\n", __FUNCTION__); 105 IRDA_DEBUG(2, "%s()\n", __func__);
106 106
107 /* Power off dongle */ 107 /* Power off dongle */
108 sirdev_set_dtr_rts(dev, FALSE, FALSE); 108 sirdev_set_dtr_rts(dev, FALSE, FALSE);
@@ -126,7 +126,7 @@ static int girbil_change_speed(struct sir_dev *dev, unsigned speed)
126 u8 control[2]; 126 u8 control[2];
127 static int ret = 0; 127 static int ret = 0;
128 128
129 IRDA_DEBUG(2, "%s()\n", __FUNCTION__); 129 IRDA_DEBUG(2, "%s()\n", __func__);
130 130
131 /* dongle alread reset - port and dongle at default speed */ 131 /* dongle alread reset - port and dongle at default speed */
132 132
@@ -179,7 +179,7 @@ static int girbil_change_speed(struct sir_dev *dev, unsigned speed)
179 break; 179 break;
180 180
181 default: 181 default:
182 IRDA_ERROR("%s - undefined state %d\n", __FUNCTION__, state); 182 IRDA_ERROR("%s - undefined state %d\n", __func__, state);
183 ret = -EINVAL; 183 ret = -EINVAL;
184 break; 184 break;
185 } 185 }
@@ -209,7 +209,7 @@ static int girbil_reset(struct sir_dev *dev)
209 u8 control = GIRBIL_TXEN | GIRBIL_RXEN; 209 u8 control = GIRBIL_TXEN | GIRBIL_RXEN;
210 int ret = 0; 210 int ret = 0;
211 211
212 IRDA_DEBUG(2, "%s()\n", __FUNCTION__); 212 IRDA_DEBUG(2, "%s()\n", __func__);
213 213
214 switch (state) { 214 switch (state) {
215 case SIRDEV_STATE_DONGLE_RESET: 215 case SIRDEV_STATE_DONGLE_RESET:
@@ -241,7 +241,7 @@ static int girbil_reset(struct sir_dev *dev)
241 break; 241 break;
242 242
243 default: 243 default:
244 IRDA_ERROR("%s(), undefined state %d\n", __FUNCTION__, state); 244 IRDA_ERROR("%s(), undefined state %d\n", __func__, state);
245 ret = -1; 245 ret = -1;
246 break; 246 break;
247 } 247 }
diff --git a/drivers/net/irda/irda-usb.c b/drivers/net/irda/irda-usb.c
index 18b471cd1447..b5d6b9ac162a 100644
--- a/drivers/net/irda/irda-usb.c
+++ b/drivers/net/irda/irda-usb.c
@@ -177,12 +177,12 @@ static void irda_usb_build_header(struct irda_usb_cb *self,
177 (!force) && (self->speed != -1)) { 177 (!force) && (self->speed != -1)) {
178 /* No speed and xbofs change here 178 /* No speed and xbofs change here
179 * (we'll do it later in the write callback) */ 179 * (we'll do it later in the write callback) */
180 IRDA_DEBUG(2, "%s(), not changing speed yet\n", __FUNCTION__); 180 IRDA_DEBUG(2, "%s(), not changing speed yet\n", __func__);
181 *header = 0; 181 *header = 0;
182 return; 182 return;
183 } 183 }
184 184
185 IRDA_DEBUG(2, "%s(), changing speed to %d\n", __FUNCTION__, self->new_speed); 185 IRDA_DEBUG(2, "%s(), changing speed to %d\n", __func__, self->new_speed);
186 self->speed = self->new_speed; 186 self->speed = self->new_speed;
187 /* We will do ` self->new_speed = -1; ' in the completion 187 /* We will do ` self->new_speed = -1; ' in the completion
188 * handler just in case the current URB fail - Jean II */ 188 * handler just in case the current URB fail - Jean II */
@@ -228,7 +228,7 @@ static void irda_usb_build_header(struct irda_usb_cb *self,
228 228
229 /* Set the negotiated additional XBOFS */ 229 /* Set the negotiated additional XBOFS */
230 if (self->new_xbofs != -1) { 230 if (self->new_xbofs != -1) {
231 IRDA_DEBUG(2, "%s(), changing xbofs to %d\n", __FUNCTION__, self->new_xbofs); 231 IRDA_DEBUG(2, "%s(), changing xbofs to %d\n", __func__, self->new_xbofs);
232 self->xbofs = self->new_xbofs; 232 self->xbofs = self->new_xbofs;
233 /* We will do ` self->new_xbofs = -1; ' in the completion 233 /* We will do ` self->new_xbofs = -1; ' in the completion
234 * handler just in case the current URB fail - Jean II */ 234 * handler just in case the current URB fail - Jean II */
@@ -302,13 +302,13 @@ static void irda_usb_change_speed_xbofs(struct irda_usb_cb *self)
302 struct urb *urb; 302 struct urb *urb;
303 int ret; 303 int ret;
304 304
305 IRDA_DEBUG(2, "%s(), speed=%d, xbofs=%d\n", __FUNCTION__, 305 IRDA_DEBUG(2, "%s(), speed=%d, xbofs=%d\n", __func__,
306 self->new_speed, self->new_xbofs); 306 self->new_speed, self->new_xbofs);
307 307
308 /* Grab the speed URB */ 308 /* Grab the speed URB */
309 urb = self->speed_urb; 309 urb = self->speed_urb;
310 if (urb->status != 0) { 310 if (urb->status != 0) {
311 IRDA_WARNING("%s(), URB still in use!\n", __FUNCTION__); 311 IRDA_WARNING("%s(), URB still in use!\n", __func__);
312 return; 312 return;
313 } 313 }
314 314
@@ -334,7 +334,7 @@ static void irda_usb_change_speed_xbofs(struct irda_usb_cb *self)
334 334
335 /* Irq disabled -> GFP_ATOMIC */ 335 /* Irq disabled -> GFP_ATOMIC */
336 if ((ret = usb_submit_urb(urb, GFP_ATOMIC))) { 336 if ((ret = usb_submit_urb(urb, GFP_ATOMIC))) {
337 IRDA_WARNING("%s(), failed Speed URB\n", __FUNCTION__); 337 IRDA_WARNING("%s(), failed Speed URB\n", __func__);
338 } 338 }
339} 339}
340 340
@@ -347,7 +347,7 @@ static void speed_bulk_callback(struct urb *urb)
347{ 347{
348 struct irda_usb_cb *self = urb->context; 348 struct irda_usb_cb *self = urb->context;
349 349
350 IRDA_DEBUG(2, "%s()\n", __FUNCTION__); 350 IRDA_DEBUG(2, "%s()\n", __func__);
351 351
352 /* We should always have a context */ 352 /* We should always have a context */
353 IRDA_ASSERT(self != NULL, return;); 353 IRDA_ASSERT(self != NULL, return;);
@@ -357,7 +357,7 @@ static void speed_bulk_callback(struct urb *urb)
357 /* Check for timeout and other USB nasties */ 357 /* Check for timeout and other USB nasties */
358 if (urb->status != 0) { 358 if (urb->status != 0) {
359 /* I get a lot of -ECONNABORTED = -103 here - Jean II */ 359 /* I get a lot of -ECONNABORTED = -103 here - Jean II */
360 IRDA_DEBUG(0, "%s(), URB complete status %d, transfer_flags 0x%04X\n", __FUNCTION__, urb->status, urb->transfer_flags); 360 IRDA_DEBUG(0, "%s(), URB complete status %d, transfer_flags 0x%04X\n", __func__, urb->status, urb->transfer_flags);
361 361
362 /* Don't do anything here, that might confuse the USB layer. 362 /* Don't do anything here, that might confuse the USB layer.
363 * Instead, we will wait for irda_usb_net_timeout(), the 363 * Instead, we will wait for irda_usb_net_timeout(), the
@@ -392,7 +392,7 @@ static int irda_usb_hard_xmit(struct sk_buff *skb, struct net_device *netdev)
392 int res, mtt; 392 int res, mtt;
393 int err = 1; /* Failed */ 393 int err = 1; /* Failed */
394 394
395 IRDA_DEBUG(4, "%s() on %s\n", __FUNCTION__, netdev->name); 395 IRDA_DEBUG(4, "%s() on %s\n", __func__, netdev->name);
396 396
397 netif_stop_queue(netdev); 397 netif_stop_queue(netdev);
398 398
@@ -403,7 +403,7 @@ static int irda_usb_hard_xmit(struct sk_buff *skb, struct net_device *netdev)
403 * We need to check self->present under the spinlock because 403 * We need to check self->present under the spinlock because
404 * of irda_usb_disconnect() is synchronous - Jean II */ 404 * of irda_usb_disconnect() is synchronous - Jean II */
405 if (!self->present) { 405 if (!self->present) {
406 IRDA_DEBUG(0, "%s(), Device is gone...\n", __FUNCTION__); 406 IRDA_DEBUG(0, "%s(), Device is gone...\n", __func__);
407 goto drop; 407 goto drop;
408 } 408 }
409 409
@@ -437,7 +437,7 @@ static int irda_usb_hard_xmit(struct sk_buff *skb, struct net_device *netdev)
437 } 437 }
438 438
439 if (urb->status != 0) { 439 if (urb->status != 0) {
440 IRDA_WARNING("%s(), URB still in use!\n", __FUNCTION__); 440 IRDA_WARNING("%s(), URB still in use!\n", __func__);
441 goto drop; 441 goto drop;
442 } 442 }
443 443
@@ -524,7 +524,7 @@ static int irda_usb_hard_xmit(struct sk_buff *skb, struct net_device *netdev)
524 524
525 /* Ask USB to send the packet - Irq disabled -> GFP_ATOMIC */ 525 /* Ask USB to send the packet - Irq disabled -> GFP_ATOMIC */
526 if ((res = usb_submit_urb(urb, GFP_ATOMIC))) { 526 if ((res = usb_submit_urb(urb, GFP_ATOMIC))) {
527 IRDA_WARNING("%s(), failed Tx URB\n", __FUNCTION__); 527 IRDA_WARNING("%s(), failed Tx URB\n", __func__);
528 self->stats.tx_errors++; 528 self->stats.tx_errors++;
529 /* Let USB recover : We will catch that in the watchdog */ 529 /* Let USB recover : We will catch that in the watchdog */
530 /*netif_start_queue(netdev);*/ 530 /*netif_start_queue(netdev);*/
@@ -556,7 +556,7 @@ static void write_bulk_callback(struct urb *urb)
556 struct sk_buff *skb = urb->context; 556 struct sk_buff *skb = urb->context;
557 struct irda_usb_cb *self = ((struct irda_skb_cb *) skb->cb)->context; 557 struct irda_usb_cb *self = ((struct irda_skb_cb *) skb->cb)->context;
558 558
559 IRDA_DEBUG(2, "%s()\n", __FUNCTION__); 559 IRDA_DEBUG(2, "%s()\n", __func__);
560 560
561 /* We should always have a context */ 561 /* We should always have a context */
562 IRDA_ASSERT(self != NULL, return;); 562 IRDA_ASSERT(self != NULL, return;);
@@ -570,7 +570,7 @@ static void write_bulk_callback(struct urb *urb)
570 /* Check for timeout and other USB nasties */ 570 /* Check for timeout and other USB nasties */
571 if (urb->status != 0) { 571 if (urb->status != 0) {
572 /* I get a lot of -ECONNABORTED = -103 here - Jean II */ 572 /* I get a lot of -ECONNABORTED = -103 here - Jean II */
573 IRDA_DEBUG(0, "%s(), URB complete status %d, transfer_flags 0x%04X\n", __FUNCTION__, urb->status, urb->transfer_flags); 573 IRDA_DEBUG(0, "%s(), URB complete status %d, transfer_flags 0x%04X\n", __func__, urb->status, urb->transfer_flags);
574 574
575 /* Don't do anything here, that might confuse the USB layer, 575 /* Don't do anything here, that might confuse the USB layer,
576 * and we could go in recursion and blow the kernel stack... 576 * and we could go in recursion and blow the kernel stack...
@@ -589,7 +589,7 @@ static void write_bulk_callback(struct urb *urb)
589 589
590 /* If the network is closed, stop everything */ 590 /* If the network is closed, stop everything */
591 if ((!self->netopen) || (!self->present)) { 591 if ((!self->netopen) || (!self->present)) {
592 IRDA_DEBUG(0, "%s(), Network is gone...\n", __FUNCTION__); 592 IRDA_DEBUG(0, "%s(), Network is gone...\n", __func__);
593 spin_unlock_irqrestore(&self->lock, flags); 593 spin_unlock_irqrestore(&self->lock, flags);
594 return; 594 return;
595 } 595 }
@@ -600,7 +600,7 @@ static void write_bulk_callback(struct urb *urb)
600 (self->new_xbofs != self->xbofs)) { 600 (self->new_xbofs != self->xbofs)) {
601 /* We haven't changed speed yet (because of 601 /* We haven't changed speed yet (because of
602 * IUC_SPEED_BUG), so do it now - Jean II */ 602 * IUC_SPEED_BUG), so do it now - Jean II */
603 IRDA_DEBUG(1, "%s(), Changing speed now...\n", __FUNCTION__); 603 IRDA_DEBUG(1, "%s(), Changing speed now...\n", __func__);
604 irda_usb_change_speed_xbofs(self); 604 irda_usb_change_speed_xbofs(self);
605 } else { 605 } else {
606 /* New speed and xbof is now commited in hardware */ 606 /* New speed and xbof is now commited in hardware */
@@ -632,7 +632,7 @@ static void irda_usb_net_timeout(struct net_device *netdev)
632 struct urb *urb; 632 struct urb *urb;
633 int done = 0; /* If we have made any progress */ 633 int done = 0; /* If we have made any progress */
634 634
635 IRDA_DEBUG(0, "%s(), Network layer thinks we timed out!\n", __FUNCTION__); 635 IRDA_DEBUG(0, "%s(), Network layer thinks we timed out!\n", __func__);
636 IRDA_ASSERT(self != NULL, return;); 636 IRDA_ASSERT(self != NULL, return;);
637 637
638 /* Protect us from USB callbacks, net Tx and else. */ 638 /* Protect us from USB callbacks, net Tx and else. */
@@ -640,7 +640,7 @@ static void irda_usb_net_timeout(struct net_device *netdev)
640 640
641 /* self->present *MUST* be read under spinlock */ 641 /* self->present *MUST* be read under spinlock */
642 if (!self->present) { 642 if (!self->present) {
643 IRDA_WARNING("%s(), device not present!\n", __FUNCTION__); 643 IRDA_WARNING("%s(), device not present!\n", __func__);
644 netif_stop_queue(netdev); 644 netif_stop_queue(netdev);
645 spin_unlock_irqrestore(&self->lock, flags); 645 spin_unlock_irqrestore(&self->lock, flags);
646 return; 646 return;
@@ -763,7 +763,7 @@ static void irda_usb_submit(struct irda_usb_cb *self, struct sk_buff *skb, struc
763 struct irda_skb_cb *cb; 763 struct irda_skb_cb *cb;
764 int ret; 764 int ret;
765 765
766 IRDA_DEBUG(2, "%s()\n", __FUNCTION__); 766 IRDA_DEBUG(2, "%s()\n", __func__);
767 767
768 /* This should never happen */ 768 /* This should never happen */
769 IRDA_ASSERT(skb != NULL, return;); 769 IRDA_ASSERT(skb != NULL, return;);
@@ -786,7 +786,7 @@ static void irda_usb_submit(struct irda_usb_cb *self, struct sk_buff *skb, struc
786 /* If this ever happen, we are in deep s***. 786 /* If this ever happen, we are in deep s***.
787 * Basically, the Rx path will stop... */ 787 * Basically, the Rx path will stop... */
788 IRDA_WARNING("%s(), Failed to submit Rx URB %d\n", 788 IRDA_WARNING("%s(), Failed to submit Rx URB %d\n",
789 __FUNCTION__, ret); 789 __func__, ret);
790 } 790 }
791} 791}
792 792
@@ -807,7 +807,7 @@ static void irda_usb_receive(struct urb *urb)
807 struct urb *next_urb; 807 struct urb *next_urb;
808 unsigned int len, docopy; 808 unsigned int len, docopy;
809 809
810 IRDA_DEBUG(2, "%s(), len=%d\n", __FUNCTION__, urb->actual_length); 810 IRDA_DEBUG(2, "%s(), len=%d\n", __func__, urb->actual_length);
811 811
812 /* Find ourselves */ 812 /* Find ourselves */
813 cb = (struct irda_skb_cb *) skb->cb; 813 cb = (struct irda_skb_cb *) skb->cb;
@@ -817,7 +817,7 @@ static void irda_usb_receive(struct urb *urb)
817 817
818 /* If the network is closed or the device gone, stop everything */ 818 /* If the network is closed or the device gone, stop everything */
819 if ((!self->netopen) || (!self->present)) { 819 if ((!self->netopen) || (!self->present)) {
820 IRDA_DEBUG(0, "%s(), Network is gone!\n", __FUNCTION__); 820 IRDA_DEBUG(0, "%s(), Network is gone!\n", __func__);
821 /* Don't re-submit the URB : will stall the Rx path */ 821 /* Don't re-submit the URB : will stall the Rx path */
822 return; 822 return;
823 } 823 }
@@ -840,7 +840,7 @@ static void irda_usb_receive(struct urb *urb)
840 /* Usually precursor to a hot-unplug on OHCI. */ 840 /* Usually precursor to a hot-unplug on OHCI. */
841 default: 841 default:
842 self->stats.rx_errors++; 842 self->stats.rx_errors++;
843 IRDA_DEBUG(0, "%s(), RX status %d, transfer_flags 0x%04X \n", __FUNCTION__, urb->status, urb->transfer_flags); 843 IRDA_DEBUG(0, "%s(), RX status %d, transfer_flags 0x%04X \n", __func__, urb->status, urb->transfer_flags);
844 break; 844 break;
845 } 845 }
846 /* If we received an error, we don't want to resubmit the 846 /* If we received an error, we don't want to resubmit the
@@ -861,7 +861,7 @@ static void irda_usb_receive(struct urb *urb)
861 861
862 /* Check for empty frames */ 862 /* Check for empty frames */
863 if (urb->actual_length <= self->header_length) { 863 if (urb->actual_length <= self->header_length) {
864 IRDA_WARNING("%s(), empty frame!\n", __FUNCTION__); 864 IRDA_WARNING("%s(), empty frame!\n", __func__);
865 goto done; 865 goto done;
866 } 866 }
867 867
@@ -967,7 +967,7 @@ static void irda_usb_rx_defer_expired(unsigned long data)
967 struct irda_skb_cb *cb; 967 struct irda_skb_cb *cb;
968 struct urb *next_urb; 968 struct urb *next_urb;
969 969
970 IRDA_DEBUG(2, "%s()\n", __FUNCTION__); 970 IRDA_DEBUG(2, "%s()\n", __func__);
971 971
972 /* Find ourselves */ 972 /* Find ourselves */
973 cb = (struct irda_skb_cb *) skb->cb; 973 cb = (struct irda_skb_cb *) skb->cb;
@@ -1053,7 +1053,7 @@ static int stir421x_fw_upload(struct irda_usb_cb *self,
1053 patch_block, block_size, 1053 patch_block, block_size,
1054 &actual_len, msecs_to_jiffies(500)); 1054 &actual_len, msecs_to_jiffies(500));
1055 IRDA_DEBUG(3,"%s(): Bulk send %u bytes, ret=%d\n", 1055 IRDA_DEBUG(3,"%s(): Bulk send %u bytes, ret=%d\n",
1056 __FUNCTION__, actual_len, ret); 1056 __func__, actual_len, ret);
1057 1057
1058 if (ret < 0) 1058 if (ret < 0)
1059 break; 1059 break;
@@ -1092,7 +1092,7 @@ static int stir421x_patch_device(struct irda_usb_cb *self)
1092 1092
1093 /* We get a patch from userspace */ 1093 /* We get a patch from userspace */
1094 IRDA_MESSAGE("%s(): Received firmware %s (%zu bytes)\n", 1094 IRDA_MESSAGE("%s(): Received firmware %s (%zu bytes)\n",
1095 __FUNCTION__, stir421x_fw_name, fw->size); 1095 __func__, stir421x_fw_name, fw->size);
1096 1096
1097 ret = -EINVAL; 1097 ret = -EINVAL;
1098 1098
@@ -1116,7 +1116,7 @@ static int stir421x_patch_device(struct irda_usb_cb *self)
1116 + (build % 10); 1116 + (build % 10);
1117 1117
1118 IRDA_DEBUG(3, "%s(): Firmware Product version %ld\n", 1118 IRDA_DEBUG(3, "%s(): Firmware Product version %ld\n",
1119 __FUNCTION__, fw_version); 1119 __func__, fw_version);
1120 } 1120 }
1121 } 1121 }
1122 1122
@@ -1172,7 +1172,7 @@ static int irda_usb_net_open(struct net_device *netdev)
1172 char hwname[16]; 1172 char hwname[16];
1173 int i; 1173 int i;
1174 1174
1175 IRDA_DEBUG(1, "%s()\n", __FUNCTION__); 1175 IRDA_DEBUG(1, "%s()\n", __func__);
1176 1176
1177 IRDA_ASSERT(netdev != NULL, return -1;); 1177 IRDA_ASSERT(netdev != NULL, return -1;);
1178 self = (struct irda_usb_cb *) netdev->priv; 1178 self = (struct irda_usb_cb *) netdev->priv;
@@ -1182,13 +1182,13 @@ static int irda_usb_net_open(struct net_device *netdev)
1182 /* Can only open the device if it's there */ 1182 /* Can only open the device if it's there */
1183 if(!self->present) { 1183 if(!self->present) {
1184 spin_unlock_irqrestore(&self->lock, flags); 1184 spin_unlock_irqrestore(&self->lock, flags);
1185 IRDA_WARNING("%s(), device not present!\n", __FUNCTION__); 1185 IRDA_WARNING("%s(), device not present!\n", __func__);
1186 return -1; 1186 return -1;
1187 } 1187 }
1188 1188
1189 if(self->needspatch) { 1189 if(self->needspatch) {
1190 spin_unlock_irqrestore(&self->lock, flags); 1190 spin_unlock_irqrestore(&self->lock, flags);
1191 IRDA_WARNING("%s(), device needs patch\n", __FUNCTION__) ; 1191 IRDA_WARNING("%s(), device needs patch\n", __func__) ;
1192 return -EIO ; 1192 return -EIO ;
1193 } 1193 }
1194 1194
@@ -1231,7 +1231,7 @@ static int irda_usb_net_open(struct net_device *netdev)
1231 /* If this ever happen, we are in deep s***. 1231 /* If this ever happen, we are in deep s***.
1232 * Basically, we can't start the Rx path... */ 1232 * Basically, we can't start the Rx path... */
1233 IRDA_WARNING("%s(), Failed to allocate Rx skb\n", 1233 IRDA_WARNING("%s(), Failed to allocate Rx skb\n",
1234 __FUNCTION__); 1234 __func__);
1235 return -1; 1235 return -1;
1236 } 1236 }
1237 //skb_reserve(newskb, USB_IRDA_HEADER - 1); 1237 //skb_reserve(newskb, USB_IRDA_HEADER - 1);
@@ -1254,7 +1254,7 @@ static int irda_usb_net_close(struct net_device *netdev)
1254 struct irda_usb_cb *self; 1254 struct irda_usb_cb *self;
1255 int i; 1255 int i;
1256 1256
1257 IRDA_DEBUG(1, "%s()\n", __FUNCTION__); 1257 IRDA_DEBUG(1, "%s()\n", __func__);
1258 1258
1259 IRDA_ASSERT(netdev != NULL, return -1;); 1259 IRDA_ASSERT(netdev != NULL, return -1;);
1260 self = (struct irda_usb_cb *) netdev->priv; 1260 self = (struct irda_usb_cb *) netdev->priv;
@@ -1309,7 +1309,7 @@ static int irda_usb_net_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
1309 self = dev->priv; 1309 self = dev->priv;
1310 IRDA_ASSERT(self != NULL, return -1;); 1310 IRDA_ASSERT(self != NULL, return -1;);
1311 1311
1312 IRDA_DEBUG(2, "%s(), %s, (cmd=0x%X)\n", __FUNCTION__, dev->name, cmd); 1312 IRDA_DEBUG(2, "%s(), %s, (cmd=0x%X)\n", __func__, dev->name, cmd);
1313 1313
1314 switch (cmd) { 1314 switch (cmd) {
1315 case SIOCSBANDWIDTH: /* Set bandwidth */ 1315 case SIOCSBANDWIDTH: /* Set bandwidth */
@@ -1367,7 +1367,7 @@ static inline void irda_usb_init_qos(struct irda_usb_cb *self)
1367{ 1367{
1368 struct irda_class_desc *desc; 1368 struct irda_class_desc *desc;
1369 1369
1370 IRDA_DEBUG(3, "%s()\n", __FUNCTION__); 1370 IRDA_DEBUG(3, "%s()\n", __func__);
1371 1371
1372 desc = self->irda_desc; 1372 desc = self->irda_desc;
1373 1373
@@ -1384,7 +1384,7 @@ static inline void irda_usb_init_qos(struct irda_usb_cb *self)
1384 self->qos.data_size.bits = desc->bmDataSize; 1384 self->qos.data_size.bits = desc->bmDataSize;
1385 1385
1386 IRDA_DEBUG(0, "%s(), dongle says speed=0x%X, size=0x%X, window=0x%X, bofs=0x%X, turn=0x%X\n", 1386 IRDA_DEBUG(0, "%s(), dongle says speed=0x%X, size=0x%X, window=0x%X, bofs=0x%X, turn=0x%X\n",
1387 __FUNCTION__, self->qos.baud_rate.bits, self->qos.data_size.bits, self->qos.window_size.bits, self->qos.additional_bofs.bits, self->qos.min_turn_time.bits); 1387 __func__, self->qos.baud_rate.bits, self->qos.data_size.bits, self->qos.window_size.bits, self->qos.additional_bofs.bits, self->qos.min_turn_time.bits);
1388 1388
1389 /* Don't always trust what the dongle tell us */ 1389 /* Don't always trust what the dongle tell us */
1390 if(self->capability & IUC_SIR_ONLY) 1390 if(self->capability & IUC_SIR_ONLY)
@@ -1419,7 +1419,7 @@ static inline int irda_usb_open(struct irda_usb_cb *self)
1419{ 1419{
1420 struct net_device *netdev = self->netdev; 1420 struct net_device *netdev = self->netdev;
1421 1421
1422 IRDA_DEBUG(1, "%s()\n", __FUNCTION__); 1422 IRDA_DEBUG(1, "%s()\n", __func__);
1423 1423
1424 irda_usb_init_qos(self); 1424 irda_usb_init_qos(self);
1425 1425
@@ -1442,7 +1442,7 @@ static inline int irda_usb_open(struct irda_usb_cb *self)
1442 */ 1442 */
1443static inline void irda_usb_close(struct irda_usb_cb *self) 1443static inline void irda_usb_close(struct irda_usb_cb *self)
1444{ 1444{
1445 IRDA_DEBUG(1, "%s()\n", __FUNCTION__); 1445 IRDA_DEBUG(1, "%s()\n", __func__);
1446 1446
1447 /* Remove netdevice */ 1447 /* Remove netdevice */
1448 unregister_netdev(self->netdev); 1448 unregister_netdev(self->netdev);
@@ -1515,13 +1515,13 @@ static inline int irda_usb_parse_endpoints(struct irda_usb_cb *self, struct usb_
1515 /* This is our interrupt endpoint */ 1515 /* This is our interrupt endpoint */
1516 self->bulk_int_ep = ep; 1516 self->bulk_int_ep = ep;
1517 } else { 1517 } else {
1518 IRDA_ERROR("%s(), Unrecognised endpoint %02X.\n", __FUNCTION__, ep); 1518 IRDA_ERROR("%s(), Unrecognised endpoint %02X.\n", __func__, ep);
1519 } 1519 }
1520 } 1520 }
1521 } 1521 }
1522 1522
1523 IRDA_DEBUG(0, "%s(), And our endpoints are : in=%02X, out=%02X (%d), int=%02X\n", 1523 IRDA_DEBUG(0, "%s(), And our endpoints are : in=%02X, out=%02X (%d), int=%02X\n",
1524 __FUNCTION__, self->bulk_in_ep, self->bulk_out_ep, self->bulk_out_mtu, self->bulk_int_ep); 1524 __func__, self->bulk_in_ep, self->bulk_out_ep, self->bulk_out_mtu, self->bulk_int_ep);
1525 1525
1526 return((self->bulk_in_ep != 0) && (self->bulk_out_ep != 0)); 1526 return((self->bulk_in_ep != 0) && (self->bulk_out_ep != 0));
1527} 1527}
@@ -1583,7 +1583,7 @@ static inline struct irda_class_desc *irda_usb_find_class_desc(struct usb_interf
1583 0, intf->altsetting->desc.bInterfaceNumber, desc, 1583 0, intf->altsetting->desc.bInterfaceNumber, desc,
1584 sizeof(*desc), 500); 1584 sizeof(*desc), 500);
1585 1585
1586 IRDA_DEBUG(1, "%s(), ret=%d\n", __FUNCTION__, ret); 1586 IRDA_DEBUG(1, "%s(), ret=%d\n", __func__, ret);
1587 if (ret < sizeof(*desc)) { 1587 if (ret < sizeof(*desc)) {
1588 IRDA_WARNING("usb-irda: class_descriptor read %s (%d)\n", 1588 IRDA_WARNING("usb-irda: class_descriptor read %s (%d)\n",
1589 (ret<0) ? "failed" : "too short", ret); 1589 (ret<0) ? "failed" : "too short", ret);
@@ -1696,10 +1696,10 @@ static int irda_usb_probe(struct usb_interface *intf,
1696 /* Martin Diehl says if we get a -EPIPE we should 1696 /* Martin Diehl says if we get a -EPIPE we should
1697 * be fine and we don't need to do a usb_clear_halt(). 1697 * be fine and we don't need to do a usb_clear_halt().
1698 * - Jean II */ 1698 * - Jean II */
1699 IRDA_DEBUG(0, "%s(), Received -EPIPE, ignoring...\n", __FUNCTION__); 1699 IRDA_DEBUG(0, "%s(), Received -EPIPE, ignoring...\n", __func__);
1700 break; 1700 break;
1701 default: 1701 default:
1702 IRDA_DEBUG(0, "%s(), Unknown error %d\n", __FUNCTION__, ret); 1702 IRDA_DEBUG(0, "%s(), Unknown error %d\n", __func__, ret);
1703 ret = -EIO; 1703 ret = -EIO;
1704 goto err_out_3; 1704 goto err_out_3;
1705 } 1705 }
@@ -1708,7 +1708,7 @@ static int irda_usb_probe(struct usb_interface *intf,
1708 interface = intf->cur_altsetting; 1708 interface = intf->cur_altsetting;
1709 if(!irda_usb_parse_endpoints(self, interface->endpoint, 1709 if(!irda_usb_parse_endpoints(self, interface->endpoint,
1710 interface->desc.bNumEndpoints)) { 1710 interface->desc.bNumEndpoints)) {
1711 IRDA_ERROR("%s(), Bogus endpoints...\n", __FUNCTION__); 1711 IRDA_ERROR("%s(), Bogus endpoints...\n", __func__);
1712 ret = -EIO; 1712 ret = -EIO;
1713 goto err_out_3; 1713 goto err_out_3;
1714 } 1714 }
@@ -1815,7 +1815,7 @@ static void irda_usb_disconnect(struct usb_interface *intf)
1815 struct irda_usb_cb *self = usb_get_intfdata(intf); 1815 struct irda_usb_cb *self = usb_get_intfdata(intf);
1816 int i; 1816 int i;
1817 1817
1818 IRDA_DEBUG(1, "%s()\n", __FUNCTION__); 1818 IRDA_DEBUG(1, "%s()\n", __func__);
1819 1819
1820 usb_set_intfdata(intf, NULL); 1820 usb_set_intfdata(intf, NULL);
1821 if (!self) 1821 if (!self)
@@ -1865,7 +1865,7 @@ static void irda_usb_disconnect(struct usb_interface *intf)
1865 1865
1866 /* Free self and network device */ 1866 /* Free self and network device */
1867 free_netdev(self->netdev); 1867 free_netdev(self->netdev);
1868 IRDA_DEBUG(0, "%s(), USB IrDA Disconnected\n", __FUNCTION__); 1868 IRDA_DEBUG(0, "%s(), USB IrDA Disconnected\n", __func__);
1869} 1869}
1870 1870
1871/*------------------------------------------------------------------*/ 1871/*------------------------------------------------------------------*/
diff --git a/drivers/net/irda/irtty-sir.c b/drivers/net/irda/irtty-sir.c
index 9e33196f9459..6bcee01c684c 100644
--- a/drivers/net/irda/irtty-sir.c
+++ b/drivers/net/irda/irtty-sir.c
@@ -231,7 +231,7 @@ static void irtty_receive_buf(struct tty_struct *tty, const unsigned char *cp,
231 231
232 dev = priv->dev; 232 dev = priv->dev;
233 if (!dev) { 233 if (!dev) {
234 IRDA_WARNING("%s(), not ready yet!\n", __FUNCTION__); 234 IRDA_WARNING("%s(), not ready yet!\n", __func__);
235 return; 235 return;
236 } 236 }
237 237
@@ -388,7 +388,7 @@ static int irtty_ioctl(struct tty_struct *tty, struct file *file, unsigned int c
388 IRDA_ASSERT(priv != NULL, return -ENODEV;); 388 IRDA_ASSERT(priv != NULL, return -ENODEV;);
389 IRDA_ASSERT(priv->magic == IRTTY_MAGIC, return -EBADR;); 389 IRDA_ASSERT(priv->magic == IRTTY_MAGIC, return -EBADR;);
390 390
391 IRDA_DEBUG(3, "%s(cmd=0x%X)\n", __FUNCTION__, cmd); 391 IRDA_DEBUG(3, "%s(cmd=0x%X)\n", __func__, cmd);
392 392
393 dev = priv->dev; 393 dev = priv->dev;
394 IRDA_ASSERT(dev != NULL, return -1;); 394 IRDA_ASSERT(dev != NULL, return -1;);
@@ -476,7 +476,7 @@ static int irtty_open(struct tty_struct *tty)
476 476
477 mutex_unlock(&irtty_mutex); 477 mutex_unlock(&irtty_mutex);
478 478
479 IRDA_DEBUG(0, "%s - %s: irda line discipline opened\n", __FUNCTION__, tty->name); 479 IRDA_DEBUG(0, "%s - %s: irda line discipline opened\n", __func__, tty->name);
480 480
481 return 0; 481 return 0;
482 482
@@ -528,7 +528,7 @@ static void irtty_close(struct tty_struct *tty)
528 528
529 kfree(priv); 529 kfree(priv);
530 530
531 IRDA_DEBUG(0, "%s - %s: irda line discipline closed\n", __FUNCTION__, tty->name); 531 IRDA_DEBUG(0, "%s - %s: irda line discipline closed\n", __func__, tty->name);
532} 532}
533 533
534/* ------------------------------------------------------- */ 534/* ------------------------------------------------------- */
@@ -566,7 +566,7 @@ static void __exit irtty_sir_cleanup(void)
566 566
567 if ((err = tty_unregister_ldisc(N_IRDA))) { 567 if ((err = tty_unregister_ldisc(N_IRDA))) {
568 IRDA_ERROR("%s(), can't unregister line discipline (err = %d)\n", 568 IRDA_ERROR("%s(), can't unregister line discipline (err = %d)\n",
569 __FUNCTION__, err); 569 __func__, err);
570 } 570 }
571} 571}
572 572
diff --git a/drivers/net/irda/kingsun-sir.c b/drivers/net/irda/kingsun-sir.c
index 648e54b3f00e..73fe83be34fe 100644
--- a/drivers/net/irda/kingsun-sir.c
+++ b/drivers/net/irda/kingsun-sir.c
@@ -243,7 +243,7 @@ static void kingsun_rcv_irq(struct urb *urb)
243 } 243 }
244 } else if (urb->actual_length > 0) { 244 } else if (urb->actual_length > 0) {
245 err("%s(): Unexpected response length, expected %d got %d", 245 err("%s(): Unexpected response length, expected %d got %d",
246 __FUNCTION__, kingsun->max_rx, urb->actual_length); 246 __func__, kingsun->max_rx, urb->actual_length);
247 } 247 }
248 /* This urb has already been filled in kingsun_net_open */ 248 /* This urb has already been filled in kingsun_net_open */
249 ret = usb_submit_urb(urb, GFP_ATOMIC); 249 ret = usb_submit_urb(urb, GFP_ATOMIC);
diff --git a/drivers/net/irda/litelink-sir.c b/drivers/net/irda/litelink-sir.c
index 73261c54bbfd..d6d9d2e5ad49 100644
--- a/drivers/net/irda/litelink-sir.c
+++ b/drivers/net/irda/litelink-sir.c
@@ -78,7 +78,7 @@ static int litelink_open(struct sir_dev *dev)
78{ 78{
79 struct qos_info *qos = &dev->qos; 79 struct qos_info *qos = &dev->qos;
80 80
81 IRDA_DEBUG(2, "%s()\n", __FUNCTION__); 81 IRDA_DEBUG(2, "%s()\n", __func__);
82 82
83 /* Power up dongle */ 83 /* Power up dongle */
84 sirdev_set_dtr_rts(dev, TRUE, TRUE); 84 sirdev_set_dtr_rts(dev, TRUE, TRUE);
@@ -95,7 +95,7 @@ static int litelink_open(struct sir_dev *dev)
95 95
96static int litelink_close(struct sir_dev *dev) 96static int litelink_close(struct sir_dev *dev)
97{ 97{
98 IRDA_DEBUG(2, "%s()\n", __FUNCTION__); 98 IRDA_DEBUG(2, "%s()\n", __func__);
99 99
100 /* Power off dongle */ 100 /* Power off dongle */
101 sirdev_set_dtr_rts(dev, FALSE, FALSE); 101 sirdev_set_dtr_rts(dev, FALSE, FALSE);
@@ -113,7 +113,7 @@ static int litelink_change_speed(struct sir_dev *dev, unsigned speed)
113{ 113{
114 int i; 114 int i;
115 115
116 IRDA_DEBUG(2, "%s()\n", __FUNCTION__); 116 IRDA_DEBUG(2, "%s()\n", __func__);
117 117
118 /* dongle already reset by irda-thread - current speed (dongle and 118 /* dongle already reset by irda-thread - current speed (dongle and
119 * port) is the default speed (115200 for litelink!) 119 * port) is the default speed (115200 for litelink!)
@@ -156,7 +156,7 @@ static int litelink_change_speed(struct sir_dev *dev, unsigned speed)
156 */ 156 */
157static int litelink_reset(struct sir_dev *dev) 157static int litelink_reset(struct sir_dev *dev)
158{ 158{
159 IRDA_DEBUG(2, "%s()\n", __FUNCTION__); 159 IRDA_DEBUG(2, "%s()\n", __func__);
160 160
161 /* probably the power-up can be dropped here, but with only 161 /* probably the power-up can be dropped here, but with only
162 * 15 usec delay it's not worth the risk unless somebody with 162 * 15 usec delay it's not worth the risk unless somebody with
diff --git a/drivers/net/irda/ma600-sir.c b/drivers/net/irda/ma600-sir.c
index 809906d94762..1ceed9cfb7c4 100644
--- a/drivers/net/irda/ma600-sir.c
+++ b/drivers/net/irda/ma600-sir.c
@@ -67,13 +67,13 @@ static struct dongle_driver ma600 = {
67 67
68static int __init ma600_sir_init(void) 68static int __init ma600_sir_init(void)
69{ 69{
70 IRDA_DEBUG(2, "%s()\n", __FUNCTION__); 70 IRDA_DEBUG(2, "%s()\n", __func__);
71 return irda_register_dongle(&ma600); 71 return irda_register_dongle(&ma600);
72} 72}
73 73
74static void __exit ma600_sir_cleanup(void) 74static void __exit ma600_sir_cleanup(void)
75{ 75{
76 IRDA_DEBUG(2, "%s()\n", __FUNCTION__); 76 IRDA_DEBUG(2, "%s()\n", __func__);
77 irda_unregister_dongle(&ma600); 77 irda_unregister_dongle(&ma600);
78} 78}
79 79
@@ -88,7 +88,7 @@ static int ma600_open(struct sir_dev *dev)
88{ 88{
89 struct qos_info *qos = &dev->qos; 89 struct qos_info *qos = &dev->qos;
90 90
91 IRDA_DEBUG(2, "%s()\n", __FUNCTION__); 91 IRDA_DEBUG(2, "%s()\n", __func__);
92 92
93 sirdev_set_dtr_rts(dev, TRUE, TRUE); 93 sirdev_set_dtr_rts(dev, TRUE, TRUE);
94 94
@@ -106,7 +106,7 @@ static int ma600_open(struct sir_dev *dev)
106 106
107static int ma600_close(struct sir_dev *dev) 107static int ma600_close(struct sir_dev *dev)
108{ 108{
109 IRDA_DEBUG(2, "%s()\n", __FUNCTION__); 109 IRDA_DEBUG(2, "%s()\n", __func__);
110 110
111 /* Power off dongle */ 111 /* Power off dongle */
112 sirdev_set_dtr_rts(dev, FALSE, FALSE); 112 sirdev_set_dtr_rts(dev, FALSE, FALSE);
@@ -176,7 +176,7 @@ static int ma600_change_speed(struct sir_dev *dev, unsigned speed)
176{ 176{
177 u8 byte; 177 u8 byte;
178 178
179 IRDA_DEBUG(2, "%s(), speed=%d (was %d)\n", __FUNCTION__, 179 IRDA_DEBUG(2, "%s(), speed=%d (was %d)\n", __func__,
180 speed, dev->speed); 180 speed, dev->speed);
181 181
182 /* dongle already reset, dongle and port at default speed (9600) */ 182 /* dongle already reset, dongle and port at default speed (9600) */
@@ -201,12 +201,12 @@ static int ma600_change_speed(struct sir_dev *dev, unsigned speed)
201 sirdev_raw_read(dev, &byte, sizeof(byte)); 201 sirdev_raw_read(dev, &byte, sizeof(byte));
202 if (byte != get_control_byte(speed)) { 202 if (byte != get_control_byte(speed)) {
203 IRDA_WARNING("%s(): bad control byte read-back %02x != %02x\n", 203 IRDA_WARNING("%s(): bad control byte read-back %02x != %02x\n",
204 __FUNCTION__, (unsigned) byte, 204 __func__, (unsigned) byte,
205 (unsigned) get_control_byte(speed)); 205 (unsigned) get_control_byte(speed));
206 return -1; 206 return -1;
207 } 207 }
208 else 208 else
209 IRDA_DEBUG(2, "%s() control byte write read OK\n", __FUNCTION__); 209 IRDA_DEBUG(2, "%s() control byte write read OK\n", __func__);
210#endif 210#endif
211 211
212 /* Set DTR, Set RTS */ 212 /* Set DTR, Set RTS */
@@ -238,7 +238,7 @@ static int ma600_change_speed(struct sir_dev *dev, unsigned speed)
238 238
239int ma600_reset(struct sir_dev *dev) 239int ma600_reset(struct sir_dev *dev)
240{ 240{
241 IRDA_DEBUG(2, "%s()\n", __FUNCTION__); 241 IRDA_DEBUG(2, "%s()\n", __func__);
242 242
243 /* Reset the dongle : set DTR low for 10 ms */ 243 /* Reset the dongle : set DTR low for 10 ms */
244 sirdev_set_dtr_rts(dev, FALSE, TRUE); 244 sirdev_set_dtr_rts(dev, FALSE, TRUE);
diff --git a/drivers/net/irda/mcp2120-sir.c b/drivers/net/irda/mcp2120-sir.c
index 67bd016e4df8..5e2f4859cee7 100644
--- a/drivers/net/irda/mcp2120-sir.c
+++ b/drivers/net/irda/mcp2120-sir.c
@@ -63,7 +63,7 @@ static int mcp2120_open(struct sir_dev *dev)
63{ 63{
64 struct qos_info *qos = &dev->qos; 64 struct qos_info *qos = &dev->qos;
65 65
66 IRDA_DEBUG(2, "%s()\n", __FUNCTION__); 66 IRDA_DEBUG(2, "%s()\n", __func__);
67 67
68 /* seems no explicit power-on required here and reset switching it on anyway */ 68 /* seems no explicit power-on required here and reset switching it on anyway */
69 69
@@ -76,7 +76,7 @@ static int mcp2120_open(struct sir_dev *dev)
76 76
77static int mcp2120_close(struct sir_dev *dev) 77static int mcp2120_close(struct sir_dev *dev)
78{ 78{
79 IRDA_DEBUG(2, "%s()\n", __FUNCTION__); 79 IRDA_DEBUG(2, "%s()\n", __func__);
80 80
81 /* Power off dongle */ 81 /* Power off dongle */
82 /* reset and inhibit mcp2120 */ 82 /* reset and inhibit mcp2120 */
@@ -102,7 +102,7 @@ static int mcp2120_change_speed(struct sir_dev *dev, unsigned speed)
102 u8 control[2]; 102 u8 control[2];
103 static int ret = 0; 103 static int ret = 0;
104 104
105 IRDA_DEBUG(2, "%s()\n", __FUNCTION__); 105 IRDA_DEBUG(2, "%s()\n", __func__);
106 106
107 switch (state) { 107 switch (state) {
108 case SIRDEV_STATE_DONGLE_SPEED: 108 case SIRDEV_STATE_DONGLE_SPEED:
@@ -155,7 +155,7 @@ static int mcp2120_change_speed(struct sir_dev *dev, unsigned speed)
155 break; 155 break;
156 156
157 default: 157 default:
158 IRDA_ERROR("%s(), undefine state %d\n", __FUNCTION__, state); 158 IRDA_ERROR("%s(), undefine state %d\n", __func__, state);
159 ret = -EINVAL; 159 ret = -EINVAL;
160 break; 160 break;
161 } 161 }
@@ -187,7 +187,7 @@ static int mcp2120_reset(struct sir_dev *dev)
187 unsigned delay = 0; 187 unsigned delay = 0;
188 int ret = 0; 188 int ret = 0;
189 189
190 IRDA_DEBUG(2, "%s()\n", __FUNCTION__); 190 IRDA_DEBUG(2, "%s()\n", __func__);
191 191
192 switch (state) { 192 switch (state) {
193 case SIRDEV_STATE_DONGLE_RESET: 193 case SIRDEV_STATE_DONGLE_RESET:
@@ -213,7 +213,7 @@ static int mcp2120_reset(struct sir_dev *dev)
213 break; 213 break;
214 214
215 default: 215 default:
216 IRDA_ERROR("%s(), undefined state %d\n", __FUNCTION__, state); 216 IRDA_ERROR("%s(), undefined state %d\n", __func__, state);
217 ret = -EINVAL; 217 ret = -EINVAL;
218 break; 218 break;
219 } 219 }
diff --git a/drivers/net/irda/nsc-ircc.c b/drivers/net/irda/nsc-ircc.c
index effc1ce8179a..8583d951a6ad 100644
--- a/drivers/net/irda/nsc-ircc.c
+++ b/drivers/net/irda/nsc-ircc.c
@@ -151,8 +151,8 @@ static char *dongle_types[] = {
151static chipio_t pnp_info; 151static chipio_t pnp_info;
152static const struct pnp_device_id nsc_ircc_pnp_table[] = { 152static const struct pnp_device_id nsc_ircc_pnp_table[] = {
153 { .id = "NSC6001", .driver_data = 0 }, 153 { .id = "NSC6001", .driver_data = 0 },
154 { .id = "IBM0071", .driver_data = 0 },
155 { .id = "HWPC224", .driver_data = 0 }, 154 { .id = "HWPC224", .driver_data = 0 },
155 { .id = "IBM0071", .driver_data = NSC_FORCE_DONGLE_TYPE9 },
156 { } 156 { }
157}; 157};
158 158
@@ -223,7 +223,7 @@ static int __init nsc_ircc_init(void)
223 223
224 /* Probe for all the NSC chipsets we know about */ 224 /* Probe for all the NSC chipsets we know about */
225 for (chip = chips; chip->name ; chip++) { 225 for (chip = chips; chip->name ; chip++) {
226 IRDA_DEBUG(2, "%s(), Probing for %s ...\n", __FUNCTION__, 226 IRDA_DEBUG(2, "%s(), Probing for %s ...\n", __func__,
227 chip->name); 227 chip->name);
228 228
229 /* Try all config registers for this chip */ 229 /* Try all config registers for this chip */
@@ -235,7 +235,7 @@ static int __init nsc_ircc_init(void)
235 /* Read index register */ 235 /* Read index register */
236 reg = inb(cfg_base); 236 reg = inb(cfg_base);
237 if (reg == 0xff) { 237 if (reg == 0xff) {
238 IRDA_DEBUG(2, "%s() no chip at 0x%03x\n", __FUNCTION__, cfg_base); 238 IRDA_DEBUG(2, "%s() no chip at 0x%03x\n", __func__, cfg_base);
239 continue; 239 continue;
240 } 240 }
241 241
@@ -244,7 +244,7 @@ static int __init nsc_ircc_init(void)
244 id = inb(cfg_base+1); 244 id = inb(cfg_base+1);
245 if ((id & chip->cid_mask) == chip->cid_value) { 245 if ((id & chip->cid_mask) == chip->cid_value) {
246 IRDA_DEBUG(2, "%s() Found %s chip, revision=%d\n", 246 IRDA_DEBUG(2, "%s() Found %s chip, revision=%d\n",
247 __FUNCTION__, chip->name, id & ~chip->cid_mask); 247 __func__, chip->name, id & ~chip->cid_mask);
248 248
249 /* 249 /*
250 * If we found a correct PnP setting, 250 * If we found a correct PnP setting,
@@ -295,7 +295,7 @@ static int __init nsc_ircc_init(void)
295 } 295 }
296 i++; 296 i++;
297 } else { 297 } else {
298 IRDA_DEBUG(2, "%s(), Wrong chip id=0x%02x\n", __FUNCTION__, id); 298 IRDA_DEBUG(2, "%s(), Wrong chip id=0x%02x\n", __func__, id);
299 } 299 }
300 } 300 }
301 } 301 }
@@ -345,7 +345,7 @@ static int __init nsc_ircc_open(chipio_t *info)
345 void *ret; 345 void *ret;
346 int err, chip_index; 346 int err, chip_index;
347 347
348 IRDA_DEBUG(2, "%s()\n", __FUNCTION__); 348 IRDA_DEBUG(2, "%s()\n", __func__);
349 349
350 350
351 for (chip_index = 0; chip_index < ARRAY_SIZE(dev_self); chip_index++) { 351 for (chip_index = 0; chip_index < ARRAY_SIZE(dev_self); chip_index++) {
@@ -354,7 +354,7 @@ static int __init nsc_ircc_open(chipio_t *info)
354 } 354 }
355 355
356 if (chip_index == ARRAY_SIZE(dev_self)) { 356 if (chip_index == ARRAY_SIZE(dev_self)) {
357 IRDA_ERROR("%s(), maximum number of supported chips reached!\n", __FUNCTION__); 357 IRDA_ERROR("%s(), maximum number of supported chips reached!\n", __func__);
358 return -ENOMEM; 358 return -ENOMEM;
359 } 359 }
360 360
@@ -369,7 +369,7 @@ static int __init nsc_ircc_open(chipio_t *info)
369 dev = alloc_irdadev(sizeof(struct nsc_ircc_cb)); 369 dev = alloc_irdadev(sizeof(struct nsc_ircc_cb));
370 if (dev == NULL) { 370 if (dev == NULL) {
371 IRDA_ERROR("%s(), can't allocate memory for " 371 IRDA_ERROR("%s(), can't allocate memory for "
372 "control block!\n", __FUNCTION__); 372 "control block!\n", __func__);
373 return -ENOMEM; 373 return -ENOMEM;
374 } 374 }
375 375
@@ -393,7 +393,7 @@ static int __init nsc_ircc_open(chipio_t *info)
393 ret = request_region(self->io.fir_base, self->io.fir_ext, driver_name); 393 ret = request_region(self->io.fir_base, self->io.fir_ext, driver_name);
394 if (!ret) { 394 if (!ret) {
395 IRDA_WARNING("%s(), can't get iobase of 0x%03x\n", 395 IRDA_WARNING("%s(), can't get iobase of 0x%03x\n",
396 __FUNCTION__, self->io.fir_base); 396 __func__, self->io.fir_base);
397 err = -ENODEV; 397 err = -ENODEV;
398 goto out1; 398 goto out1;
399 } 399 }
@@ -450,7 +450,7 @@ static int __init nsc_ircc_open(chipio_t *info)
450 450
451 err = register_netdev(dev); 451 err = register_netdev(dev);
452 if (err) { 452 if (err) {
453 IRDA_ERROR("%s(), register_netdev() failed!\n", __FUNCTION__); 453 IRDA_ERROR("%s(), register_netdev() failed!\n", __func__);
454 goto out4; 454 goto out4;
455 } 455 }
456 IRDA_MESSAGE("IrDA: Registered device %s\n", dev->name); 456 IRDA_MESSAGE("IrDA: Registered device %s\n", dev->name);
@@ -506,7 +506,7 @@ static int __exit nsc_ircc_close(struct nsc_ircc_cb *self)
506{ 506{
507 int iobase; 507 int iobase;
508 508
509 IRDA_DEBUG(4, "%s()\n", __FUNCTION__); 509 IRDA_DEBUG(4, "%s()\n", __func__);
510 510
511 IRDA_ASSERT(self != NULL, return -1;); 511 IRDA_ASSERT(self != NULL, return -1;);
512 512
@@ -519,7 +519,7 @@ static int __exit nsc_ircc_close(struct nsc_ircc_cb *self)
519 519
520 /* Release the PORT that this driver is using */ 520 /* Release the PORT that this driver is using */
521 IRDA_DEBUG(4, "%s(), Releasing Region %03x\n", 521 IRDA_DEBUG(4, "%s(), Releasing Region %03x\n",
522 __FUNCTION__, self->io.fir_base); 522 __func__, self->io.fir_base);
523 release_region(self->io.fir_base, self->io.fir_ext); 523 release_region(self->io.fir_base, self->io.fir_ext);
524 524
525 if (self->tx_buff.head) 525 if (self->tx_buff.head)
@@ -557,7 +557,7 @@ static int nsc_ircc_init_108(nsc_chip_t *chip, chipio_t *info)
557 case 0x2e8: outb(0x15, cfg_base+1); break; 557 case 0x2e8: outb(0x15, cfg_base+1); break;
558 case 0x3f8: outb(0x16, cfg_base+1); break; 558 case 0x3f8: outb(0x16, cfg_base+1); break;
559 case 0x2f8: outb(0x17, cfg_base+1); break; 559 case 0x2f8: outb(0x17, cfg_base+1); break;
560 default: IRDA_ERROR("%s(), invalid base_address", __FUNCTION__); 560 default: IRDA_ERROR("%s(), invalid base_address", __func__);
561 } 561 }
562 562
563 /* Control Signal Routing Register (CSRT) */ 563 /* Control Signal Routing Register (CSRT) */
@@ -569,7 +569,7 @@ static int nsc_ircc_init_108(nsc_chip_t *chip, chipio_t *info)
569 case 9: temp = 0x05; break; 569 case 9: temp = 0x05; break;
570 case 11: temp = 0x06; break; 570 case 11: temp = 0x06; break;
571 case 15: temp = 0x07; break; 571 case 15: temp = 0x07; break;
572 default: IRDA_ERROR("%s(), invalid irq", __FUNCTION__); 572 default: IRDA_ERROR("%s(), invalid irq", __func__);
573 } 573 }
574 outb(CFG_108_CSRT, cfg_base); 574 outb(CFG_108_CSRT, cfg_base);
575 575
@@ -577,7 +577,7 @@ static int nsc_ircc_init_108(nsc_chip_t *chip, chipio_t *info)
577 case 0: outb(0x08+temp, cfg_base+1); break; 577 case 0: outb(0x08+temp, cfg_base+1); break;
578 case 1: outb(0x10+temp, cfg_base+1); break; 578 case 1: outb(0x10+temp, cfg_base+1); break;
579 case 3: outb(0x18+temp, cfg_base+1); break; 579 case 3: outb(0x18+temp, cfg_base+1); break;
580 default: IRDA_ERROR("%s(), invalid dma", __FUNCTION__); 580 default: IRDA_ERROR("%s(), invalid dma", __func__);
581 } 581 }
582 582
583 outb(CFG_108_MCTL, cfg_base); /* Mode Control Register (MCTL) */ 583 outb(CFG_108_MCTL, cfg_base); /* Mode Control Register (MCTL) */
@@ -616,7 +616,7 @@ static int nsc_ircc_probe_108(nsc_chip_t *chip, chipio_t *info)
616 break; 616 break;
617 } 617 }
618 info->sir_base = info->fir_base; 618 info->sir_base = info->fir_base;
619 IRDA_DEBUG(2, "%s(), probing fir_base=0x%03x\n", __FUNCTION__, 619 IRDA_DEBUG(2, "%s(), probing fir_base=0x%03x\n", __func__,
620 info->fir_base); 620 info->fir_base);
621 621
622 /* Read control signals routing register (CSRT) */ 622 /* Read control signals routing register (CSRT) */
@@ -649,7 +649,7 @@ static int nsc_ircc_probe_108(nsc_chip_t *chip, chipio_t *info)
649 info->irq = 15; 649 info->irq = 15;
650 break; 650 break;
651 } 651 }
652 IRDA_DEBUG(2, "%s(), probing irq=%d\n", __FUNCTION__, info->irq); 652 IRDA_DEBUG(2, "%s(), probing irq=%d\n", __func__, info->irq);
653 653
654 /* Currently we only read Rx DMA but it will also be used for Tx */ 654 /* Currently we only read Rx DMA but it will also be used for Tx */
655 switch ((reg >> 3) & 0x03) { 655 switch ((reg >> 3) & 0x03) {
@@ -666,7 +666,7 @@ static int nsc_ircc_probe_108(nsc_chip_t *chip, chipio_t *info)
666 info->dma = 3; 666 info->dma = 3;
667 break; 667 break;
668 } 668 }
669 IRDA_DEBUG(2, "%s(), probing dma=%d\n", __FUNCTION__, info->dma); 669 IRDA_DEBUG(2, "%s(), probing dma=%d\n", __func__, info->dma);
670 670
671 /* Read mode control register (MCTL) */ 671 /* Read mode control register (MCTL) */
672 outb(CFG_108_MCTL, cfg_base); 672 outb(CFG_108_MCTL, cfg_base);
@@ -823,7 +823,7 @@ static int nsc_ircc_init_39x(nsc_chip_t *chip, chipio_t *info)
823 /* User is sure about his config... accept it. */ 823 /* User is sure about his config... accept it. */
824 IRDA_DEBUG(2, "%s(): nsc_ircc_init_39x (user settings): " 824 IRDA_DEBUG(2, "%s(): nsc_ircc_init_39x (user settings): "
825 "io=0x%04x, irq=%d, dma=%d\n", 825 "io=0x%04x, irq=%d, dma=%d\n",
826 __FUNCTION__, info->fir_base, info->irq, info->dma); 826 __func__, info->fir_base, info->irq, info->dma);
827 827
828 /* Access bank for SP2 */ 828 /* Access bank for SP2 */
829 outb(CFG_39X_LDN, cfg_base); 829 outb(CFG_39X_LDN, cfg_base);
@@ -864,7 +864,7 @@ static int nsc_ircc_probe_39x(nsc_chip_t *chip, chipio_t *info)
864 int enabled, susp; 864 int enabled, susp;
865 865
866 IRDA_DEBUG(2, "%s(), nsc_ircc_probe_39x, base=%d\n", 866 IRDA_DEBUG(2, "%s(), nsc_ircc_probe_39x, base=%d\n",
867 __FUNCTION__, cfg_base); 867 __func__, cfg_base);
868 868
869 /* This function should be executed with irq off to avoid 869 /* This function should be executed with irq off to avoid
870 * another driver messing with the Super I/O bank - Jean II */ 870 * another driver messing with the Super I/O bank - Jean II */
@@ -898,7 +898,7 @@ static int nsc_ircc_probe_39x(nsc_chip_t *chip, chipio_t *info)
898 outb(CFG_39X_SPC, cfg_base); 898 outb(CFG_39X_SPC, cfg_base);
899 susp = 1 - ((inb(cfg_base+1) & 0x02) >> 1); 899 susp = 1 - ((inb(cfg_base+1) & 0x02) >> 1);
900 900
901 IRDA_DEBUG(2, "%s(): io=0x%02x%02x, irq=%d (type %d), rxdma=%d, txdma=%d, enabled=%d (suspended=%d)\n", __FUNCTION__, reg1,reg2,irq,irqt,dma1,dma2,enabled,susp); 901 IRDA_DEBUG(2, "%s(): io=0x%02x%02x, irq=%d (type %d), rxdma=%d, txdma=%d, enabled=%d (suspended=%d)\n", __func__, reg1,reg2,irq,irqt,dma1,dma2,enabled,susp);
902 902
903 /* Configure SP2 */ 903 /* Configure SP2 */
904 904
@@ -930,7 +930,10 @@ static int nsc_ircc_pnp_probe(struct pnp_dev *dev, const struct pnp_device_id *i
930 pnp_info.dma = -1; 930 pnp_info.dma = -1;
931 pnp_succeeded = 1; 931 pnp_succeeded = 1;
932 932
933 /* There don't seem to be any way to get the cfg_base. 933 if (id->driver_data & NSC_FORCE_DONGLE_TYPE9)
934 dongle_id = 0x9;
935
936 /* There doesn't seem to be any way of getting the cfg_base.
934 * On my box, cfg_base is in the PnP descriptor of the 937 * On my box, cfg_base is in the PnP descriptor of the
935 * motherboard. Oh well... Jean II */ 938 * motherboard. Oh well... Jean II */
936 939
@@ -947,7 +950,7 @@ static int nsc_ircc_pnp_probe(struct pnp_dev *dev, const struct pnp_device_id *i
947 pnp_info.dma = pnp_dma(dev, 0); 950 pnp_info.dma = pnp_dma(dev, 0);
948 951
949 IRDA_DEBUG(0, "%s() : From PnP, found firbase 0x%03X ; irq %d ; dma %d.\n", 952 IRDA_DEBUG(0, "%s() : From PnP, found firbase 0x%03X ; irq %d ; dma %d.\n",
950 __FUNCTION__, pnp_info.fir_base, pnp_info.irq, pnp_info.dma); 953 __func__, pnp_info.fir_base, pnp_info.irq, pnp_info.dma);
951 954
952 if((pnp_info.fir_base == 0) || 955 if((pnp_info.fir_base == 0) ||
953 (pnp_info.irq == -1) || (pnp_info.dma == -1)) { 956 (pnp_info.irq == -1) || (pnp_info.dma == -1)) {
@@ -976,7 +979,7 @@ static int nsc_ircc_setup(chipio_t *info)
976 version = inb(iobase+MID); 979 version = inb(iobase+MID);
977 980
978 IRDA_DEBUG(2, "%s() Driver %s Found chip version %02x\n", 981 IRDA_DEBUG(2, "%s() Driver %s Found chip version %02x\n",
979 __FUNCTION__, driver_name, version); 982 __func__, driver_name, version);
980 983
981 /* Should be 0x2? */ 984 /* Should be 0x2? */
982 if (0x20 != (version & 0xf0)) { 985 if (0x20 != (version & 0xf0)) {
@@ -1080,30 +1083,30 @@ static void nsc_ircc_init_dongle_interface (int iobase, int dongle_id)
1080 case 0x00: /* same as */ 1083 case 0x00: /* same as */
1081 case 0x01: /* Differential serial interface */ 1084 case 0x01: /* Differential serial interface */
1082 IRDA_DEBUG(0, "%s(), %s not defined by irda yet\n", 1085 IRDA_DEBUG(0, "%s(), %s not defined by irda yet\n",
1083 __FUNCTION__, dongle_types[dongle_id]); 1086 __func__, dongle_types[dongle_id]);
1084 break; 1087 break;
1085 case 0x02: /* same as */ 1088 case 0x02: /* same as */
1086 case 0x03: /* Reserved */ 1089 case 0x03: /* Reserved */
1087 IRDA_DEBUG(0, "%s(), %s not defined by irda yet\n", 1090 IRDA_DEBUG(0, "%s(), %s not defined by irda yet\n",
1088 __FUNCTION__, dongle_types[dongle_id]); 1091 __func__, dongle_types[dongle_id]);
1089 break; 1092 break;
1090 case 0x04: /* Sharp RY5HD01 */ 1093 case 0x04: /* Sharp RY5HD01 */
1091 break; 1094 break;
1092 case 0x05: /* Reserved, but this is what the Thinkpad reports */ 1095 case 0x05: /* Reserved, but this is what the Thinkpad reports */
1093 IRDA_DEBUG(0, "%s(), %s not defined by irda yet\n", 1096 IRDA_DEBUG(0, "%s(), %s not defined by irda yet\n",
1094 __FUNCTION__, dongle_types[dongle_id]); 1097 __func__, dongle_types[dongle_id]);
1095 break; 1098 break;
1096 case 0x06: /* Single-ended serial interface */ 1099 case 0x06: /* Single-ended serial interface */
1097 IRDA_DEBUG(0, "%s(), %s not defined by irda yet\n", 1100 IRDA_DEBUG(0, "%s(), %s not defined by irda yet\n",
1098 __FUNCTION__, dongle_types[dongle_id]); 1101 __func__, dongle_types[dongle_id]);
1099 break; 1102 break;
1100 case 0x07: /* Consumer-IR only */ 1103 case 0x07: /* Consumer-IR only */
1101 IRDA_DEBUG(0, "%s(), %s is not for IrDA mode\n", 1104 IRDA_DEBUG(0, "%s(), %s is not for IrDA mode\n",
1102 __FUNCTION__, dongle_types[dongle_id]); 1105 __func__, dongle_types[dongle_id]);
1103 break; 1106 break;
1104 case 0x08: /* HP HSDL-2300, HP HSDL-3600/HSDL-3610 */ 1107 case 0x08: /* HP HSDL-2300, HP HSDL-3600/HSDL-3610 */
1105 IRDA_DEBUG(0, "%s(), %s\n", 1108 IRDA_DEBUG(0, "%s(), %s\n",
1106 __FUNCTION__, dongle_types[dongle_id]); 1109 __func__, dongle_types[dongle_id]);
1107 break; 1110 break;
1108 case 0x09: /* IBM31T1100 or Temic TFDS6000/TFDS6500 */ 1111 case 0x09: /* IBM31T1100 or Temic TFDS6000/TFDS6500 */
1109 outb(0x28, iobase+7); /* Set irsl[0-2] as output */ 1112 outb(0x28, iobase+7); /* Set irsl[0-2] as output */
@@ -1111,7 +1114,7 @@ static void nsc_ircc_init_dongle_interface (int iobase, int dongle_id)
1111 case 0x0A: /* same as */ 1114 case 0x0A: /* same as */
1112 case 0x0B: /* Reserved */ 1115 case 0x0B: /* Reserved */
1113 IRDA_DEBUG(0, "%s(), %s not defined by irda yet\n", 1116 IRDA_DEBUG(0, "%s(), %s not defined by irda yet\n",
1114 __FUNCTION__, dongle_types[dongle_id]); 1117 __func__, dongle_types[dongle_id]);
1115 break; 1118 break;
1116 case 0x0C: /* same as */ 1119 case 0x0C: /* same as */
1117 case 0x0D: /* HP HSDL-1100/HSDL-2100 */ 1120 case 0x0D: /* HP HSDL-1100/HSDL-2100 */
@@ -1126,14 +1129,14 @@ static void nsc_ircc_init_dongle_interface (int iobase, int dongle_id)
1126 break; 1129 break;
1127 case 0x0F: /* No dongle connected */ 1130 case 0x0F: /* No dongle connected */
1128 IRDA_DEBUG(0, "%s(), %s\n", 1131 IRDA_DEBUG(0, "%s(), %s\n",
1129 __FUNCTION__, dongle_types[dongle_id]); 1132 __func__, dongle_types[dongle_id]);
1130 1133
1131 switch_bank(iobase, BANK0); 1134 switch_bank(iobase, BANK0);
1132 outb(0x62, iobase+MCR); 1135 outb(0x62, iobase+MCR);
1133 break; 1136 break;
1134 default: 1137 default:
1135 IRDA_DEBUG(0, "%s(), invalid dongle_id %#x", 1138 IRDA_DEBUG(0, "%s(), invalid dongle_id %#x",
1136 __FUNCTION__, dongle_id); 1139 __func__, dongle_id);
1137 } 1140 }
1138 1141
1139 /* IRCFG1: IRSL1 and 2 are set to IrDA mode */ 1142 /* IRCFG1: IRSL1 and 2 are set to IrDA mode */
@@ -1165,30 +1168,30 @@ static void nsc_ircc_change_dongle_speed(int iobase, int speed, int dongle_id)
1165 case 0x00: /* same as */ 1168 case 0x00: /* same as */
1166 case 0x01: /* Differential serial interface */ 1169 case 0x01: /* Differential serial interface */
1167 IRDA_DEBUG(0, "%s(), %s not defined by irda yet\n", 1170 IRDA_DEBUG(0, "%s(), %s not defined by irda yet\n",
1168 __FUNCTION__, dongle_types[dongle_id]); 1171 __func__, dongle_types[dongle_id]);
1169 break; 1172 break;
1170 case 0x02: /* same as */ 1173 case 0x02: /* same as */
1171 case 0x03: /* Reserved */ 1174 case 0x03: /* Reserved */
1172 IRDA_DEBUG(0, "%s(), %s not defined by irda yet\n", 1175 IRDA_DEBUG(0, "%s(), %s not defined by irda yet\n",
1173 __FUNCTION__, dongle_types[dongle_id]); 1176 __func__, dongle_types[dongle_id]);
1174 break; 1177 break;
1175 case 0x04: /* Sharp RY5HD01 */ 1178 case 0x04: /* Sharp RY5HD01 */
1176 break; 1179 break;
1177 case 0x05: /* Reserved */ 1180 case 0x05: /* Reserved */
1178 IRDA_DEBUG(0, "%s(), %s not defined by irda yet\n", 1181 IRDA_DEBUG(0, "%s(), %s not defined by irda yet\n",
1179 __FUNCTION__, dongle_types[dongle_id]); 1182 __func__, dongle_types[dongle_id]);
1180 break; 1183 break;
1181 case 0x06: /* Single-ended serial interface */ 1184 case 0x06: /* Single-ended serial interface */
1182 IRDA_DEBUG(0, "%s(), %s not defined by irda yet\n", 1185 IRDA_DEBUG(0, "%s(), %s not defined by irda yet\n",
1183 __FUNCTION__, dongle_types[dongle_id]); 1186 __func__, dongle_types[dongle_id]);
1184 break; 1187 break;
1185 case 0x07: /* Consumer-IR only */ 1188 case 0x07: /* Consumer-IR only */
1186 IRDA_DEBUG(0, "%s(), %s is not for IrDA mode\n", 1189 IRDA_DEBUG(0, "%s(), %s is not for IrDA mode\n",
1187 __FUNCTION__, dongle_types[dongle_id]); 1190 __func__, dongle_types[dongle_id]);
1188 break; 1191 break;
1189 case 0x08: /* HP HSDL-2300, HP HSDL-3600/HSDL-3610 */ 1192 case 0x08: /* HP HSDL-2300, HP HSDL-3600/HSDL-3610 */
1190 IRDA_DEBUG(0, "%s(), %s\n", 1193 IRDA_DEBUG(0, "%s(), %s\n",
1191 __FUNCTION__, dongle_types[dongle_id]); 1194 __func__, dongle_types[dongle_id]);
1192 outb(0x00, iobase+4); 1195 outb(0x00, iobase+4);
1193 if (speed > 115200) 1196 if (speed > 115200)
1194 outb(0x01, iobase+4); 1197 outb(0x01, iobase+4);
@@ -1207,7 +1210,7 @@ static void nsc_ircc_change_dongle_speed(int iobase, int speed, int dongle_id)
1207 case 0x0A: /* same as */ 1210 case 0x0A: /* same as */
1208 case 0x0B: /* Reserved */ 1211 case 0x0B: /* Reserved */
1209 IRDA_DEBUG(0, "%s(), %s not defined by irda yet\n", 1212 IRDA_DEBUG(0, "%s(), %s not defined by irda yet\n",
1210 __FUNCTION__, dongle_types[dongle_id]); 1213 __func__, dongle_types[dongle_id]);
1211 break; 1214 break;
1212 case 0x0C: /* same as */ 1215 case 0x0C: /* same as */
1213 case 0x0D: /* HP HSDL-1100/HSDL-2100 */ 1216 case 0x0D: /* HP HSDL-1100/HSDL-2100 */
@@ -1216,13 +1219,13 @@ static void nsc_ircc_change_dongle_speed(int iobase, int speed, int dongle_id)
1216 break; 1219 break;
1217 case 0x0F: /* No dongle connected */ 1220 case 0x0F: /* No dongle connected */
1218 IRDA_DEBUG(0, "%s(), %s is not for IrDA mode\n", 1221 IRDA_DEBUG(0, "%s(), %s is not for IrDA mode\n",
1219 __FUNCTION__, dongle_types[dongle_id]); 1222 __func__, dongle_types[dongle_id]);
1220 1223
1221 switch_bank(iobase, BANK0); 1224 switch_bank(iobase, BANK0);
1222 outb(0x62, iobase+MCR); 1225 outb(0x62, iobase+MCR);
1223 break; 1226 break;
1224 default: 1227 default:
1225 IRDA_DEBUG(0, "%s(), invalid data_rate\n", __FUNCTION__); 1228 IRDA_DEBUG(0, "%s(), invalid data_rate\n", __func__);
1226 } 1229 }
1227 /* Restore bank register */ 1230 /* Restore bank register */
1228 outb(bank, iobase+BSR); 1231 outb(bank, iobase+BSR);
@@ -1243,7 +1246,7 @@ static __u8 nsc_ircc_change_speed(struct nsc_ircc_cb *self, __u32 speed)
1243 __u8 bank; 1246 __u8 bank;
1244 __u8 ier; /* Interrupt enable register */ 1247 __u8 ier; /* Interrupt enable register */
1245 1248
1246 IRDA_DEBUG(2, "%s(), speed=%d\n", __FUNCTION__, speed); 1249 IRDA_DEBUG(2, "%s(), speed=%d\n", __func__, speed);
1247 1250
1248 IRDA_ASSERT(self != NULL, return 0;); 1251 IRDA_ASSERT(self != NULL, return 0;);
1249 1252
@@ -1276,20 +1279,20 @@ static __u8 nsc_ircc_change_speed(struct nsc_ircc_cb *self, __u32 speed)
1276 outb(inb(iobase+4) | 0x04, iobase+4); 1279 outb(inb(iobase+4) | 0x04, iobase+4);
1277 1280
1278 mcr = MCR_MIR; 1281 mcr = MCR_MIR;
1279 IRDA_DEBUG(0, "%s(), handling baud of 576000\n", __FUNCTION__); 1282 IRDA_DEBUG(0, "%s(), handling baud of 576000\n", __func__);
1280 break; 1283 break;
1281 case 1152000: 1284 case 1152000:
1282 mcr = MCR_MIR; 1285 mcr = MCR_MIR;
1283 IRDA_DEBUG(0, "%s(), handling baud of 1152000\n", __FUNCTION__); 1286 IRDA_DEBUG(0, "%s(), handling baud of 1152000\n", __func__);
1284 break; 1287 break;
1285 case 4000000: 1288 case 4000000:
1286 mcr = MCR_FIR; 1289 mcr = MCR_FIR;
1287 IRDA_DEBUG(0, "%s(), handling baud of 4000000\n", __FUNCTION__); 1290 IRDA_DEBUG(0, "%s(), handling baud of 4000000\n", __func__);
1288 break; 1291 break;
1289 default: 1292 default:
1290 mcr = MCR_FIR; 1293 mcr = MCR_FIR;
1291 IRDA_DEBUG(0, "%s(), unknown baud rate of %d\n", 1294 IRDA_DEBUG(0, "%s(), unknown baud rate of %d\n",
1292 __FUNCTION__, speed); 1295 __func__, speed);
1293 break; 1296 break;
1294 } 1297 }
1295 1298
@@ -1594,7 +1597,7 @@ static int nsc_ircc_pio_write(int iobase, __u8 *buf, int len, int fifo_size)
1594 int actual = 0; 1597 int actual = 0;
1595 __u8 bank; 1598 __u8 bank;
1596 1599
1597 IRDA_DEBUG(4, "%s()\n", __FUNCTION__); 1600 IRDA_DEBUG(4, "%s()\n", __func__);
1598 1601
1599 /* Save current bank */ 1602 /* Save current bank */
1600 bank = inb(iobase+BSR); 1603 bank = inb(iobase+BSR);
@@ -1602,7 +1605,7 @@ static int nsc_ircc_pio_write(int iobase, __u8 *buf, int len, int fifo_size)
1602 switch_bank(iobase, BANK0); 1605 switch_bank(iobase, BANK0);
1603 if (!(inb_p(iobase+LSR) & LSR_TXEMP)) { 1606 if (!(inb_p(iobase+LSR) & LSR_TXEMP)) {
1604 IRDA_DEBUG(4, "%s(), warning, FIFO not empty yet!\n", 1607 IRDA_DEBUG(4, "%s(), warning, FIFO not empty yet!\n",
1605 __FUNCTION__); 1608 __func__);
1606 1609
1607 /* FIFO may still be filled to the Tx interrupt threshold */ 1610 /* FIFO may still be filled to the Tx interrupt threshold */
1608 fifo_size -= 17; 1611 fifo_size -= 17;
@@ -1615,7 +1618,7 @@ static int nsc_ircc_pio_write(int iobase, __u8 *buf, int len, int fifo_size)
1615 } 1618 }
1616 1619
1617 IRDA_DEBUG(4, "%s(), fifo_size %d ; %d sent of %d\n", 1620 IRDA_DEBUG(4, "%s(), fifo_size %d ; %d sent of %d\n",
1618 __FUNCTION__, fifo_size, actual, len); 1621 __func__, fifo_size, actual, len);
1619 1622
1620 /* Restore bank */ 1623 /* Restore bank */
1621 outb(bank, iobase+BSR); 1624 outb(bank, iobase+BSR);
@@ -1636,7 +1639,7 @@ static int nsc_ircc_dma_xmit_complete(struct nsc_ircc_cb *self)
1636 __u8 bank; 1639 __u8 bank;
1637 int ret = TRUE; 1640 int ret = TRUE;
1638 1641
1639 IRDA_DEBUG(2, "%s()\n", __FUNCTION__); 1642 IRDA_DEBUG(2, "%s()\n", __func__);
1640 1643
1641 iobase = self->io.fir_base; 1644 iobase = self->io.fir_base;
1642 1645
@@ -1767,7 +1770,7 @@ static int nsc_ircc_dma_receive_complete(struct nsc_ircc_cb *self, int iobase)
1767 len = inb(iobase+RFLFL) | ((inb(iobase+RFLFH) & 0x1f) << 8); 1770 len = inb(iobase+RFLFL) | ((inb(iobase+RFLFH) & 0x1f) << 8);
1768 1771
1769 if (st_fifo->tail >= MAX_RX_WINDOW) { 1772 if (st_fifo->tail >= MAX_RX_WINDOW) {
1770 IRDA_DEBUG(0, "%s(), window is full!\n", __FUNCTION__); 1773 IRDA_DEBUG(0, "%s(), window is full!\n", __func__);
1771 continue; 1774 continue;
1772 } 1775 }
1773 1776
@@ -1859,7 +1862,7 @@ static int nsc_ircc_dma_receive_complete(struct nsc_ircc_cb *self, int iobase)
1859 if (skb == NULL) { 1862 if (skb == NULL) {
1860 IRDA_WARNING("%s(), memory squeeze, " 1863 IRDA_WARNING("%s(), memory squeeze, "
1861 "dropping frame.\n", 1864 "dropping frame.\n",
1862 __FUNCTION__); 1865 __func__);
1863 self->stats.rx_dropped++; 1866 self->stats.rx_dropped++;
1864 1867
1865 /* Restore bank register */ 1868 /* Restore bank register */
@@ -1965,7 +1968,7 @@ static void nsc_ircc_sir_interrupt(struct nsc_ircc_cb *self, int eir)
1965 * Need to be after self->io.direction to avoid race with 1968 * Need to be after self->io.direction to avoid race with
1966 * nsc_ircc_hard_xmit_sir() - Jean II */ 1969 * nsc_ircc_hard_xmit_sir() - Jean II */
1967 if (self->new_speed) { 1970 if (self->new_speed) {
1968 IRDA_DEBUG(2, "%s(), Changing speed!\n", __FUNCTION__); 1971 IRDA_DEBUG(2, "%s(), Changing speed!\n", __func__);
1969 self->ier = nsc_ircc_change_speed(self, 1972 self->ier = nsc_ircc_change_speed(self,
1970 self->new_speed); 1973 self->new_speed);
1971 self->new_speed = 0; 1974 self->new_speed = 0;
@@ -2051,7 +2054,7 @@ static void nsc_ircc_fir_interrupt(struct nsc_ircc_cb *self, int iobase,
2051 } else 2054 } else
2052 IRDA_WARNING("%s(), potential " 2055 IRDA_WARNING("%s(), potential "
2053 "Tx queue lockup !\n", 2056 "Tx queue lockup !\n",
2054 __FUNCTION__); 2057 __func__);
2055 } 2058 }
2056 } else { 2059 } else {
2057 /* Not finished yet, so interrupt on DMA again */ 2060 /* Not finished yet, so interrupt on DMA again */
@@ -2160,7 +2163,7 @@ static int nsc_ircc_net_open(struct net_device *dev)
2160 char hwname[32]; 2163 char hwname[32];
2161 __u8 bank; 2164 __u8 bank;
2162 2165
2163 IRDA_DEBUG(4, "%s()\n", __FUNCTION__); 2166 IRDA_DEBUG(4, "%s()\n", __func__);
2164 2167
2165 IRDA_ASSERT(dev != NULL, return -1;); 2168 IRDA_ASSERT(dev != NULL, return -1;);
2166 self = (struct nsc_ircc_cb *) dev->priv; 2169 self = (struct nsc_ircc_cb *) dev->priv;
@@ -2222,7 +2225,7 @@ static int nsc_ircc_net_close(struct net_device *dev)
2222 int iobase; 2225 int iobase;
2223 __u8 bank; 2226 __u8 bank;
2224 2227
2225 IRDA_DEBUG(4, "%s()\n", __FUNCTION__); 2228 IRDA_DEBUG(4, "%s()\n", __func__);
2226 2229
2227 IRDA_ASSERT(dev != NULL, return -1;); 2230 IRDA_ASSERT(dev != NULL, return -1;);
2228 2231
@@ -2276,7 +2279,7 @@ static int nsc_ircc_net_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
2276 2279
2277 IRDA_ASSERT(self != NULL, return -1;); 2280 IRDA_ASSERT(self != NULL, return -1;);
2278 2281
2279 IRDA_DEBUG(2, "%s(), %s, (cmd=0x%X)\n", __FUNCTION__, dev->name, cmd); 2282 IRDA_DEBUG(2, "%s(), %s, (cmd=0x%X)\n", __func__, dev->name, cmd);
2280 2283
2281 switch (cmd) { 2284 switch (cmd) {
2282 case SIOCSBANDWIDTH: /* Set bandwidth */ 2285 case SIOCSBANDWIDTH: /* Set bandwidth */
diff --git a/drivers/net/irda/nsc-ircc.h b/drivers/net/irda/nsc-ircc.h
index 29398a4f73fd..71cd3c5a0762 100644
--- a/drivers/net/irda/nsc-ircc.h
+++ b/drivers/net/irda/nsc-ircc.h
@@ -35,6 +35,9 @@
35#include <linux/types.h> 35#include <linux/types.h>
36#include <asm/io.h> 36#include <asm/io.h>
37 37
38/* Features for chips (set in driver_data) */
39#define NSC_FORCE_DONGLE_TYPE9 0x00000001
40
38/* DMA modes needed */ 41/* DMA modes needed */
39#define DMA_TX_MODE 0x08 /* Mem to I/O, ++, demand. */ 42#define DMA_TX_MODE 0x08 /* Mem to I/O, ++, demand. */
40#define DMA_RX_MODE 0x04 /* I/O to mem, ++, demand. */ 43#define DMA_RX_MODE 0x04 /* I/O to mem, ++, demand. */
diff --git a/drivers/net/irda/old_belkin-sir.c b/drivers/net/irda/old_belkin-sir.c
index 8c22c7374a23..75714bc71030 100644
--- a/drivers/net/irda/old_belkin-sir.c
+++ b/drivers/net/irda/old_belkin-sir.c
@@ -92,7 +92,7 @@ static int old_belkin_open(struct sir_dev *dev)
92{ 92{
93 struct qos_info *qos = &dev->qos; 93 struct qos_info *qos = &dev->qos;
94 94
95 IRDA_DEBUG(2, "%s()\n", __FUNCTION__); 95 IRDA_DEBUG(2, "%s()\n", __func__);
96 96
97 /* Power on dongle */ 97 /* Power on dongle */
98 sirdev_set_dtr_rts(dev, TRUE, TRUE); 98 sirdev_set_dtr_rts(dev, TRUE, TRUE);
@@ -110,7 +110,7 @@ static int old_belkin_open(struct sir_dev *dev)
110 110
111static int old_belkin_close(struct sir_dev *dev) 111static int old_belkin_close(struct sir_dev *dev)
112{ 112{
113 IRDA_DEBUG(2, "%s()\n", __FUNCTION__); 113 IRDA_DEBUG(2, "%s()\n", __func__);
114 114
115 /* Power off dongle */ 115 /* Power off dongle */
116 sirdev_set_dtr_rts(dev, FALSE, FALSE); 116 sirdev_set_dtr_rts(dev, FALSE, FALSE);
@@ -125,7 +125,7 @@ static int old_belkin_close(struct sir_dev *dev)
125 */ 125 */
126static int old_belkin_change_speed(struct sir_dev *dev, unsigned speed) 126static int old_belkin_change_speed(struct sir_dev *dev, unsigned speed)
127{ 127{
128 IRDA_DEBUG(2, "%s()\n", __FUNCTION__); 128 IRDA_DEBUG(2, "%s()\n", __func__);
129 129
130 dev->speed = 9600; 130 dev->speed = 9600;
131 return (speed==dev->speed) ? 0 : -EINVAL; 131 return (speed==dev->speed) ? 0 : -EINVAL;
@@ -139,7 +139,7 @@ static int old_belkin_change_speed(struct sir_dev *dev, unsigned speed)
139 */ 139 */
140static int old_belkin_reset(struct sir_dev *dev) 140static int old_belkin_reset(struct sir_dev *dev)
141{ 141{
142 IRDA_DEBUG(2, "%s()\n", __FUNCTION__); 142 IRDA_DEBUG(2, "%s()\n", __func__);
143 143
144 /* This dongles speed "defaults" to 9600 bps ;-) */ 144 /* This dongles speed "defaults" to 9600 bps ;-) */
145 dev->speed = 9600; 145 dev->speed = 9600;
diff --git a/drivers/net/irda/pxaficp_ir.c b/drivers/net/irda/pxaficp_ir.c
index f76b0b6c277d..4aa61a1a3d55 100644
--- a/drivers/net/irda/pxaficp_ir.c
+++ b/drivers/net/irda/pxaficp_ir.c
@@ -23,8 +23,8 @@
23#include <net/irda/irda_device.h> 23#include <net/irda/irda_device.h>
24 24
25#include <asm/dma.h> 25#include <asm/dma.h>
26#include <asm/arch/irda.h> 26#include <mach/irda.h>
27#include <asm/arch/pxa-regs.h> 27#include <mach/pxa-regs.h>
28 28
29#define IrSR_RXPL_NEG_IS_ZERO (1<<4) 29#define IrSR_RXPL_NEG_IS_ZERO (1<<4)
30#define IrSR_RXPL_POS_IS_ZERO 0x0 30#define IrSR_RXPL_POS_IS_ZERO 0x0
diff --git a/drivers/net/irda/sa1100_ir.c b/drivers/net/irda/sa1100_ir.c
index 1bc8518f9197..a95188948de7 100644
--- a/drivers/net/irda/sa1100_ir.c
+++ b/drivers/net/irda/sa1100_ir.c
@@ -37,7 +37,7 @@
37 37
38#include <asm/irq.h> 38#include <asm/irq.h>
39#include <asm/dma.h> 39#include <asm/dma.h>
40#include <asm/hardware.h> 40#include <mach/hardware.h>
41#include <asm/mach/irda.h> 41#include <asm/mach/irda.h>
42 42
43static int power_level = 3; 43static int power_level = 3;
diff --git a/drivers/net/irda/sir_dev.c b/drivers/net/irda/sir_dev.c
index 6078e03de9a8..3f32909c24c8 100644
--- a/drivers/net/irda/sir_dev.c
+++ b/drivers/net/irda/sir_dev.c
@@ -80,7 +80,7 @@ static int sirdev_tx_complete_fsm(struct sir_dev *dev)
80 return 0; 80 return 0;
81 81
82 default: 82 default:
83 IRDA_ERROR("%s - undefined state\n", __FUNCTION__); 83 IRDA_ERROR("%s - undefined state\n", __func__);
84 return -EINVAL; 84 return -EINVAL;
85 } 85 }
86 fsm->substate = next_state; 86 fsm->substate = next_state;
@@ -107,11 +107,11 @@ static void sirdev_config_fsm(struct work_struct *work)
107 int ret = -1; 107 int ret = -1;
108 unsigned delay; 108 unsigned delay;
109 109
110 IRDA_DEBUG(2, "%s(), <%ld>\n", __FUNCTION__, jiffies); 110 IRDA_DEBUG(2, "%s(), <%ld>\n", __func__, jiffies);
111 111
112 do { 112 do {
113 IRDA_DEBUG(3, "%s - state=0x%04x / substate=0x%04x\n", 113 IRDA_DEBUG(3, "%s - state=0x%04x / substate=0x%04x\n",
114 __FUNCTION__, fsm->state, fsm->substate); 114 __func__, fsm->state, fsm->substate);
115 115
116 next_state = fsm->state; 116 next_state = fsm->state;
117 delay = 0; 117 delay = 0;
@@ -249,12 +249,12 @@ static void sirdev_config_fsm(struct work_struct *work)
249 break; 249 break;
250 250
251 default: 251 default:
252 IRDA_ERROR("%s - undefined state\n", __FUNCTION__); 252 IRDA_ERROR("%s - undefined state\n", __func__);
253 fsm->result = -EINVAL; 253 fsm->result = -EINVAL;
254 /* fall thru */ 254 /* fall thru */
255 255
256 case SIRDEV_STATE_ERROR: 256 case SIRDEV_STATE_ERROR:
257 IRDA_ERROR("%s - error: %d\n", __FUNCTION__, fsm->result); 257 IRDA_ERROR("%s - error: %d\n", __func__, fsm->result);
258 258
259#if 0 /* don't enable this before we have netdev->tx_timeout to recover */ 259#if 0 /* don't enable this before we have netdev->tx_timeout to recover */
260 netif_stop_queue(dev->netdev); 260 netif_stop_queue(dev->netdev);
@@ -284,11 +284,12 @@ int sirdev_schedule_request(struct sir_dev *dev, int initial_state, unsigned par
284{ 284{
285 struct sir_fsm *fsm = &dev->fsm; 285 struct sir_fsm *fsm = &dev->fsm;
286 286
287 IRDA_DEBUG(2, "%s - state=0x%04x / param=%u\n", __FUNCTION__, initial_state, param); 287 IRDA_DEBUG(2, "%s - state=0x%04x / param=%u\n", __func__,
288 initial_state, param);
288 289
289 if (down_trylock(&fsm->sem)) { 290 if (down_trylock(&fsm->sem)) {
290 if (in_interrupt() || in_atomic() || irqs_disabled()) { 291 if (in_interrupt() || in_atomic() || irqs_disabled()) {
291 IRDA_DEBUG(1, "%s(), state machine busy!\n", __FUNCTION__); 292 IRDA_DEBUG(1, "%s(), state machine busy!\n", __func__);
292 return -EWOULDBLOCK; 293 return -EWOULDBLOCK;
293 } else 294 } else
294 down(&fsm->sem); 295 down(&fsm->sem);
@@ -296,7 +297,7 @@ int sirdev_schedule_request(struct sir_dev *dev, int initial_state, unsigned par
296 297
297 if (fsm->state == SIRDEV_STATE_DEAD) { 298 if (fsm->state == SIRDEV_STATE_DEAD) {
298 /* race with sirdev_close should never happen */ 299 /* race with sirdev_close should never happen */
299 IRDA_ERROR("%s(), instance staled!\n", __FUNCTION__); 300 IRDA_ERROR("%s(), instance staled!\n", __func__);
300 up(&fsm->sem); 301 up(&fsm->sem);
301 return -ESTALE; /* or better EPIPE? */ 302 return -ESTALE; /* or better EPIPE? */
302 } 303 }
@@ -341,7 +342,7 @@ int sirdev_set_dongle(struct sir_dev *dev, IRDA_DONGLE type)
341{ 342{
342 int err; 343 int err;
343 344
344 IRDA_DEBUG(3, "%s : requesting dongle %d.\n", __FUNCTION__, type); 345 IRDA_DEBUG(3, "%s : requesting dongle %d.\n", __func__, type);
345 346
346 err = sirdev_schedule_dongle_open(dev, type); 347 err = sirdev_schedule_dongle_open(dev, type);
347 if (unlikely(err)) 348 if (unlikely(err))
@@ -376,7 +377,7 @@ int sirdev_raw_write(struct sir_dev *dev, const char *buf, int len)
376 377
377 ret = dev->drv->do_write(dev, dev->tx_buff.data, dev->tx_buff.len); 378 ret = dev->drv->do_write(dev, dev->tx_buff.data, dev->tx_buff.len);
378 if (ret > 0) { 379 if (ret > 0) {
379 IRDA_DEBUG(3, "%s(), raw-tx started\n", __FUNCTION__); 380 IRDA_DEBUG(3, "%s(), raw-tx started\n", __func__);
380 381
381 dev->tx_buff.data += ret; 382 dev->tx_buff.data += ret;
382 dev->tx_buff.len -= ret; 383 dev->tx_buff.len -= ret;
@@ -437,7 +438,7 @@ void sirdev_write_complete(struct sir_dev *dev)
437 spin_lock_irqsave(&dev->tx_lock, flags); 438 spin_lock_irqsave(&dev->tx_lock, flags);
438 439
439 IRDA_DEBUG(3, "%s() - dev->tx_buff.len = %d\n", 440 IRDA_DEBUG(3, "%s() - dev->tx_buff.len = %d\n",
440 __FUNCTION__, dev->tx_buff.len); 441 __func__, dev->tx_buff.len);
441 442
442 if (likely(dev->tx_buff.len > 0)) { 443 if (likely(dev->tx_buff.len > 0)) {
443 /* Write data left in transmit buffer */ 444 /* Write data left in transmit buffer */
@@ -450,7 +451,7 @@ void sirdev_write_complete(struct sir_dev *dev)
450 else if (unlikely(actual<0)) { 451 else if (unlikely(actual<0)) {
451 /* could be dropped later when we have tx_timeout to recover */ 452 /* could be dropped later when we have tx_timeout to recover */
452 IRDA_ERROR("%s: drv->do_write failed (%d)\n", 453 IRDA_ERROR("%s: drv->do_write failed (%d)\n",
453 __FUNCTION__, actual); 454 __func__, actual);
454 if ((skb=dev->tx_skb) != NULL) { 455 if ((skb=dev->tx_skb) != NULL) {
455 dev->tx_skb = NULL; 456 dev->tx_skb = NULL;
456 dev_kfree_skb_any(skb); 457 dev_kfree_skb_any(skb);
@@ -471,7 +472,7 @@ void sirdev_write_complete(struct sir_dev *dev)
471 * restarted when the irda-thread has completed the request. 472 * restarted when the irda-thread has completed the request.
472 */ 473 */
473 474
474 IRDA_DEBUG(3, "%s(), raw-tx done\n", __FUNCTION__); 475 IRDA_DEBUG(3, "%s(), raw-tx done\n", __func__);
475 dev->raw_tx = 0; 476 dev->raw_tx = 0;
476 goto done; /* no post-frame handling in raw mode */ 477 goto done; /* no post-frame handling in raw mode */
477 } 478 }
@@ -488,7 +489,7 @@ void sirdev_write_complete(struct sir_dev *dev)
488 * re-activated. 489 * re-activated.
489 */ 490 */
490 491
491 IRDA_DEBUG(5, "%s(), finished with frame!\n", __FUNCTION__); 492 IRDA_DEBUG(5, "%s(), finished with frame!\n", __func__);
492 493
493 if ((skb=dev->tx_skb) != NULL) { 494 if ((skb=dev->tx_skb) != NULL) {
494 dev->tx_skb = NULL; 495 dev->tx_skb = NULL;
@@ -498,14 +499,14 @@ void sirdev_write_complete(struct sir_dev *dev)
498 } 499 }
499 500
500 if (unlikely(dev->new_speed > 0)) { 501 if (unlikely(dev->new_speed > 0)) {
501 IRDA_DEBUG(5, "%s(), Changing speed!\n", __FUNCTION__); 502 IRDA_DEBUG(5, "%s(), Changing speed!\n", __func__);
502 err = sirdev_schedule_speed(dev, dev->new_speed); 503 err = sirdev_schedule_speed(dev, dev->new_speed);
503 if (unlikely(err)) { 504 if (unlikely(err)) {
504 /* should never happen 505 /* should never happen
505 * forget the speed change and hope the stack recovers 506 * forget the speed change and hope the stack recovers
506 */ 507 */
507 IRDA_ERROR("%s - schedule speed change failed: %d\n", 508 IRDA_ERROR("%s - schedule speed change failed: %d\n",
508 __FUNCTION__, err); 509 __func__, err);
509 netif_wake_queue(dev->netdev); 510 netif_wake_queue(dev->netdev);
510 } 511 }
511 /* else: success 512 /* else: success
@@ -532,13 +533,13 @@ EXPORT_SYMBOL(sirdev_write_complete);
532int sirdev_receive(struct sir_dev *dev, const unsigned char *cp, size_t count) 533int sirdev_receive(struct sir_dev *dev, const unsigned char *cp, size_t count)
533{ 534{
534 if (!dev || !dev->netdev) { 535 if (!dev || !dev->netdev) {
535 IRDA_WARNING("%s(), not ready yet!\n", __FUNCTION__); 536 IRDA_WARNING("%s(), not ready yet!\n", __func__);
536 return -1; 537 return -1;
537 } 538 }
538 539
539 if (!dev->irlap) { 540 if (!dev->irlap) {
540 IRDA_WARNING("%s - too early: %p / %zd!\n", 541 IRDA_WARNING("%s - too early: %p / %zd!\n",
541 __FUNCTION__, cp, count); 542 __func__, cp, count);
542 return -1; 543 return -1;
543 } 544 }
544 545
@@ -548,7 +549,7 @@ int sirdev_receive(struct sir_dev *dev, const unsigned char *cp, size_t count)
548 */ 549 */
549 irda_device_set_media_busy(dev->netdev, TRUE); 550 irda_device_set_media_busy(dev->netdev, TRUE);
550 dev->stats.rx_dropped++; 551 dev->stats.rx_dropped++;
551 IRDA_DEBUG(0, "%s; rx-drop: %zd\n", __FUNCTION__, count); 552 IRDA_DEBUG(0, "%s; rx-drop: %zd\n", __func__, count);
552 return 0; 553 return 0;
553 } 554 }
554 555
@@ -600,7 +601,7 @@ static int sirdev_hard_xmit(struct sk_buff *skb, struct net_device *ndev)
600 601
601 netif_stop_queue(ndev); 602 netif_stop_queue(ndev);
602 603
603 IRDA_DEBUG(3, "%s(), skb->len = %d\n", __FUNCTION__, skb->len); 604 IRDA_DEBUG(3, "%s(), skb->len = %d\n", __func__, skb->len);
604 605
605 speed = irda_get_next_speed(skb); 606 speed = irda_get_next_speed(skb);
606 if ((speed != dev->speed) && (speed != -1)) { 607 if ((speed != dev->speed) && (speed != -1)) {
@@ -637,7 +638,7 @@ static int sirdev_hard_xmit(struct sk_buff *skb, struct net_device *ndev)
637 638
638 /* Check problems */ 639 /* Check problems */
639 if(spin_is_locked(&dev->tx_lock)) { 640 if(spin_is_locked(&dev->tx_lock)) {
640 IRDA_DEBUG(3, "%s(), write not completed\n", __FUNCTION__); 641 IRDA_DEBUG(3, "%s(), write not completed\n", __func__);
641 } 642 }
642 643
643 /* serialize with write completion */ 644 /* serialize with write completion */
@@ -666,7 +667,7 @@ static int sirdev_hard_xmit(struct sk_buff *skb, struct net_device *ndev)
666 else if (unlikely(actual < 0)) { 667 else if (unlikely(actual < 0)) {
667 /* could be dropped later when we have tx_timeout to recover */ 668 /* could be dropped later when we have tx_timeout to recover */
668 IRDA_ERROR("%s: drv->do_write failed (%d)\n", 669 IRDA_ERROR("%s: drv->do_write failed (%d)\n",
669 __FUNCTION__, actual); 670 __func__, actual);
670 dev_kfree_skb_any(skb); 671 dev_kfree_skb_any(skb);
671 dev->stats.tx_errors++; 672 dev->stats.tx_errors++;
672 dev->stats.tx_dropped++; 673 dev->stats.tx_dropped++;
@@ -687,7 +688,7 @@ static int sirdev_ioctl(struct net_device *ndev, struct ifreq *rq, int cmd)
687 688
688 IRDA_ASSERT(dev != NULL, return -1;); 689 IRDA_ASSERT(dev != NULL, return -1;);
689 690
690 IRDA_DEBUG(3, "%s(), %s, (cmd=0x%X)\n", __FUNCTION__, ndev->name, cmd); 691 IRDA_DEBUG(3, "%s(), %s, (cmd=0x%X)\n", __func__, ndev->name, cmd);
691 692
692 switch (cmd) { 693 switch (cmd) {
693 case SIOCSBANDWIDTH: /* Set bandwidth */ 694 case SIOCSBANDWIDTH: /* Set bandwidth */
@@ -804,7 +805,7 @@ static int sirdev_open(struct net_device *ndev)
804 if (!try_module_get(drv->owner)) 805 if (!try_module_get(drv->owner))
805 return -ESTALE; 806 return -ESTALE;
806 807
807 IRDA_DEBUG(2, "%s()\n", __FUNCTION__); 808 IRDA_DEBUG(2, "%s()\n", __func__);
808 809
809 if (sirdev_alloc_buffers(dev)) 810 if (sirdev_alloc_buffers(dev))
810 goto errout_dec; 811 goto errout_dec;
@@ -822,7 +823,7 @@ static int sirdev_open(struct net_device *ndev)
822 823
823 netif_wake_queue(ndev); 824 netif_wake_queue(ndev);
824 825
825 IRDA_DEBUG(2, "%s - done, speed = %d\n", __FUNCTION__, dev->speed); 826 IRDA_DEBUG(2, "%s - done, speed = %d\n", __func__, dev->speed);
826 827
827 return 0; 828 return 0;
828 829
@@ -842,7 +843,7 @@ static int sirdev_close(struct net_device *ndev)
842 struct sir_dev *dev = ndev->priv; 843 struct sir_dev *dev = ndev->priv;
843 const struct sir_driver *drv; 844 const struct sir_driver *drv;
844 845
845// IRDA_DEBUG(0, "%s\n", __FUNCTION__); 846// IRDA_DEBUG(0, "%s\n", __func__);
846 847
847 netif_stop_queue(ndev); 848 netif_stop_queue(ndev);
848 849
@@ -878,7 +879,7 @@ struct sir_dev * sirdev_get_instance(const struct sir_driver *drv, const char *n
878 struct net_device *ndev; 879 struct net_device *ndev;
879 struct sir_dev *dev; 880 struct sir_dev *dev;
880 881
881 IRDA_DEBUG(0, "%s - %s\n", __FUNCTION__, name); 882 IRDA_DEBUG(0, "%s - %s\n", __func__, name);
882 883
883 /* instead of adding tests to protect against drv->do_write==NULL 884 /* instead of adding tests to protect against drv->do_write==NULL
884 * at several places we refuse to create a sir_dev instance for 885 * at several places we refuse to create a sir_dev instance for
@@ -892,7 +893,7 @@ struct sir_dev * sirdev_get_instance(const struct sir_driver *drv, const char *n
892 */ 893 */
893 ndev = alloc_irdadev(sizeof(*dev)); 894 ndev = alloc_irdadev(sizeof(*dev));
894 if (ndev == NULL) { 895 if (ndev == NULL) {
895 IRDA_ERROR("%s - Can't allocate memory for IrDA control block!\n", __FUNCTION__); 896 IRDA_ERROR("%s - Can't allocate memory for IrDA control block!\n", __func__);
896 goto out; 897 goto out;
897 } 898 }
898 dev = ndev->priv; 899 dev = ndev->priv;
@@ -921,7 +922,7 @@ struct sir_dev * sirdev_get_instance(const struct sir_driver *drv, const char *n
921 ndev->do_ioctl = sirdev_ioctl; 922 ndev->do_ioctl = sirdev_ioctl;
922 923
923 if (register_netdev(ndev)) { 924 if (register_netdev(ndev)) {
924 IRDA_ERROR("%s(), register_netdev() failed!\n", __FUNCTION__); 925 IRDA_ERROR("%s(), register_netdev() failed!\n", __func__);
925 goto out_freenetdev; 926 goto out_freenetdev;
926 } 927 }
927 928
@@ -938,7 +939,7 @@ int sirdev_put_instance(struct sir_dev *dev)
938{ 939{
939 int err = 0; 940 int err = 0;
940 941
941 IRDA_DEBUG(0, "%s\n", __FUNCTION__); 942 IRDA_DEBUG(0, "%s\n", __func__);
942 943
943 atomic_set(&dev->enable_rx, 0); 944 atomic_set(&dev->enable_rx, 0);
944 945
@@ -948,7 +949,7 @@ int sirdev_put_instance(struct sir_dev *dev)
948 if (dev->dongle_drv) 949 if (dev->dongle_drv)
949 err = sirdev_schedule_dongle_close(dev); 950 err = sirdev_schedule_dongle_close(dev);
950 if (err) 951 if (err)
951 IRDA_ERROR("%s - error %d\n", __FUNCTION__, err); 952 IRDA_ERROR("%s - error %d\n", __func__, err);
952 953
953 sirdev_close(dev->netdev); 954 sirdev_close(dev->netdev);
954 955
diff --git a/drivers/net/irda/sir_dongle.c b/drivers/net/irda/sir_dongle.c
index 25d5b8a96bdc..36030241f7a9 100644
--- a/drivers/net/irda/sir_dongle.c
+++ b/drivers/net/irda/sir_dongle.c
@@ -36,7 +36,7 @@ int irda_register_dongle(struct dongle_driver *new)
36 struct dongle_driver *drv; 36 struct dongle_driver *drv;
37 37
38 IRDA_DEBUG(0, "%s : registering dongle \"%s\" (%d).\n", 38 IRDA_DEBUG(0, "%s : registering dongle \"%s\" (%d).\n",
39 __FUNCTION__, new->driver_name, new->type); 39 __func__, new->driver_name, new->type);
40 40
41 mutex_lock(&dongle_list_lock); 41 mutex_lock(&dongle_list_lock);
42 list_for_each(entry, &dongle_list) { 42 list_for_each(entry, &dongle_list) {
diff --git a/drivers/net/irda/smsc-ircc2.c b/drivers/net/irda/smsc-ircc2.c
index 78dc8e7837f0..b5360fe99d3a 100644
--- a/drivers/net/irda/smsc-ircc2.c
+++ b/drivers/net/irda/smsc-ircc2.c
@@ -460,7 +460,7 @@ static int __init smsc_ircc_init(void)
460{ 460{
461 int ret; 461 int ret;
462 462
463 IRDA_DEBUG(1, "%s\n", __FUNCTION__); 463 IRDA_DEBUG(1, "%s\n", __func__);
464 464
465 ret = platform_driver_register(&smsc_ircc_driver); 465 ret = platform_driver_register(&smsc_ircc_driver);
466 if (ret) { 466 if (ret) {
@@ -500,7 +500,7 @@ static int __init smsc_ircc_open(unsigned int fir_base, unsigned int sir_base, u
500 struct net_device *dev; 500 struct net_device *dev;
501 int err; 501 int err;
502 502
503 IRDA_DEBUG(1, "%s\n", __FUNCTION__); 503 IRDA_DEBUG(1, "%s\n", __func__);
504 504
505 err = smsc_ircc_present(fir_base, sir_base); 505 err = smsc_ircc_present(fir_base, sir_base);
506 if (err) 506 if (err)
@@ -508,7 +508,7 @@ static int __init smsc_ircc_open(unsigned int fir_base, unsigned int sir_base, u
508 508
509 err = -ENOMEM; 509 err = -ENOMEM;
510 if (dev_count >= ARRAY_SIZE(dev_self)) { 510 if (dev_count >= ARRAY_SIZE(dev_self)) {
511 IRDA_WARNING("%s(), too many devices!\n", __FUNCTION__); 511 IRDA_WARNING("%s(), too many devices!\n", __func__);
512 goto err_out1; 512 goto err_out1;
513 } 513 }
514 514
@@ -517,7 +517,7 @@ static int __init smsc_ircc_open(unsigned int fir_base, unsigned int sir_base, u
517 */ 517 */
518 dev = alloc_irdadev(sizeof(struct smsc_ircc_cb)); 518 dev = alloc_irdadev(sizeof(struct smsc_ircc_cb));
519 if (!dev) { 519 if (!dev) {
520 IRDA_WARNING("%s() can't allocate net device\n", __FUNCTION__); 520 IRDA_WARNING("%s() can't allocate net device\n", __func__);
521 goto err_out1; 521 goto err_out1;
522 } 522 }
523 523
@@ -633,14 +633,14 @@ static int smsc_ircc_present(unsigned int fir_base, unsigned int sir_base)
633 if (!request_region(fir_base, SMSC_IRCC2_FIR_CHIP_IO_EXTENT, 633 if (!request_region(fir_base, SMSC_IRCC2_FIR_CHIP_IO_EXTENT,
634 driver_name)) { 634 driver_name)) {
635 IRDA_WARNING("%s: can't get fir_base of 0x%03x\n", 635 IRDA_WARNING("%s: can't get fir_base of 0x%03x\n",
636 __FUNCTION__, fir_base); 636 __func__, fir_base);
637 goto out1; 637 goto out1;
638 } 638 }
639 639
640 if (!request_region(sir_base, SMSC_IRCC2_SIR_CHIP_IO_EXTENT, 640 if (!request_region(sir_base, SMSC_IRCC2_SIR_CHIP_IO_EXTENT,
641 driver_name)) { 641 driver_name)) {
642 IRDA_WARNING("%s: can't get sir_base of 0x%03x\n", 642 IRDA_WARNING("%s: can't get sir_base of 0x%03x\n",
643 __FUNCTION__, sir_base); 643 __func__, sir_base);
644 goto out2; 644 goto out2;
645 } 645 }
646 646
@@ -656,7 +656,7 @@ static int smsc_ircc_present(unsigned int fir_base, unsigned int sir_base)
656 656
657 if (high != 0x10 || low != 0xb8 || (chip != 0xf1 && chip != 0xf2)) { 657 if (high != 0x10 || low != 0xb8 || (chip != 0xf1 && chip != 0xf2)) {
658 IRDA_WARNING("%s(), addr 0x%04x - no device found!\n", 658 IRDA_WARNING("%s(), addr 0x%04x - no device found!\n",
659 __FUNCTION__, fir_base); 659 __func__, fir_base);
660 goto out3; 660 goto out3;
661 } 661 }
662 IRDA_MESSAGE("SMsC IrDA Controller found\n IrCC version %d.%d, " 662 IRDA_MESSAGE("SMsC IrDA Controller found\n IrCC version %d.%d, "
@@ -793,7 +793,7 @@ static int smsc_ircc_net_ioctl(struct net_device *dev, struct ifreq *rq, int cmd
793 793
794 IRDA_ASSERT(self != NULL, return -1;); 794 IRDA_ASSERT(self != NULL, return -1;);
795 795
796 IRDA_DEBUG(2, "%s(), %s, (cmd=0x%X)\n", __FUNCTION__, dev->name, cmd); 796 IRDA_DEBUG(2, "%s(), %s, (cmd=0x%X)\n", __func__, dev->name, cmd);
797 797
798 switch (cmd) { 798 switch (cmd) {
799 case SIOCSBANDWIDTH: /* Set bandwidth */ 799 case SIOCSBANDWIDTH: /* Set bandwidth */
@@ -878,7 +878,7 @@ int smsc_ircc_hard_xmit_sir(struct sk_buff *skb, struct net_device *dev)
878 unsigned long flags; 878 unsigned long flags;
879 s32 speed; 879 s32 speed;
880 880
881 IRDA_DEBUG(1, "%s\n", __FUNCTION__); 881 IRDA_DEBUG(1, "%s\n", __func__);
882 882
883 IRDA_ASSERT(dev != NULL, return 0;); 883 IRDA_ASSERT(dev != NULL, return 0;);
884 884
@@ -953,21 +953,21 @@ static void smsc_ircc_set_fir_speed(struct smsc_ircc_cb *self, u32 speed)
953 ir_mode = IRCC_CFGA_IRDA_HDLC; 953 ir_mode = IRCC_CFGA_IRDA_HDLC;
954 ctrl = IRCC_CRC; 954 ctrl = IRCC_CRC;
955 fast = 0; 955 fast = 0;
956 IRDA_DEBUG(0, "%s(), handling baud of 576000\n", __FUNCTION__); 956 IRDA_DEBUG(0, "%s(), handling baud of 576000\n", __func__);
957 break; 957 break;
958 case 1152000: 958 case 1152000:
959 ir_mode = IRCC_CFGA_IRDA_HDLC; 959 ir_mode = IRCC_CFGA_IRDA_HDLC;
960 ctrl = IRCC_1152 | IRCC_CRC; 960 ctrl = IRCC_1152 | IRCC_CRC;
961 fast = IRCC_LCR_A_FAST | IRCC_LCR_A_GP_DATA; 961 fast = IRCC_LCR_A_FAST | IRCC_LCR_A_GP_DATA;
962 IRDA_DEBUG(0, "%s(), handling baud of 1152000\n", 962 IRDA_DEBUG(0, "%s(), handling baud of 1152000\n",
963 __FUNCTION__); 963 __func__);
964 break; 964 break;
965 case 4000000: 965 case 4000000:
966 ir_mode = IRCC_CFGA_IRDA_4PPM; 966 ir_mode = IRCC_CFGA_IRDA_4PPM;
967 ctrl = IRCC_CRC; 967 ctrl = IRCC_CRC;
968 fast = IRCC_LCR_A_FAST; 968 fast = IRCC_LCR_A_FAST;
969 IRDA_DEBUG(0, "%s(), handling baud of 4000000\n", 969 IRDA_DEBUG(0, "%s(), handling baud of 4000000\n",
970 __FUNCTION__); 970 __func__);
971 break; 971 break;
972 } 972 }
973 #if 0 973 #if 0
@@ -995,7 +995,7 @@ static void smsc_ircc_fir_start(struct smsc_ircc_cb *self)
995 struct net_device *dev; 995 struct net_device *dev;
996 int fir_base; 996 int fir_base;
997 997
998 IRDA_DEBUG(1, "%s\n", __FUNCTION__); 998 IRDA_DEBUG(1, "%s\n", __func__);
999 999
1000 IRDA_ASSERT(self != NULL, return;); 1000 IRDA_ASSERT(self != NULL, return;);
1001 dev = self->netdev; 1001 dev = self->netdev;
@@ -1043,7 +1043,7 @@ static void smsc_ircc_fir_stop(struct smsc_ircc_cb *self)
1043{ 1043{
1044 int fir_base; 1044 int fir_base;
1045 1045
1046 IRDA_DEBUG(1, "%s\n", __FUNCTION__); 1046 IRDA_DEBUG(1, "%s\n", __func__);
1047 1047
1048 IRDA_ASSERT(self != NULL, return;); 1048 IRDA_ASSERT(self != NULL, return;);
1049 1049
@@ -1067,7 +1067,7 @@ static void smsc_ircc_change_speed(struct smsc_ircc_cb *self, u32 speed)
1067 struct net_device *dev; 1067 struct net_device *dev;
1068 int last_speed_was_sir; 1068 int last_speed_was_sir;
1069 1069
1070 IRDA_DEBUG(0, "%s() changing speed to: %d\n", __FUNCTION__, speed); 1070 IRDA_DEBUG(0, "%s() changing speed to: %d\n", __func__, speed);
1071 1071
1072 IRDA_ASSERT(self != NULL, return;); 1072 IRDA_ASSERT(self != NULL, return;);
1073 dev = self->netdev; 1073 dev = self->netdev;
@@ -1135,7 +1135,7 @@ void smsc_ircc_set_sir_speed(struct smsc_ircc_cb *self, __u32 speed)
1135 int lcr; /* Line control reg */ 1135 int lcr; /* Line control reg */
1136 int divisor; 1136 int divisor;
1137 1137
1138 IRDA_DEBUG(0, "%s(), Setting speed to: %d\n", __FUNCTION__, speed); 1138 IRDA_DEBUG(0, "%s(), Setting speed to: %d\n", __func__, speed);
1139 1139
1140 IRDA_ASSERT(self != NULL, return;); 1140 IRDA_ASSERT(self != NULL, return;);
1141 iobase = self->io.sir_base; 1141 iobase = self->io.sir_base;
@@ -1170,7 +1170,7 @@ void smsc_ircc_set_sir_speed(struct smsc_ircc_cb *self, __u32 speed)
1170 /* Turn on interrups */ 1170 /* Turn on interrups */
1171 outb(UART_IER_RLSI | UART_IER_RDI | UART_IER_THRI, iobase + UART_IER); 1171 outb(UART_IER_RLSI | UART_IER_RDI | UART_IER_THRI, iobase + UART_IER);
1172 1172
1173 IRDA_DEBUG(2, "%s() speed changed to: %d\n", __FUNCTION__, speed); 1173 IRDA_DEBUG(2, "%s() speed changed to: %d\n", __func__, speed);
1174} 1174}
1175 1175
1176 1176
@@ -1253,7 +1253,7 @@ static void smsc_ircc_dma_xmit(struct smsc_ircc_cb *self, int bofs)
1253 int iobase = self->io.fir_base; 1253 int iobase = self->io.fir_base;
1254 u8 ctrl; 1254 u8 ctrl;
1255 1255
1256 IRDA_DEBUG(3, "%s\n", __FUNCTION__); 1256 IRDA_DEBUG(3, "%s\n", __func__);
1257#if 1 1257#if 1
1258 /* Disable Rx */ 1258 /* Disable Rx */
1259 register_bank(iobase, 0); 1259 register_bank(iobase, 0);
@@ -1307,7 +1307,7 @@ static void smsc_ircc_dma_xmit_complete(struct smsc_ircc_cb *self)
1307{ 1307{
1308 int iobase = self->io.fir_base; 1308 int iobase = self->io.fir_base;
1309 1309
1310 IRDA_DEBUG(3, "%s\n", __FUNCTION__); 1310 IRDA_DEBUG(3, "%s\n", __func__);
1311#if 0 1311#if 0
1312 /* Disable Tx */ 1312 /* Disable Tx */
1313 register_bank(iobase, 0); 1313 register_bank(iobase, 0);
@@ -1411,7 +1411,7 @@ static void smsc_ircc_dma_receive_complete(struct smsc_ircc_cb *self)
1411 1411
1412 register_bank(iobase, 0); 1412 register_bank(iobase, 0);
1413 1413
1414 IRDA_DEBUG(3, "%s\n", __FUNCTION__); 1414 IRDA_DEBUG(3, "%s\n", __func__);
1415#if 0 1415#if 0
1416 /* Disable Rx */ 1416 /* Disable Rx */
1417 register_bank(iobase, 0); 1417 register_bank(iobase, 0);
@@ -1422,7 +1422,7 @@ static void smsc_ircc_dma_receive_complete(struct smsc_ircc_cb *self)
1422 lsr= inb(iobase + IRCC_LSR); 1422 lsr= inb(iobase + IRCC_LSR);
1423 msgcnt = inb(iobase + IRCC_LCR_B) & 0x08; 1423 msgcnt = inb(iobase + IRCC_LCR_B) & 0x08;
1424 1424
1425 IRDA_DEBUG(2, "%s: dma count = %d\n", __FUNCTION__, 1425 IRDA_DEBUG(2, "%s: dma count = %d\n", __func__,
1426 get_dma_residue(self->io.dma)); 1426 get_dma_residue(self->io.dma));
1427 1427
1428 len = self->rx_buff.truesize - get_dma_residue(self->io.dma); 1428 len = self->rx_buff.truesize - get_dma_residue(self->io.dma);
@@ -1445,15 +1445,15 @@ static void smsc_ircc_dma_receive_complete(struct smsc_ircc_cb *self)
1445 len -= self->io.speed < 4000000 ? 2 : 4; 1445 len -= self->io.speed < 4000000 ? 2 : 4;
1446 1446
1447 if (len < 2 || len > 2050) { 1447 if (len < 2 || len > 2050) {
1448 IRDA_WARNING("%s(), bogus len=%d\n", __FUNCTION__, len); 1448 IRDA_WARNING("%s(), bogus len=%d\n", __func__, len);
1449 return; 1449 return;
1450 } 1450 }
1451 IRDA_DEBUG(2, "%s: msgcnt = %d, len=%d\n", __FUNCTION__, msgcnt, len); 1451 IRDA_DEBUG(2, "%s: msgcnt = %d, len=%d\n", __func__, msgcnt, len);
1452 1452
1453 skb = dev_alloc_skb(len + 1); 1453 skb = dev_alloc_skb(len + 1);
1454 if (!skb) { 1454 if (!skb) {
1455 IRDA_WARNING("%s(), memory squeeze, dropping frame.\n", 1455 IRDA_WARNING("%s(), memory squeeze, dropping frame.\n",
1456 __FUNCTION__); 1456 __func__);
1457 return; 1457 return;
1458 } 1458 }
1459 /* Make sure IP header gets aligned */ 1459 /* Make sure IP header gets aligned */
@@ -1494,7 +1494,7 @@ static void smsc_ircc_sir_receive(struct smsc_ircc_cb *self)
1494 1494
1495 /* Make sure we don't stay here to long */ 1495 /* Make sure we don't stay here to long */
1496 if (boguscount++ > 32) { 1496 if (boguscount++ > 32) {
1497 IRDA_DEBUG(2, "%s(), breaking!\n", __FUNCTION__); 1497 IRDA_DEBUG(2, "%s(), breaking!\n", __func__);
1498 break; 1498 break;
1499 } 1499 }
1500 } while (inb(iobase + UART_LSR) & UART_LSR_DR); 1500 } while (inb(iobase + UART_LSR) & UART_LSR_DR);
@@ -1536,7 +1536,7 @@ static irqreturn_t smsc_ircc_interrupt(int dummy, void *dev_id)
1536 lcra = inb(iobase + IRCC_LCR_A); 1536 lcra = inb(iobase + IRCC_LCR_A);
1537 lsr = inb(iobase + IRCC_LSR); 1537 lsr = inb(iobase + IRCC_LSR);
1538 1538
1539 IRDA_DEBUG(2, "%s(), iir = 0x%02x\n", __FUNCTION__, iir); 1539 IRDA_DEBUG(2, "%s(), iir = 0x%02x\n", __func__, iir);
1540 1540
1541 if (iir & IRCC_IIR_EOM) { 1541 if (iir & IRCC_IIR_EOM) {
1542 if (self->io.direction == IO_RECV) 1542 if (self->io.direction == IO_RECV)
@@ -1548,7 +1548,7 @@ static irqreturn_t smsc_ircc_interrupt(int dummy, void *dev_id)
1548 } 1548 }
1549 1549
1550 if (iir & IRCC_IIR_ACTIVE_FRAME) { 1550 if (iir & IRCC_IIR_ACTIVE_FRAME) {
1551 /*printk(KERN_WARNING "%s(): Active Frame\n", __FUNCTION__);*/ 1551 /*printk(KERN_WARNING "%s(): Active Frame\n", __func__);*/
1552 } 1552 }
1553 1553
1554 /* Enable interrupts again */ 1554 /* Enable interrupts again */
@@ -1587,11 +1587,11 @@ static irqreturn_t smsc_ircc_interrupt_sir(struct net_device *dev)
1587 lsr = inb(iobase + UART_LSR); 1587 lsr = inb(iobase + UART_LSR);
1588 1588
1589 IRDA_DEBUG(4, "%s(), iir=%02x, lsr=%02x, iobase=%#x\n", 1589 IRDA_DEBUG(4, "%s(), iir=%02x, lsr=%02x, iobase=%#x\n",
1590 __FUNCTION__, iir, lsr, iobase); 1590 __func__, iir, lsr, iobase);
1591 1591
1592 switch (iir) { 1592 switch (iir) {
1593 case UART_IIR_RLSI: 1593 case UART_IIR_RLSI:
1594 IRDA_DEBUG(2, "%s(), RLSI\n", __FUNCTION__); 1594 IRDA_DEBUG(2, "%s(), RLSI\n", __func__);
1595 break; 1595 break;
1596 case UART_IIR_RDI: 1596 case UART_IIR_RDI:
1597 /* Receive interrupt */ 1597 /* Receive interrupt */
@@ -1604,7 +1604,7 @@ static irqreturn_t smsc_ircc_interrupt_sir(struct net_device *dev)
1604 break; 1604 break;
1605 default: 1605 default:
1606 IRDA_DEBUG(0, "%s(), unhandled IIR=%#x\n", 1606 IRDA_DEBUG(0, "%s(), unhandled IIR=%#x\n",
1607 __FUNCTION__, iir); 1607 __func__, iir);
1608 break; 1608 break;
1609 } 1609 }
1610 1610
@@ -1631,11 +1631,11 @@ static int ircc_is_receiving(struct smsc_ircc_cb *self)
1631 int status = FALSE; 1631 int status = FALSE;
1632 /* int iobase; */ 1632 /* int iobase; */
1633 1633
1634 IRDA_DEBUG(1, "%s\n", __FUNCTION__); 1634 IRDA_DEBUG(1, "%s\n", __func__);
1635 1635
1636 IRDA_ASSERT(self != NULL, return FALSE;); 1636 IRDA_ASSERT(self != NULL, return FALSE;);
1637 1637
1638 IRDA_DEBUG(0, "%s: dma count = %d\n", __FUNCTION__, 1638 IRDA_DEBUG(0, "%s: dma count = %d\n", __func__,
1639 get_dma_residue(self->io.dma)); 1639 get_dma_residue(self->io.dma));
1640 1640
1641 status = (self->rx_buff.state != OUTSIDE_FRAME); 1641 status = (self->rx_buff.state != OUTSIDE_FRAME);
@@ -1652,7 +1652,7 @@ static int smsc_ircc_request_irq(struct smsc_ircc_cb *self)
1652 self->netdev->name, self->netdev); 1652 self->netdev->name, self->netdev);
1653 if (error) 1653 if (error)
1654 IRDA_DEBUG(0, "%s(), unable to allocate irq=%d, err=%d\n", 1654 IRDA_DEBUG(0, "%s(), unable to allocate irq=%d, err=%d\n",
1655 __FUNCTION__, self->io.irq, error); 1655 __func__, self->io.irq, error);
1656 1656
1657 return error; 1657 return error;
1658} 1658}
@@ -1696,21 +1696,21 @@ static int smsc_ircc_net_open(struct net_device *dev)
1696 struct smsc_ircc_cb *self; 1696 struct smsc_ircc_cb *self;
1697 char hwname[16]; 1697 char hwname[16];
1698 1698
1699 IRDA_DEBUG(1, "%s\n", __FUNCTION__); 1699 IRDA_DEBUG(1, "%s\n", __func__);
1700 1700
1701 IRDA_ASSERT(dev != NULL, return -1;); 1701 IRDA_ASSERT(dev != NULL, return -1;);
1702 self = netdev_priv(dev); 1702 self = netdev_priv(dev);
1703 IRDA_ASSERT(self != NULL, return 0;); 1703 IRDA_ASSERT(self != NULL, return 0;);
1704 1704
1705 if (self->io.suspended) { 1705 if (self->io.suspended) {
1706 IRDA_DEBUG(0, "%s(), device is suspended\n", __FUNCTION__); 1706 IRDA_DEBUG(0, "%s(), device is suspended\n", __func__);
1707 return -EAGAIN; 1707 return -EAGAIN;
1708 } 1708 }
1709 1709
1710 if (request_irq(self->io.irq, smsc_ircc_interrupt, 0, dev->name, 1710 if (request_irq(self->io.irq, smsc_ircc_interrupt, 0, dev->name,
1711 (void *) dev)) { 1711 (void *) dev)) {
1712 IRDA_DEBUG(0, "%s(), unable to allocate irq=%d\n", 1712 IRDA_DEBUG(0, "%s(), unable to allocate irq=%d\n",
1713 __FUNCTION__, self->io.irq); 1713 __func__, self->io.irq);
1714 return -EAGAIN; 1714 return -EAGAIN;
1715 } 1715 }
1716 1716
@@ -1734,7 +1734,7 @@ static int smsc_ircc_net_open(struct net_device *dev)
1734 smsc_ircc_net_close(dev); 1734 smsc_ircc_net_close(dev);
1735 1735
1736 IRDA_WARNING("%s(), unable to allocate DMA=%d\n", 1736 IRDA_WARNING("%s(), unable to allocate DMA=%d\n",
1737 __FUNCTION__, self->io.dma); 1737 __func__, self->io.dma);
1738 return -EAGAIN; 1738 return -EAGAIN;
1739 } 1739 }
1740 1740
@@ -1753,7 +1753,7 @@ static int smsc_ircc_net_close(struct net_device *dev)
1753{ 1753{
1754 struct smsc_ircc_cb *self; 1754 struct smsc_ircc_cb *self;
1755 1755
1756 IRDA_DEBUG(1, "%s\n", __FUNCTION__); 1756 IRDA_DEBUG(1, "%s\n", __func__);
1757 1757
1758 IRDA_ASSERT(dev != NULL, return -1;); 1758 IRDA_ASSERT(dev != NULL, return -1;);
1759 self = netdev_priv(dev); 1759 self = netdev_priv(dev);
@@ -1836,7 +1836,7 @@ static int smsc_ircc_resume(struct platform_device *dev)
1836 */ 1836 */
1837static int __exit smsc_ircc_close(struct smsc_ircc_cb *self) 1837static int __exit smsc_ircc_close(struct smsc_ircc_cb *self)
1838{ 1838{
1839 IRDA_DEBUG(1, "%s\n", __FUNCTION__); 1839 IRDA_DEBUG(1, "%s\n", __func__);
1840 1840
1841 IRDA_ASSERT(self != NULL, return -1;); 1841 IRDA_ASSERT(self != NULL, return -1;);
1842 1842
@@ -1848,12 +1848,12 @@ static int __exit smsc_ircc_close(struct smsc_ircc_cb *self)
1848 smsc_ircc_stop_interrupts(self); 1848 smsc_ircc_stop_interrupts(self);
1849 1849
1850 /* Release the PORTS that this driver is using */ 1850 /* Release the PORTS that this driver is using */
1851 IRDA_DEBUG(0, "%s(), releasing 0x%03x\n", __FUNCTION__, 1851 IRDA_DEBUG(0, "%s(), releasing 0x%03x\n", __func__,
1852 self->io.fir_base); 1852 self->io.fir_base);
1853 1853
1854 release_region(self->io.fir_base, self->io.fir_ext); 1854 release_region(self->io.fir_base, self->io.fir_ext);
1855 1855
1856 IRDA_DEBUG(0, "%s(), releasing 0x%03x\n", __FUNCTION__, 1856 IRDA_DEBUG(0, "%s(), releasing 0x%03x\n", __func__,
1857 self->io.sir_base); 1857 self->io.sir_base);
1858 1858
1859 release_region(self->io.sir_base, self->io.sir_ext); 1859 release_region(self->io.sir_base, self->io.sir_ext);
@@ -1875,7 +1875,7 @@ static void __exit smsc_ircc_cleanup(void)
1875{ 1875{
1876 int i; 1876 int i;
1877 1877
1878 IRDA_DEBUG(1, "%s\n", __FUNCTION__); 1878 IRDA_DEBUG(1, "%s\n", __func__);
1879 1879
1880 for (i = 0; i < 2; i++) { 1880 for (i = 0; i < 2; i++) {
1881 if (dev_self[i]) 1881 if (dev_self[i])
@@ -1899,7 +1899,7 @@ void smsc_ircc_sir_start(struct smsc_ircc_cb *self)
1899 struct net_device *dev; 1899 struct net_device *dev;
1900 int fir_base, sir_base; 1900 int fir_base, sir_base;
1901 1901
1902 IRDA_DEBUG(3, "%s\n", __FUNCTION__); 1902 IRDA_DEBUG(3, "%s\n", __func__);
1903 1903
1904 IRDA_ASSERT(self != NULL, return;); 1904 IRDA_ASSERT(self != NULL, return;);
1905 dev = self->netdev; 1905 dev = self->netdev;
@@ -1926,7 +1926,7 @@ void smsc_ircc_sir_start(struct smsc_ircc_cb *self)
1926 /* Turn on interrups */ 1926 /* Turn on interrups */
1927 outb(UART_IER_RLSI | UART_IER_RDI |UART_IER_THRI, sir_base + UART_IER); 1927 outb(UART_IER_RLSI | UART_IER_RDI |UART_IER_THRI, sir_base + UART_IER);
1928 1928
1929 IRDA_DEBUG(3, "%s() - exit\n", __FUNCTION__); 1929 IRDA_DEBUG(3, "%s() - exit\n", __func__);
1930 1930
1931 outb(0x00, fir_base + IRCC_MASTER); 1931 outb(0x00, fir_base + IRCC_MASTER);
1932} 1932}
@@ -1936,7 +1936,7 @@ void smsc_ircc_sir_stop(struct smsc_ircc_cb *self)
1936{ 1936{
1937 int iobase; 1937 int iobase;
1938 1938
1939 IRDA_DEBUG(3, "%s\n", __FUNCTION__); 1939 IRDA_DEBUG(3, "%s\n", __func__);
1940 iobase = self->io.sir_base; 1940 iobase = self->io.sir_base;
1941 1941
1942 /* Reset UART */ 1942 /* Reset UART */
@@ -1962,7 +1962,7 @@ static void smsc_ircc_sir_write_wakeup(struct smsc_ircc_cb *self)
1962 1962
1963 IRDA_ASSERT(self != NULL, return;); 1963 IRDA_ASSERT(self != NULL, return;);
1964 1964
1965 IRDA_DEBUG(4, "%s\n", __FUNCTION__); 1965 IRDA_DEBUG(4, "%s\n", __func__);
1966 1966
1967 iobase = self->io.sir_base; 1967 iobase = self->io.sir_base;
1968 1968
@@ -1984,7 +1984,7 @@ static void smsc_ircc_sir_write_wakeup(struct smsc_ircc_cb *self)
1984 */ 1984 */
1985 if (self->new_speed) { 1985 if (self->new_speed) {
1986 IRDA_DEBUG(5, "%s(), Changing speed to %d.\n", 1986 IRDA_DEBUG(5, "%s(), Changing speed to %d.\n",
1987 __FUNCTION__, self->new_speed); 1987 __func__, self->new_speed);
1988 smsc_ircc_sir_wait_hw_transmitter_finish(self); 1988 smsc_ircc_sir_wait_hw_transmitter_finish(self);
1989 smsc_ircc_change_speed(self, self->new_speed); 1989 smsc_ircc_change_speed(self, self->new_speed);
1990 self->new_speed = 0; 1990 self->new_speed = 0;
@@ -2023,7 +2023,7 @@ static int smsc_ircc_sir_write(int iobase, int fifo_size, __u8 *buf, int len)
2023 2023
2024 /* Tx FIFO should be empty! */ 2024 /* Tx FIFO should be empty! */
2025 if (!(inb(iobase + UART_LSR) & UART_LSR_THRE)) { 2025 if (!(inb(iobase + UART_LSR) & UART_LSR_THRE)) {
2026 IRDA_WARNING("%s(), failed, fifo not empty!\n", __FUNCTION__); 2026 IRDA_WARNING("%s(), failed, fifo not empty!\n", __func__);
2027 return 0; 2027 return 0;
2028 } 2028 }
2029 2029
@@ -2123,7 +2123,7 @@ static void smsc_ircc_sir_wait_hw_transmitter_finish(struct smsc_ircc_cb *self)
2123 udelay(1); 2123 udelay(1);
2124 2124
2125 if (count == 0) 2125 if (count == 0)
2126 IRDA_DEBUG(0, "%s(): stuck transmitter\n", __FUNCTION__); 2126 IRDA_DEBUG(0, "%s(): stuck transmitter\n", __func__);
2127} 2127}
2128 2128
2129 2129
@@ -2145,7 +2145,7 @@ static int __init smsc_ircc_look_for_chips(void)
2145 while (address->cfg_base) { 2145 while (address->cfg_base) {
2146 cfg_base = address->cfg_base; 2146 cfg_base = address->cfg_base;
2147 2147
2148 /*printk(KERN_WARNING "%s(): probing: 0x%02x for: 0x%02x\n", __FUNCTION__, cfg_base, address->type);*/ 2148 /*printk(KERN_WARNING "%s(): probing: 0x%02x for: 0x%02x\n", __func__, cfg_base, address->type);*/
2149 2149
2150 if (address->type & SMSCSIO_TYPE_FDC) { 2150 if (address->type & SMSCSIO_TYPE_FDC) {
2151 type = "FDC"; 2151 type = "FDC";
@@ -2184,7 +2184,7 @@ static int __init smsc_superio_flat(const struct smsc_chip *chips, unsigned shor
2184 u8 mode, dma, irq; 2184 u8 mode, dma, irq;
2185 int ret = -ENODEV; 2185 int ret = -ENODEV;
2186 2186
2187 IRDA_DEBUG(1, "%s\n", __FUNCTION__); 2187 IRDA_DEBUG(1, "%s\n", __func__);
2188 2188
2189 if (smsc_ircc_probe(cfgbase, SMSCSIOFLAT_DEVICEID_REG, chips, type) == NULL) 2189 if (smsc_ircc_probe(cfgbase, SMSCSIOFLAT_DEVICEID_REG, chips, type) == NULL)
2190 return ret; 2190 return ret;
@@ -2192,10 +2192,10 @@ static int __init smsc_superio_flat(const struct smsc_chip *chips, unsigned shor
2192 outb(SMSCSIOFLAT_UARTMODE0C_REG, cfgbase); 2192 outb(SMSCSIOFLAT_UARTMODE0C_REG, cfgbase);
2193 mode = inb(cfgbase + 1); 2193 mode = inb(cfgbase + 1);
2194 2194
2195 /*printk(KERN_WARNING "%s(): mode: 0x%02x\n", __FUNCTION__, mode);*/ 2195 /*printk(KERN_WARNING "%s(): mode: 0x%02x\n", __func__, mode);*/
2196 2196
2197 if (!(mode & SMSCSIOFLAT_UART2MODE_VAL_IRDA)) 2197 if (!(mode & SMSCSIOFLAT_UART2MODE_VAL_IRDA))
2198 IRDA_WARNING("%s(): IrDA not enabled\n", __FUNCTION__); 2198 IRDA_WARNING("%s(): IrDA not enabled\n", __func__);
2199 2199
2200 outb(SMSCSIOFLAT_UART2BASEADDR_REG, cfgbase); 2200 outb(SMSCSIOFLAT_UART2BASEADDR_REG, cfgbase);
2201 sirbase = inb(cfgbase + 1) << 2; 2201 sirbase = inb(cfgbase + 1) << 2;
@@ -2212,7 +2212,7 @@ static int __init smsc_superio_flat(const struct smsc_chip *chips, unsigned shor
2212 outb(SMSCSIOFLAT_UARTIRQSELECT_REG, cfgbase); 2212 outb(SMSCSIOFLAT_UARTIRQSELECT_REG, cfgbase);
2213 irq = inb(cfgbase + 1) & SMSCSIOFLAT_UART2IRQSELECT_MASK; 2213 irq = inb(cfgbase + 1) & SMSCSIOFLAT_UART2IRQSELECT_MASK;
2214 2214
2215 IRDA_MESSAGE("%s(): fir: 0x%02x, sir: 0x%02x, dma: %02d, irq: %d, mode: 0x%02x\n", __FUNCTION__, firbase, sirbase, dma, irq, mode); 2215 IRDA_MESSAGE("%s(): fir: 0x%02x, sir: 0x%02x, dma: %02d, irq: %d, mode: 0x%02x\n", __func__, firbase, sirbase, dma, irq, mode);
2216 2216
2217 if (firbase && smsc_ircc_open(firbase, sirbase, dma, irq) == 0) 2217 if (firbase && smsc_ircc_open(firbase, sirbase, dma, irq) == 0)
2218 ret = 0; 2218 ret = 0;
@@ -2234,7 +2234,7 @@ static int __init smsc_superio_paged(const struct smsc_chip *chips, unsigned sho
2234 unsigned short fir_io, sir_io; 2234 unsigned short fir_io, sir_io;
2235 int ret = -ENODEV; 2235 int ret = -ENODEV;
2236 2236
2237 IRDA_DEBUG(1, "%s\n", __FUNCTION__); 2237 IRDA_DEBUG(1, "%s\n", __func__);
2238 2238
2239 if (smsc_ircc_probe(cfg_base, 0x20, chips, type) == NULL) 2239 if (smsc_ircc_probe(cfg_base, 0x20, chips, type) == NULL)
2240 return ret; 2240 return ret;
@@ -2268,7 +2268,7 @@ static int __init smsc_superio_paged(const struct smsc_chip *chips, unsigned sho
2268 2268
2269static int __init smsc_access(unsigned short cfg_base, unsigned char reg) 2269static int __init smsc_access(unsigned short cfg_base, unsigned char reg)
2270{ 2270{
2271 IRDA_DEBUG(1, "%s\n", __FUNCTION__); 2271 IRDA_DEBUG(1, "%s\n", __func__);
2272 2272
2273 outb(reg, cfg_base); 2273 outb(reg, cfg_base);
2274 return inb(cfg_base) != reg ? -1 : 0; 2274 return inb(cfg_base) != reg ? -1 : 0;
@@ -2278,7 +2278,7 @@ static const struct smsc_chip * __init smsc_ircc_probe(unsigned short cfg_base,
2278{ 2278{
2279 u8 devid, xdevid, rev; 2279 u8 devid, xdevid, rev;
2280 2280
2281 IRDA_DEBUG(1, "%s\n", __FUNCTION__); 2281 IRDA_DEBUG(1, "%s\n", __func__);
2282 2282
2283 /* Leave configuration */ 2283 /* Leave configuration */
2284 2284
@@ -2353,7 +2353,7 @@ static int __init smsc_superio_fdc(unsigned short cfg_base)
2353 2353
2354 if (!request_region(cfg_base, 2, driver_name)) { 2354 if (!request_region(cfg_base, 2, driver_name)) {
2355 IRDA_WARNING("%s: can't get cfg_base of 0x%03x\n", 2355 IRDA_WARNING("%s: can't get cfg_base of 0x%03x\n",
2356 __FUNCTION__, cfg_base); 2356 __func__, cfg_base);
2357 } else { 2357 } else {
2358 if (!smsc_superio_flat(fdc_chips_flat, cfg_base, "FDC") || 2358 if (!smsc_superio_flat(fdc_chips_flat, cfg_base, "FDC") ||
2359 !smsc_superio_paged(fdc_chips_paged, cfg_base, "FDC")) 2359 !smsc_superio_paged(fdc_chips_paged, cfg_base, "FDC"))
@@ -2371,7 +2371,7 @@ static int __init smsc_superio_lpc(unsigned short cfg_base)
2371 2371
2372 if (!request_region(cfg_base, 2, driver_name)) { 2372 if (!request_region(cfg_base, 2, driver_name)) {
2373 IRDA_WARNING("%s: can't get cfg_base of 0x%03x\n", 2373 IRDA_WARNING("%s: can't get cfg_base of 0x%03x\n",
2374 __FUNCTION__, cfg_base); 2374 __func__, cfg_base);
2375 } else { 2375 } else {
2376 if (!smsc_superio_flat(lpc_chips_flat, cfg_base, "LPC") || 2376 if (!smsc_superio_flat(lpc_chips_flat, cfg_base, "LPC") ||
2377 !smsc_superio_paged(lpc_chips_paged, cfg_base, "LPC")) 2377 !smsc_superio_paged(lpc_chips_paged, cfg_base, "LPC"))
@@ -2932,7 +2932,7 @@ static void smsc_ircc_set_transceiver_smsc_ircc_atc(int fir_base, u32 speed)
2932 /* empty */; 2932 /* empty */;
2933 2933
2934 if (val) 2934 if (val)
2935 IRDA_WARNING("%s(): ATC: 0x%02x\n", __FUNCTION__, 2935 IRDA_WARNING("%s(): ATC: 0x%02x\n", __func__,
2936 inb(fir_base + IRCC_ATC)); 2936 inb(fir_base + IRCC_ATC));
2937} 2937}
2938 2938
diff --git a/drivers/net/irda/tekram-sir.c b/drivers/net/irda/tekram-sir.c
index d1ce5ae6a172..048a15422844 100644
--- a/drivers/net/irda/tekram-sir.c
+++ b/drivers/net/irda/tekram-sir.c
@@ -77,7 +77,7 @@ static int tekram_open(struct sir_dev *dev)
77{ 77{
78 struct qos_info *qos = &dev->qos; 78 struct qos_info *qos = &dev->qos;
79 79
80 IRDA_DEBUG(2, "%s()\n", __FUNCTION__); 80 IRDA_DEBUG(2, "%s()\n", __func__);
81 81
82 sirdev_set_dtr_rts(dev, TRUE, TRUE); 82 sirdev_set_dtr_rts(dev, TRUE, TRUE);
83 83
@@ -92,7 +92,7 @@ static int tekram_open(struct sir_dev *dev)
92 92
93static int tekram_close(struct sir_dev *dev) 93static int tekram_close(struct sir_dev *dev)
94{ 94{
95 IRDA_DEBUG(2, "%s()\n", __FUNCTION__); 95 IRDA_DEBUG(2, "%s()\n", __func__);
96 96
97 /* Power off dongle */ 97 /* Power off dongle */
98 sirdev_set_dtr_rts(dev, FALSE, FALSE); 98 sirdev_set_dtr_rts(dev, FALSE, FALSE);
@@ -130,7 +130,7 @@ static int tekram_change_speed(struct sir_dev *dev, unsigned speed)
130 u8 byte; 130 u8 byte;
131 static int ret = 0; 131 static int ret = 0;
132 132
133 IRDA_DEBUG(2, "%s()\n", __FUNCTION__); 133 IRDA_DEBUG(2, "%s()\n", __func__);
134 134
135 switch(state) { 135 switch(state) {
136 case SIRDEV_STATE_DONGLE_SPEED: 136 case SIRDEV_STATE_DONGLE_SPEED:
@@ -179,7 +179,7 @@ static int tekram_change_speed(struct sir_dev *dev, unsigned speed)
179 break; 179 break;
180 180
181 default: 181 default:
182 IRDA_ERROR("%s - undefined state %d\n", __FUNCTION__, state); 182 IRDA_ERROR("%s - undefined state %d\n", __func__, state);
183 ret = -EINVAL; 183 ret = -EINVAL;
184 break; 184 break;
185 } 185 }
@@ -204,7 +204,7 @@ static int tekram_change_speed(struct sir_dev *dev, unsigned speed)
204 204
205static int tekram_reset(struct sir_dev *dev) 205static int tekram_reset(struct sir_dev *dev)
206{ 206{
207 IRDA_DEBUG(2, "%s()\n", __FUNCTION__); 207 IRDA_DEBUG(2, "%s()\n", __func__);
208 208
209 /* Clear DTR, Set RTS */ 209 /* Clear DTR, Set RTS */
210 sirdev_set_dtr_rts(dev, FALSE, TRUE); 210 sirdev_set_dtr_rts(dev, FALSE, TRUE);
diff --git a/drivers/net/irda/toim3232-sir.c b/drivers/net/irda/toim3232-sir.c
index aa1a9b0ed83e..fcf287b749db 100644
--- a/drivers/net/irda/toim3232-sir.c
+++ b/drivers/net/irda/toim3232-sir.c
@@ -181,7 +181,7 @@ static int toim3232_open(struct sir_dev *dev)
181{ 181{
182 struct qos_info *qos = &dev->qos; 182 struct qos_info *qos = &dev->qos;
183 183
184 IRDA_DEBUG(2, "%s()\n", __FUNCTION__); 184 IRDA_DEBUG(2, "%s()\n", __func__);
185 185
186 /* Pull the lines high to start with. 186 /* Pull the lines high to start with.
187 * 187 *
@@ -209,7 +209,7 @@ static int toim3232_open(struct sir_dev *dev)
209 209
210static int toim3232_close(struct sir_dev *dev) 210static int toim3232_close(struct sir_dev *dev)
211{ 211{
212 IRDA_DEBUG(2, "%s()\n", __FUNCTION__); 212 IRDA_DEBUG(2, "%s()\n", __func__);
213 213
214 /* Power off dongle */ 214 /* Power off dongle */
215 sirdev_set_dtr_rts(dev, FALSE, FALSE); 215 sirdev_set_dtr_rts(dev, FALSE, FALSE);
@@ -241,7 +241,7 @@ static int toim3232_change_speed(struct sir_dev *dev, unsigned speed)
241 u8 byte; 241 u8 byte;
242 static int ret = 0; 242 static int ret = 0;
243 243
244 IRDA_DEBUG(2, "%s()\n", __FUNCTION__); 244 IRDA_DEBUG(2, "%s()\n", __func__);
245 245
246 switch(state) { 246 switch(state) {
247 case SIRDEV_STATE_DONGLE_SPEED: 247 case SIRDEV_STATE_DONGLE_SPEED:
@@ -299,7 +299,7 @@ static int toim3232_change_speed(struct sir_dev *dev, unsigned speed)
299 break; 299 break;
300 300
301 default: 301 default:
302 printk(KERN_ERR "%s - undefined state %d\n", __FUNCTION__, state); 302 printk(KERN_ERR "%s - undefined state %d\n", __func__, state);
303 ret = -EINVAL; 303 ret = -EINVAL;
304 break; 304 break;
305 } 305 }
@@ -344,7 +344,7 @@ static int toim3232_change_speed(struct sir_dev *dev, unsigned speed)
344 344
345static int toim3232_reset(struct sir_dev *dev) 345static int toim3232_reset(struct sir_dev *dev)
346{ 346{
347 IRDA_DEBUG(2, "%s()\n", __FUNCTION__); 347 IRDA_DEBUG(2, "%s()\n", __func__);
348 348
349 /* Switch off both DTR and RTS to switch off dongle */ 349 /* Switch off both DTR and RTS to switch off dongle */
350 sirdev_set_dtr_rts(dev, FALSE, FALSE); 350 sirdev_set_dtr_rts(dev, FALSE, FALSE);
diff --git a/drivers/net/irda/via-ircc.c b/drivers/net/irda/via-ircc.c
index 04ad3573b159..84e609ea5fbb 100644
--- a/drivers/net/irda/via-ircc.c
+++ b/drivers/net/irda/via-ircc.c
@@ -152,12 +152,12 @@ static int __init via_ircc_init(void)
152{ 152{
153 int rc; 153 int rc;
154 154
155 IRDA_DEBUG(3, "%s()\n", __FUNCTION__); 155 IRDA_DEBUG(3, "%s()\n", __func__);
156 156
157 rc = pci_register_driver(&via_driver); 157 rc = pci_register_driver(&via_driver);
158 if (rc < 0) { 158 if (rc < 0) {
159 IRDA_DEBUG(0, "%s(): error rc = %d, returning -ENODEV...\n", 159 IRDA_DEBUG(0, "%s(): error rc = %d, returning -ENODEV...\n",
160 __FUNCTION__, rc); 160 __func__, rc);
161 return -ENODEV; 161 return -ENODEV;
162 } 162 }
163 return 0; 163 return 0;
@@ -170,11 +170,11 @@ static int __devinit via_init_one (struct pci_dev *pcidev, const struct pci_devi
170 u16 Chipset,FirDRQ1,FirDRQ0,FirIRQ,FirIOBase; 170 u16 Chipset,FirDRQ1,FirDRQ0,FirIRQ,FirIOBase;
171 chipio_t info; 171 chipio_t info;
172 172
173 IRDA_DEBUG(2, "%s(): Device ID=(0X%X)\n", __FUNCTION__, id->device); 173 IRDA_DEBUG(2, "%s(): Device ID=(0X%X)\n", __func__, id->device);
174 174
175 rc = pci_enable_device (pcidev); 175 rc = pci_enable_device (pcidev);
176 if (rc) { 176 if (rc) {
177 IRDA_DEBUG(0, "%s(): error rc = %d\n", __FUNCTION__, rc); 177 IRDA_DEBUG(0, "%s(): error rc = %d\n", __func__, rc);
178 return -ENODEV; 178 return -ENODEV;
179 } 179 }
180 180
@@ -185,7 +185,7 @@ static int __devinit via_init_one (struct pci_dev *pcidev, const struct pci_devi
185 Chipset=0x3076; 185 Chipset=0x3076;
186 186
187 if (Chipset==0x3076) { 187 if (Chipset==0x3076) {
188 IRDA_DEBUG(2, "%s(): Chipset = 3076\n", __FUNCTION__); 188 IRDA_DEBUG(2, "%s(): Chipset = 3076\n", __func__);
189 189
190 WriteLPCReg(7,0x0c ); 190 WriteLPCReg(7,0x0c );
191 temp=ReadLPCReg(0x30);//check if BIOS Enable Fir 191 temp=ReadLPCReg(0x30);//check if BIOS Enable Fir
@@ -222,7 +222,7 @@ static int __devinit via_init_one (struct pci_dev *pcidev, const struct pci_devi
222 } else 222 } else
223 rc = -ENODEV; //IR not turn on 223 rc = -ENODEV; //IR not turn on
224 } else { //Not VT1211 224 } else { //Not VT1211
225 IRDA_DEBUG(2, "%s(): Chipset = 3096\n", __FUNCTION__); 225 IRDA_DEBUG(2, "%s(): Chipset = 3096\n", __func__);
226 226
227 pci_read_config_byte(pcidev,0x67,&bTmp);//check if BIOS Enable Fir 227 pci_read_config_byte(pcidev,0x67,&bTmp);//check if BIOS Enable Fir
228 if((bTmp&0x01)==1) { // BIOS enable FIR 228 if((bTmp&0x01)==1) { // BIOS enable FIR
@@ -262,7 +262,7 @@ static int __devinit via_init_one (struct pci_dev *pcidev, const struct pci_devi
262 rc = -ENODEV; //IR not turn on !!!!! 262 rc = -ENODEV; //IR not turn on !!!!!
263 }//Not VT1211 263 }//Not VT1211
264 264
265 IRDA_DEBUG(2, "%s(): End - rc = %d\n", __FUNCTION__, rc); 265 IRDA_DEBUG(2, "%s(): End - rc = %d\n", __func__, rc);
266 return rc; 266 return rc;
267} 267}
268 268
@@ -276,7 +276,7 @@ static void via_ircc_clean(void)
276{ 276{
277 int i; 277 int i;
278 278
279 IRDA_DEBUG(3, "%s()\n", __FUNCTION__); 279 IRDA_DEBUG(3, "%s()\n", __func__);
280 280
281 for (i=0; i < ARRAY_SIZE(dev_self); i++) { 281 for (i=0; i < ARRAY_SIZE(dev_self); i++) {
282 if (dev_self[i]) 282 if (dev_self[i])
@@ -286,7 +286,7 @@ static void via_ircc_clean(void)
286 286
287static void __devexit via_remove_one (struct pci_dev *pdev) 287static void __devexit via_remove_one (struct pci_dev *pdev)
288{ 288{
289 IRDA_DEBUG(3, "%s()\n", __FUNCTION__); 289 IRDA_DEBUG(3, "%s()\n", __func__);
290 290
291 /* FIXME : This is ugly. We should use pci_get_drvdata(pdev); 291 /* FIXME : This is ugly. We should use pci_get_drvdata(pdev);
292 * to get our driver instance and call directly via_ircc_close(). 292 * to get our driver instance and call directly via_ircc_close().
@@ -301,7 +301,7 @@ static void __devexit via_remove_one (struct pci_dev *pdev)
301 301
302static void __exit via_ircc_cleanup(void) 302static void __exit via_ircc_cleanup(void)
303{ 303{
304 IRDA_DEBUG(3, "%s()\n", __FUNCTION__); 304 IRDA_DEBUG(3, "%s()\n", __func__);
305 305
306 /* FIXME : This should be redundant, as pci_unregister_driver() 306 /* FIXME : This should be redundant, as pci_unregister_driver()
307 * should call via_remove_one() on each device. 307 * should call via_remove_one() on each device.
@@ -324,7 +324,7 @@ static __devinit int via_ircc_open(int i, chipio_t * info, unsigned int id)
324 struct via_ircc_cb *self; 324 struct via_ircc_cb *self;
325 int err; 325 int err;
326 326
327 IRDA_DEBUG(3, "%s()\n", __FUNCTION__); 327 IRDA_DEBUG(3, "%s()\n", __func__);
328 328
329 if (i >= ARRAY_SIZE(dev_self)) 329 if (i >= ARRAY_SIZE(dev_self))
330 return -ENOMEM; 330 return -ENOMEM;
@@ -360,7 +360,7 @@ static __devinit int via_ircc_open(int i, chipio_t * info, unsigned int id)
360 /* Reserve the ioports that we need */ 360 /* Reserve the ioports that we need */
361 if (!request_region(self->io.fir_base, self->io.fir_ext, driver_name)) { 361 if (!request_region(self->io.fir_base, self->io.fir_ext, driver_name)) {
362 IRDA_DEBUG(0, "%s(), can't get iobase of 0x%03x\n", 362 IRDA_DEBUG(0, "%s(), can't get iobase of 0x%03x\n",
363 __FUNCTION__, self->io.fir_base); 363 __func__, self->io.fir_base);
364 err = -ENODEV; 364 err = -ENODEV;
365 goto err_out1; 365 goto err_out1;
366 } 366 }
@@ -471,7 +471,7 @@ static int via_ircc_close(struct via_ircc_cb *self)
471{ 471{
472 int iobase; 472 int iobase;
473 473
474 IRDA_DEBUG(3, "%s()\n", __FUNCTION__); 474 IRDA_DEBUG(3, "%s()\n", __func__);
475 475
476 IRDA_ASSERT(self != NULL, return -1;); 476 IRDA_ASSERT(self != NULL, return -1;);
477 477
@@ -483,7 +483,7 @@ static int via_ircc_close(struct via_ircc_cb *self)
483 483
484 /* Release the PORT that this driver is using */ 484 /* Release the PORT that this driver is using */
485 IRDA_DEBUG(2, "%s(), Releasing Region %03x\n", 485 IRDA_DEBUG(2, "%s(), Releasing Region %03x\n",
486 __FUNCTION__, self->io.fir_base); 486 __func__, self->io.fir_base);
487 release_region(self->io.fir_base, self->io.fir_ext); 487 release_region(self->io.fir_base, self->io.fir_ext);
488 if (self->tx_buff.head) 488 if (self->tx_buff.head)
489 dma_free_coherent(NULL, self->tx_buff.truesize, 489 dma_free_coherent(NULL, self->tx_buff.truesize,
@@ -509,7 +509,7 @@ static void via_hw_init(struct via_ircc_cb *self)
509{ 509{
510 int iobase = self->io.fir_base; 510 int iobase = self->io.fir_base;
511 511
512 IRDA_DEBUG(3, "%s()\n", __FUNCTION__); 512 IRDA_DEBUG(3, "%s()\n", __func__);
513 513
514 SetMaxRxPacketSize(iobase, 0x0fff); //set to max:4095 514 SetMaxRxPacketSize(iobase, 0x0fff); //set to max:4095
515 // FIFO Init 515 // FIFO Init
@@ -582,7 +582,7 @@ static void via_ircc_change_dongle_speed(int iobase, int speed,
582 speed = speed; 582 speed = speed;
583 583
584 IRDA_DEBUG(1, "%s(): change_dongle_speed to %d for 0x%x, %d\n", 584 IRDA_DEBUG(1, "%s(): change_dongle_speed to %d for 0x%x, %d\n",
585 __FUNCTION__, speed, iobase, dongle_id); 585 __func__, speed, iobase, dongle_id);
586 586
587 switch (dongle_id) { 587 switch (dongle_id) {
588 588
@@ -671,7 +671,7 @@ static void via_ircc_change_dongle_speed(int iobase, int speed,
671 671
672 case 0x11: /* Temic TFDS4500 */ 672 case 0x11: /* Temic TFDS4500 */
673 673
674 IRDA_DEBUG(2, "%s: Temic TFDS4500: One RX pin, TX normal, RX inverted.\n", __FUNCTION__); 674 IRDA_DEBUG(2, "%s: Temic TFDS4500: One RX pin, TX normal, RX inverted.\n", __func__);
675 675
676 UseOneRX(iobase, ON); //use ONE RX....RX1 676 UseOneRX(iobase, ON); //use ONE RX....RX1
677 InvertTX(iobase, OFF); 677 InvertTX(iobase, OFF);
@@ -689,7 +689,7 @@ static void via_ircc_change_dongle_speed(int iobase, int speed,
689 SlowIRRXLowActive(iobase, OFF); 689 SlowIRRXLowActive(iobase, OFF);
690 690
691 } else{ 691 } else{
692 IRDA_DEBUG(0, "%s: Warning: TFDS4500 not running in SIR mode !\n", __FUNCTION__); 692 IRDA_DEBUG(0, "%s: Warning: TFDS4500 not running in SIR mode !\n", __func__);
693 } 693 }
694 break; 694 break;
695 695
@@ -707,7 +707,7 @@ static void via_ircc_change_dongle_speed(int iobase, int speed,
707 707
708 default: 708 default:
709 IRDA_ERROR("%s: Error: dongle_id %d unsupported !\n", 709 IRDA_ERROR("%s: Error: dongle_id %d unsupported !\n",
710 __FUNCTION__, dongle_id); 710 __func__, dongle_id);
711 } 711 }
712} 712}
713 713
@@ -726,7 +726,7 @@ static void via_ircc_change_speed(struct via_ircc_cb *self, __u32 speed)
726 iobase = self->io.fir_base; 726 iobase = self->io.fir_base;
727 /* Update accounting for new speed */ 727 /* Update accounting for new speed */
728 self->io.speed = speed; 728 self->io.speed = speed;
729 IRDA_DEBUG(1, "%s: change_speed to %d bps.\n", __FUNCTION__, speed); 729 IRDA_DEBUG(1, "%s: change_speed to %d bps.\n", __func__, speed);
730 730
731 WriteReg(iobase, I_ST_CT_0, 0x0); 731 WriteReg(iobase, I_ST_CT_0, 0x0);
732 732
@@ -957,7 +957,7 @@ static int via_ircc_dma_xmit(struct via_ircc_cb *self, u16 iobase)
957 self->tx_buff.head) + self->tx_buff_dma, 957 self->tx_buff.head) + self->tx_buff_dma,
958 self->tx_fifo.queue[self->tx_fifo.ptr].len, DMA_TX_MODE); 958 self->tx_fifo.queue[self->tx_fifo.ptr].len, DMA_TX_MODE);
959 IRDA_DEBUG(1, "%s: tx_fifo.ptr=%x,len=%x,tx_fifo.len=%x..\n", 959 IRDA_DEBUG(1, "%s: tx_fifo.ptr=%x,len=%x,tx_fifo.len=%x..\n",
960 __FUNCTION__, self->tx_fifo.ptr, 960 __func__, self->tx_fifo.ptr,
961 self->tx_fifo.queue[self->tx_fifo.ptr].len, 961 self->tx_fifo.queue[self->tx_fifo.ptr].len,
962 self->tx_fifo.len); 962 self->tx_fifo.len);
963 963
@@ -981,7 +981,7 @@ static int via_ircc_dma_xmit_complete(struct via_ircc_cb *self)
981 int ret = TRUE; 981 int ret = TRUE;
982 u8 Tx_status; 982 u8 Tx_status;
983 983
984 IRDA_DEBUG(3, "%s()\n", __FUNCTION__); 984 IRDA_DEBUG(3, "%s()\n", __func__);
985 985
986 iobase = self->io.fir_base; 986 iobase = self->io.fir_base;
987 /* Disable DMA */ 987 /* Disable DMA */
@@ -1014,7 +1014,7 @@ static int via_ircc_dma_xmit_complete(struct via_ircc_cb *self)
1014 } 1014 }
1015 IRDA_DEBUG(1, 1015 IRDA_DEBUG(1,
1016 "%s: tx_fifo.len=%x ,tx_fifo.ptr=%x,tx_fifo.free=%x...\n", 1016 "%s: tx_fifo.len=%x ,tx_fifo.ptr=%x,tx_fifo.free=%x...\n",
1017 __FUNCTION__, 1017 __func__,
1018 self->tx_fifo.len, self->tx_fifo.ptr, self->tx_fifo.free); 1018 self->tx_fifo.len, self->tx_fifo.ptr, self->tx_fifo.free);
1019/* F01_S 1019/* F01_S
1020 // Any frames to be sent back-to-back? 1020 // Any frames to be sent back-to-back?
@@ -1050,7 +1050,7 @@ static int via_ircc_dma_receive(struct via_ircc_cb *self)
1050 1050
1051 iobase = self->io.fir_base; 1051 iobase = self->io.fir_base;
1052 1052
1053 IRDA_DEBUG(3, "%s()\n", __FUNCTION__); 1053 IRDA_DEBUG(3, "%s()\n", __func__);
1054 1054
1055 self->tx_fifo.len = self->tx_fifo.ptr = self->tx_fifo.free = 0; 1055 self->tx_fifo.len = self->tx_fifo.ptr = self->tx_fifo.free = 0;
1056 self->tx_fifo.tail = self->tx_buff.head; 1056 self->tx_fifo.tail = self->tx_buff.head;
@@ -1134,13 +1134,13 @@ static int via_ircc_dma_receive_complete(struct via_ircc_cb *self,
1134 return TRUE; //interrupt only, data maybe move by RxT 1134 return TRUE; //interrupt only, data maybe move by RxT
1135 if (((len - 4) < 2) || ((len - 4) > 2048)) { 1135 if (((len - 4) < 2) || ((len - 4) > 2048)) {
1136 IRDA_DEBUG(1, "%s(): Trouble:len=%x,CurCount=%x,LastCount=%x..\n", 1136 IRDA_DEBUG(1, "%s(): Trouble:len=%x,CurCount=%x,LastCount=%x..\n",
1137 __FUNCTION__, len, RxCurCount(iobase, self), 1137 __func__, len, RxCurCount(iobase, self),
1138 self->RxLastCount); 1138 self->RxLastCount);
1139 hwreset(self); 1139 hwreset(self);
1140 return FALSE; 1140 return FALSE;
1141 } 1141 }
1142 IRDA_DEBUG(2, "%s(): fifo.len=%x,len=%x,CurCount=%x..\n", 1142 IRDA_DEBUG(2, "%s(): fifo.len=%x,len=%x,CurCount=%x..\n",
1143 __FUNCTION__, 1143 __func__,
1144 st_fifo->len, len - 4, RxCurCount(iobase, self)); 1144 st_fifo->len, len - 4, RxCurCount(iobase, self));
1145 1145
1146 st_fifo->entries[st_fifo->tail].status = status; 1146 st_fifo->entries[st_fifo->tail].status = status;
@@ -1187,7 +1187,7 @@ F01_E */
1187 skb_put(skb, len - 4); 1187 skb_put(skb, len - 4);
1188 1188
1189 skb_copy_to_linear_data(skb, self->rx_buff.data, len - 4); 1189 skb_copy_to_linear_data(skb, self->rx_buff.data, len - 4);
1190 IRDA_DEBUG(2, "%s(): len=%x.rx_buff=%p\n", __FUNCTION__, 1190 IRDA_DEBUG(2, "%s(): len=%x.rx_buff=%p\n", __func__,
1191 len - 4, self->rx_buff.data); 1191 len - 4, self->rx_buff.data);
1192 1192
1193 // Move to next frame 1193 // Move to next frame
@@ -1217,7 +1217,7 @@ static int upload_rxdata(struct via_ircc_cb *self, int iobase)
1217 1217
1218 len = GetRecvByte(iobase, self); 1218 len = GetRecvByte(iobase, self);
1219 1219
1220 IRDA_DEBUG(2, "%s(): len=%x\n", __FUNCTION__, len); 1220 IRDA_DEBUG(2, "%s(): len=%x\n", __func__, len);
1221 1221
1222 if ((len - 4) < 2) { 1222 if ((len - 4) < 2) {
1223 self->stats.rx_dropped++; 1223 self->stats.rx_dropped++;
@@ -1302,7 +1302,7 @@ static int RxTimerHandler(struct via_ircc_cb *self, int iobase)
1302 skb_put(skb, len - 4); 1302 skb_put(skb, len - 4);
1303 skb_copy_to_linear_data(skb, self->rx_buff.data, len - 4); 1303 skb_copy_to_linear_data(skb, self->rx_buff.data, len - 4);
1304 1304
1305 IRDA_DEBUG(2, "%s(): len=%x.head=%x\n", __FUNCTION__, 1305 IRDA_DEBUG(2, "%s(): len=%x.head=%x\n", __func__,
1306 len - 4, st_fifo->head); 1306 len - 4, st_fifo->head);
1307 1307
1308 // Move to next frame 1308 // Move to next frame
@@ -1318,7 +1318,7 @@ static int RxTimerHandler(struct via_ircc_cb *self, int iobase)
1318 1318
1319 IRDA_DEBUG(2, 1319 IRDA_DEBUG(2,
1320 "%s(): End of upload HostStatus=%x,RxStatus=%x\n", 1320 "%s(): End of upload HostStatus=%x,RxStatus=%x\n",
1321 __FUNCTION__, 1321 __func__,
1322 GetHostStatus(iobase), GetRXStatus(iobase)); 1322 GetHostStatus(iobase), GetRXStatus(iobase));
1323 1323
1324 /* 1324 /*
@@ -1358,7 +1358,7 @@ static irqreturn_t via_ircc_interrupt(int dummy, void *dev_id)
1358 iHostIntType = GetHostStatus(iobase); 1358 iHostIntType = GetHostStatus(iobase);
1359 1359
1360 IRDA_DEBUG(4, "%s(): iHostIntType %02x: %s %s %s %02x\n", 1360 IRDA_DEBUG(4, "%s(): iHostIntType %02x: %s %s %s %02x\n",
1361 __FUNCTION__, iHostIntType, 1361 __func__, iHostIntType,
1362 (iHostIntType & 0x40) ? "Timer" : "", 1362 (iHostIntType & 0x40) ? "Timer" : "",
1363 (iHostIntType & 0x20) ? "Tx" : "", 1363 (iHostIntType & 0x20) ? "Tx" : "",
1364 (iHostIntType & 0x10) ? "Rx" : "", 1364 (iHostIntType & 0x10) ? "Rx" : "",
@@ -1388,7 +1388,7 @@ static irqreturn_t via_ircc_interrupt(int dummy, void *dev_id)
1388 iTxIntType = GetTXStatus(iobase); 1388 iTxIntType = GetTXStatus(iobase);
1389 1389
1390 IRDA_DEBUG(4, "%s(): iTxIntType %02x: %s %s %s %s\n", 1390 IRDA_DEBUG(4, "%s(): iTxIntType %02x: %s %s %s %s\n",
1391 __FUNCTION__, iTxIntType, 1391 __func__, iTxIntType,
1392 (iTxIntType & 0x08) ? "FIFO underr." : "", 1392 (iTxIntType & 0x08) ? "FIFO underr." : "",
1393 (iTxIntType & 0x04) ? "EOM" : "", 1393 (iTxIntType & 0x04) ? "EOM" : "",
1394 (iTxIntType & 0x02) ? "FIFO ready" : "", 1394 (iTxIntType & 0x02) ? "FIFO ready" : "",
@@ -1412,7 +1412,7 @@ static irqreturn_t via_ircc_interrupt(int dummy, void *dev_id)
1412 iRxIntType = GetRXStatus(iobase); 1412 iRxIntType = GetRXStatus(iobase);
1413 1413
1414 IRDA_DEBUG(4, "%s(): iRxIntType %02x: %s %s %s %s %s %s %s\n", 1414 IRDA_DEBUG(4, "%s(): iRxIntType %02x: %s %s %s %s %s %s %s\n",
1415 __FUNCTION__, iRxIntType, 1415 __func__, iRxIntType,
1416 (iRxIntType & 0x80) ? "PHY err." : "", 1416 (iRxIntType & 0x80) ? "PHY err." : "",
1417 (iRxIntType & 0x40) ? "CRC err" : "", 1417 (iRxIntType & 0x40) ? "CRC err" : "",
1418 (iRxIntType & 0x20) ? "FIFO overr." : "", 1418 (iRxIntType & 0x20) ? "FIFO overr." : "",
@@ -1421,7 +1421,7 @@ static irqreturn_t via_ircc_interrupt(int dummy, void *dev_id)
1421 (iRxIntType & 0x02) ? "RxMaxLen" : "", 1421 (iRxIntType & 0x02) ? "RxMaxLen" : "",
1422 (iRxIntType & 0x01) ? "SIR bad" : ""); 1422 (iRxIntType & 0x01) ? "SIR bad" : "");
1423 if (!iRxIntType) 1423 if (!iRxIntType)
1424 IRDA_DEBUG(3, "%s(): RxIRQ =0\n", __FUNCTION__); 1424 IRDA_DEBUG(3, "%s(): RxIRQ =0\n", __func__);
1425 1425
1426 if (iRxIntType & 0x10) { 1426 if (iRxIntType & 0x10) {
1427 if (via_ircc_dma_receive_complete(self, iobase)) { 1427 if (via_ircc_dma_receive_complete(self, iobase)) {
@@ -1431,7 +1431,7 @@ static irqreturn_t via_ircc_interrupt(int dummy, void *dev_id)
1431 } // No ERR 1431 } // No ERR
1432 else { //ERR 1432 else { //ERR
1433 IRDA_DEBUG(4, "%s(): RxIRQ ERR:iRxIntType=%x,HostIntType=%x,CurCount=%x,RxLastCount=%x_____\n", 1433 IRDA_DEBUG(4, "%s(): RxIRQ ERR:iRxIntType=%x,HostIntType=%x,CurCount=%x,RxLastCount=%x_____\n",
1434 __FUNCTION__, iRxIntType, iHostIntType, 1434 __func__, iRxIntType, iHostIntType,
1435 RxCurCount(iobase, self), 1435 RxCurCount(iobase, self),
1436 self->RxLastCount); 1436 self->RxLastCount);
1437 1437
@@ -1456,7 +1456,7 @@ static void hwreset(struct via_ircc_cb *self)
1456 int iobase; 1456 int iobase;
1457 iobase = self->io.fir_base; 1457 iobase = self->io.fir_base;
1458 1458
1459 IRDA_DEBUG(3, "%s()\n", __FUNCTION__); 1459 IRDA_DEBUG(3, "%s()\n", __func__);
1460 1460
1461 ResetChip(iobase, 5); 1461 ResetChip(iobase, 5);
1462 EnableDMA(iobase, OFF); 1462 EnableDMA(iobase, OFF);
@@ -1501,7 +1501,7 @@ static int via_ircc_is_receiving(struct via_ircc_cb *self)
1501 if (CkRxRecv(iobase, self)) 1501 if (CkRxRecv(iobase, self))
1502 status = TRUE; 1502 status = TRUE;
1503 1503
1504 IRDA_DEBUG(2, "%s(): status=%x....\n", __FUNCTION__, status); 1504 IRDA_DEBUG(2, "%s(): status=%x....\n", __func__, status);
1505 1505
1506 return status; 1506 return status;
1507} 1507}
@@ -1519,7 +1519,7 @@ static int via_ircc_net_open(struct net_device *dev)
1519 int iobase; 1519 int iobase;
1520 char hwname[32]; 1520 char hwname[32];
1521 1521
1522 IRDA_DEBUG(3, "%s()\n", __FUNCTION__); 1522 IRDA_DEBUG(3, "%s()\n", __func__);
1523 1523
1524 IRDA_ASSERT(dev != NULL, return -1;); 1524 IRDA_ASSERT(dev != NULL, return -1;);
1525 self = (struct via_ircc_cb *) dev->priv; 1525 self = (struct via_ircc_cb *) dev->priv;
@@ -1586,7 +1586,7 @@ static int via_ircc_net_close(struct net_device *dev)
1586 struct via_ircc_cb *self; 1586 struct via_ircc_cb *self;
1587 int iobase; 1587 int iobase;
1588 1588
1589 IRDA_DEBUG(3, "%s()\n", __FUNCTION__); 1589 IRDA_DEBUG(3, "%s()\n", __func__);
1590 1590
1591 IRDA_ASSERT(dev != NULL, return -1;); 1591 IRDA_ASSERT(dev != NULL, return -1;);
1592 self = (struct via_ircc_cb *) dev->priv; 1592 self = (struct via_ircc_cb *) dev->priv;
@@ -1630,7 +1630,7 @@ static int via_ircc_net_ioctl(struct net_device *dev, struct ifreq *rq,
1630 IRDA_ASSERT(dev != NULL, return -1;); 1630 IRDA_ASSERT(dev != NULL, return -1;);
1631 self = dev->priv; 1631 self = dev->priv;
1632 IRDA_ASSERT(self != NULL, return -1;); 1632 IRDA_ASSERT(self != NULL, return -1;);
1633 IRDA_DEBUG(1, "%s(), %s, (cmd=0x%X)\n", __FUNCTION__, dev->name, 1633 IRDA_DEBUG(1, "%s(), %s, (cmd=0x%X)\n", __func__, dev->name,
1634 cmd); 1634 cmd);
1635 /* Disable interrupts & save flags */ 1635 /* Disable interrupts & save flags */
1636 spin_lock_irqsave(&self->lock, flags); 1636 spin_lock_irqsave(&self->lock, flags);
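
The via-ircc.c hunks above are a mechanical conversion from the GCC-specific __FUNCTION__ identifier to the C99 predefined identifier __func__. A minimal standalone sketch (not part of the patch) showing that both name the enclosing function:

#include <stdio.h>

static void probe_device(void)
{
	/* __func__ expands to "probe_device", exactly what the
	 * IRDA_DEBUG("%s() ...", __func__) calls above rely on. */
	printf("%s(): called\n", __func__);
}

int main(void)
{
	probe_device();
	return 0;
}
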
diff --git a/drivers/net/irda/vlsi_ir.c b/drivers/net/irda/vlsi_ir.c
index d15e00b8591e..18f4b3a96aed 100644
--- a/drivers/net/irda/vlsi_ir.c
+++ b/drivers/net/irda/vlsi_ir.c
@@ -140,15 +140,15 @@ static void vlsi_ring_debug(struct vlsi_ring *r)
140 unsigned i; 140 unsigned i;
141 141
142 printk(KERN_DEBUG "%s - ring %p / size %u / mask 0x%04x / len %u / dir %d / hw %p\n", 142 printk(KERN_DEBUG "%s - ring %p / size %u / mask 0x%04x / len %u / dir %d / hw %p\n",
143 __FUNCTION__, r, r->size, r->mask, r->len, r->dir, r->rd[0].hw); 143 __func__, r, r->size, r->mask, r->len, r->dir, r->rd[0].hw);
144 printk(KERN_DEBUG "%s - head = %d / tail = %d\n", __FUNCTION__, 144 printk(KERN_DEBUG "%s - head = %d / tail = %d\n", __func__,
145 atomic_read(&r->head) & r->mask, atomic_read(&r->tail) & r->mask); 145 atomic_read(&r->head) & r->mask, atomic_read(&r->tail) & r->mask);
146 for (i = 0; i < r->size; i++) { 146 for (i = 0; i < r->size; i++) {
147 rd = &r->rd[i]; 147 rd = &r->rd[i];
148 printk(KERN_DEBUG "%s - ring descr %u: ", __FUNCTION__, i); 148 printk(KERN_DEBUG "%s - ring descr %u: ", __func__, i);
149 printk("skb=%p data=%p hw=%p\n", rd->skb, rd->buf, rd->hw); 149 printk("skb=%p data=%p hw=%p\n", rd->skb, rd->buf, rd->hw);
150 printk(KERN_DEBUG "%s - hw: status=%02x count=%u addr=0x%08x\n", 150 printk(KERN_DEBUG "%s - hw: status=%02x count=%u addr=0x%08x\n",
151 __FUNCTION__, (unsigned) rd_get_status(rd), 151 __func__, (unsigned) rd_get_status(rd),
152 (unsigned) rd_get_count(rd), (unsigned) rd_get_addr(rd)); 152 (unsigned) rd_get_count(rd), (unsigned) rd_get_addr(rd));
153 } 153 }
154} 154}
@@ -435,7 +435,7 @@ static struct vlsi_ring *vlsi_alloc_ring(struct pci_dev *pdev, struct ring_descr
435 || !(busaddr = pci_map_single(pdev, rd->buf, len, dir))) { 435 || !(busaddr = pci_map_single(pdev, rd->buf, len, dir))) {
436 if (rd->buf) { 436 if (rd->buf) {
437 IRDA_ERROR("%s: failed to create PCI-MAP for %p", 437 IRDA_ERROR("%s: failed to create PCI-MAP for %p",
438 __FUNCTION__, rd->buf); 438 __func__, rd->buf);
439 kfree(rd->buf); 439 kfree(rd->buf);
440 rd->buf = NULL; 440 rd->buf = NULL;
441 } 441 }
@@ -489,7 +489,7 @@ static int vlsi_create_hwif(vlsi_irda_dev_t *idev)
489 ringarea = pci_alloc_consistent(idev->pdev, HW_RING_AREA_SIZE, &idev->busaddr); 489 ringarea = pci_alloc_consistent(idev->pdev, HW_RING_AREA_SIZE, &idev->busaddr);
490 if (!ringarea) { 490 if (!ringarea) {
491 IRDA_ERROR("%s: insufficient memory for descriptor rings\n", 491 IRDA_ERROR("%s: insufficient memory for descriptor rings\n",
492 __FUNCTION__); 492 __func__);
493 goto out; 493 goto out;
494 } 494 }
495 memset(ringarea, 0, HW_RING_AREA_SIZE); 495 memset(ringarea, 0, HW_RING_AREA_SIZE);
@@ -564,7 +564,7 @@ static int vlsi_process_rx(struct vlsi_ring *r, struct ring_descr *rd)
564 crclen = (idev->mode==IFF_FIR) ? sizeof(u32) : sizeof(u16); 564 crclen = (idev->mode==IFF_FIR) ? sizeof(u32) : sizeof(u16);
565 len -= crclen; /* remove trailing CRC */ 565 len -= crclen; /* remove trailing CRC */
566 if (len <= 0) { 566 if (len <= 0) {
567 IRDA_DEBUG(0, "%s: strange frame (len=%d)\n", __FUNCTION__, len); 567 IRDA_DEBUG(0, "%s: strange frame (len=%d)\n", __func__, len);
568 ret |= VLSI_RX_DROP; 568 ret |= VLSI_RX_DROP;
569 goto done; 569 goto done;
570 } 570 }
@@ -579,14 +579,14 @@ static int vlsi_process_rx(struct vlsi_ring *r, struct ring_descr *rd)
579 */ 579 */
580 le16_to_cpus(rd->buf+len); 580 le16_to_cpus(rd->buf+len);
581 if (irda_calc_crc16(INIT_FCS,rd->buf,len+crclen) != GOOD_FCS) { 581 if (irda_calc_crc16(INIT_FCS,rd->buf,len+crclen) != GOOD_FCS) {
582 IRDA_DEBUG(0, "%s: crc error\n", __FUNCTION__); 582 IRDA_DEBUG(0, "%s: crc error\n", __func__);
583 ret |= VLSI_RX_CRC; 583 ret |= VLSI_RX_CRC;
584 goto done; 584 goto done;
585 } 585 }
586 } 586 }
587 587
588 if (!rd->skb) { 588 if (!rd->skb) {
589 IRDA_WARNING("%s: rx packet lost\n", __FUNCTION__); 589 IRDA_WARNING("%s: rx packet lost\n", __func__);
590 ret |= VLSI_RX_DROP; 590 ret |= VLSI_RX_DROP;
591 goto done; 591 goto done;
592 } 592 }
@@ -617,7 +617,7 @@ static void vlsi_fill_rx(struct vlsi_ring *r)
617 for (rd = ring_last(r); rd != NULL; rd = ring_put(r)) { 617 for (rd = ring_last(r); rd != NULL; rd = ring_put(r)) {
618 if (rd_is_active(rd)) { 618 if (rd_is_active(rd)) {
619 IRDA_WARNING("%s: driver bug: rx descr race with hw\n", 619 IRDA_WARNING("%s: driver bug: rx descr race with hw\n",
620 __FUNCTION__); 620 __func__);
621 vlsi_ring_debug(r); 621 vlsi_ring_debug(r);
622 break; 622 break;
623 } 623 }
@@ -676,7 +676,7 @@ static void vlsi_rx_interrupt(struct net_device *ndev)
676 676
677 if (ring_first(r) == NULL) { 677 if (ring_first(r) == NULL) {
678 /* we are in big trouble, if this should ever happen */ 678 /* we are in big trouble, if this should ever happen */
679 IRDA_ERROR("%s: rx ring exhausted!\n", __FUNCTION__); 679 IRDA_ERROR("%s: rx ring exhausted!\n", __func__);
680 vlsi_ring_debug(r); 680 vlsi_ring_debug(r);
681 } 681 }
682 else 682 else
@@ -697,7 +697,7 @@ static void vlsi_unarm_rx(vlsi_irda_dev_t *idev)
697 if (rd_is_active(rd)) { 697 if (rd_is_active(rd)) {
698 rd_set_status(rd, 0); 698 rd_set_status(rd, 0);
699 if (rd_get_count(rd)) { 699 if (rd_get_count(rd)) {
700 IRDA_DEBUG(0, "%s - dropping rx packet\n", __FUNCTION__); 700 IRDA_DEBUG(0, "%s - dropping rx packet\n", __func__);
701 ret = -VLSI_RX_DROP; 701 ret = -VLSI_RX_DROP;
702 } 702 }
703 rd_set_count(rd, 0); 703 rd_set_count(rd, 0);
@@ -772,7 +772,7 @@ static int vlsi_set_baud(vlsi_irda_dev_t *idev, unsigned iobase)
772 int fifocnt; 772 int fifocnt;
773 773
774 baudrate = idev->new_baud; 774 baudrate = idev->new_baud;
775 IRDA_DEBUG(2, "%s: %d -> %d\n", __FUNCTION__, idev->baud, idev->new_baud); 775 IRDA_DEBUG(2, "%s: %d -> %d\n", __func__, idev->baud, idev->new_baud);
776 if (baudrate == 4000000) { 776 if (baudrate == 4000000) {
777 mode = IFF_FIR; 777 mode = IFF_FIR;
778 config = IRCFG_FIR; 778 config = IRCFG_FIR;
@@ -789,7 +789,7 @@ static int vlsi_set_baud(vlsi_irda_dev_t *idev, unsigned iobase)
789 switch(baudrate) { 789 switch(baudrate) {
790 default: 790 default:
791 IRDA_WARNING("%s: undefined baudrate %d - fallback to 9600!\n", 791 IRDA_WARNING("%s: undefined baudrate %d - fallback to 9600!\n",
792 __FUNCTION__, baudrate); 792 __func__, baudrate);
793 baudrate = 9600; 793 baudrate = 9600;
794 /* fallthru */ 794 /* fallthru */
795 case 2400: 795 case 2400:
@@ -806,7 +806,7 @@ static int vlsi_set_baud(vlsi_irda_dev_t *idev, unsigned iobase)
806 806
807 fifocnt = inw(iobase+VLSI_PIO_RCVBCNT) & RCVBCNT_MASK; 807 fifocnt = inw(iobase+VLSI_PIO_RCVBCNT) & RCVBCNT_MASK;
808 if (fifocnt != 0) { 808 if (fifocnt != 0) {
809 IRDA_DEBUG(0, "%s: rx fifo not empty(%d)\n", __FUNCTION__, fifocnt); 809 IRDA_DEBUG(0, "%s: rx fifo not empty(%d)\n", __func__, fifocnt);
810 } 810 }
811 811
812 outw(0, iobase+VLSI_PIO_IRENABLE); 812 outw(0, iobase+VLSI_PIO_IRENABLE);
@@ -830,14 +830,14 @@ static int vlsi_set_baud(vlsi_irda_dev_t *idev, unsigned iobase)
830 config ^= IRENABLE_SIR_ON; 830 config ^= IRENABLE_SIR_ON;
831 831
832 if (config != (IRENABLE_PHYANDCLOCK|IRENABLE_ENRXST)) { 832 if (config != (IRENABLE_PHYANDCLOCK|IRENABLE_ENRXST)) {
833 IRDA_WARNING("%s: failed to set %s mode!\n", __FUNCTION__, 833 IRDA_WARNING("%s: failed to set %s mode!\n", __func__,
834 (mode==IFF_SIR)?"SIR":((mode==IFF_MIR)?"MIR":"FIR")); 834 (mode==IFF_SIR)?"SIR":((mode==IFF_MIR)?"MIR":"FIR"));
835 ret = -1; 835 ret = -1;
836 } 836 }
837 else { 837 else {
838 if (inw(iobase+VLSI_PIO_PHYCTL) != nphyctl) { 838 if (inw(iobase+VLSI_PIO_PHYCTL) != nphyctl) {
839 IRDA_WARNING("%s: failed to apply baudrate %d\n", 839 IRDA_WARNING("%s: failed to apply baudrate %d\n",
840 __FUNCTION__, baudrate); 840 __func__, baudrate);
841 ret = -1; 841 ret = -1;
842 } 842 }
843 else { 843 else {
@@ -849,7 +849,7 @@ static int vlsi_set_baud(vlsi_irda_dev_t *idev, unsigned iobase)
849 } 849 }
850 850
851 if (ret) 851 if (ret)
852 vlsi_reg_debug(iobase,__FUNCTION__); 852 vlsi_reg_debug(iobase,__func__);
853 853
854 return ret; 854 return ret;
855} 855}
@@ -982,7 +982,7 @@ static int vlsi_hard_start_xmit(struct sk_buff *skb, struct net_device *ndev)
982 982
983 if (len >= r->len-5) 983 if (len >= r->len-5)
984 IRDA_WARNING("%s: possible buffer overflow with SIR wrapping!\n", 984 IRDA_WARNING("%s: possible buffer overflow with SIR wrapping!\n",
985 __FUNCTION__); 985 __func__);
986 } 986 }
987 else { 987 else {
988 /* hw deals with MIR/FIR mode wrapping */ 988 /* hw deals with MIR/FIR mode wrapping */
@@ -1027,7 +1027,7 @@ static int vlsi_hard_start_xmit(struct sk_buff *skb, struct net_device *ndev)
1027 1027
1028 fifocnt = inw(ndev->base_addr+VLSI_PIO_RCVBCNT) & RCVBCNT_MASK; 1028 fifocnt = inw(ndev->base_addr+VLSI_PIO_RCVBCNT) & RCVBCNT_MASK;
1029 if (fifocnt != 0) { 1029 if (fifocnt != 0) {
1030 IRDA_DEBUG(0, "%s: rx fifo not empty(%d)\n", __FUNCTION__, fifocnt); 1030 IRDA_DEBUG(0, "%s: rx fifo not empty(%d)\n", __func__, fifocnt);
1031 } 1031 }
1032 1032
1033 config = inw(iobase+VLSI_PIO_IRCFG); 1033 config = inw(iobase+VLSI_PIO_IRCFG);
@@ -1040,7 +1040,7 @@ static int vlsi_hard_start_xmit(struct sk_buff *skb, struct net_device *ndev)
1040 1040
1041 if (ring_put(r) == NULL) { 1041 if (ring_put(r) == NULL) {
1042 netif_stop_queue(ndev); 1042 netif_stop_queue(ndev);
1043 IRDA_DEBUG(3, "%s: tx ring full - queue stopped\n", __FUNCTION__); 1043 IRDA_DEBUG(3, "%s: tx ring full - queue stopped\n", __func__);
1044 } 1044 }
1045 spin_unlock_irqrestore(&idev->lock, flags); 1045 spin_unlock_irqrestore(&idev->lock, flags);
1046 1046
@@ -1049,7 +1049,7 @@ static int vlsi_hard_start_xmit(struct sk_buff *skb, struct net_device *ndev)
1049drop_unlock: 1049drop_unlock:
1050 spin_unlock_irqrestore(&idev->lock, flags); 1050 spin_unlock_irqrestore(&idev->lock, flags);
1051drop: 1051drop:
1052 IRDA_WARNING("%s: dropping packet - %s\n", __FUNCTION__, msg); 1052 IRDA_WARNING("%s: dropping packet - %s\n", __func__, msg);
1053 dev_kfree_skb_any(skb); 1053 dev_kfree_skb_any(skb);
1054 idev->stats.tx_errors++; 1054 idev->stats.tx_errors++;
1055 idev->stats.tx_dropped++; 1055 idev->stats.tx_dropped++;
@@ -1106,7 +1106,7 @@ static void vlsi_tx_interrupt(struct net_device *ndev)
1106 fifocnt = inw(iobase+VLSI_PIO_RCVBCNT) & RCVBCNT_MASK; 1106 fifocnt = inw(iobase+VLSI_PIO_RCVBCNT) & RCVBCNT_MASK;
1107 if (fifocnt != 0) { 1107 if (fifocnt != 0) {
1108 IRDA_DEBUG(0, "%s: rx fifo not empty(%d)\n", 1108 IRDA_DEBUG(0, "%s: rx fifo not empty(%d)\n",
1109 __FUNCTION__, fifocnt); 1109 __func__, fifocnt);
1110 } 1110 }
1111 outw(config | IRCFG_ENTX, iobase+VLSI_PIO_IRCFG); 1111 outw(config | IRCFG_ENTX, iobase+VLSI_PIO_IRCFG);
1112 } 1112 }
@@ -1115,7 +1115,7 @@ static void vlsi_tx_interrupt(struct net_device *ndev)
1115 1115
1116 if (netif_queue_stopped(ndev) && !idev->new_baud) { 1116 if (netif_queue_stopped(ndev) && !idev->new_baud) {
1117 netif_wake_queue(ndev); 1117 netif_wake_queue(ndev);
1118 IRDA_DEBUG(3, "%s: queue awoken\n", __FUNCTION__); 1118 IRDA_DEBUG(3, "%s: queue awoken\n", __func__);
1119 } 1119 }
1120} 1120}
1121 1121
@@ -1138,7 +1138,7 @@ static void vlsi_unarm_tx(vlsi_irda_dev_t *idev)
1138 dev_kfree_skb_any(rd->skb); 1138 dev_kfree_skb_any(rd->skb);
1139 rd->skb = NULL; 1139 rd->skb = NULL;
1140 } 1140 }
1141 IRDA_DEBUG(0, "%s - dropping tx packet\n", __FUNCTION__); 1141 IRDA_DEBUG(0, "%s - dropping tx packet\n", __func__);
1142 ret = -VLSI_TX_DROP; 1142 ret = -VLSI_TX_DROP;
1143 } 1143 }
1144 else 1144 else
@@ -1188,7 +1188,7 @@ static int vlsi_start_clock(struct pci_dev *pdev)
1188 if (count < 3) { 1188 if (count < 3) {
1189 if (clksrc == 1) { /* explicitly asked for PLL hence bail out */ 1189 if (clksrc == 1) { /* explicitly asked for PLL hence bail out */
1190 IRDA_ERROR("%s: no PLL or failed to lock!\n", 1190 IRDA_ERROR("%s: no PLL or failed to lock!\n",
1191 __FUNCTION__); 1191 __func__);
1192 clkctl = CLKCTL_CLKSTP; 1192 clkctl = CLKCTL_CLKSTP;
1193 pci_write_config_byte(pdev, VLSI_PCI_CLKCTL, clkctl); 1193 pci_write_config_byte(pdev, VLSI_PCI_CLKCTL, clkctl);
1194 return -1; 1194 return -1;
@@ -1197,7 +1197,7 @@ static int vlsi_start_clock(struct pci_dev *pdev)
1197 clksrc = 3; /* fallback to 40MHz XCLK (OB800) */ 1197 clksrc = 3; /* fallback to 40MHz XCLK (OB800) */
1198 1198
1199 IRDA_DEBUG(0, "%s: PLL not locked, fallback to clksrc=%d\n", 1199 IRDA_DEBUG(0, "%s: PLL not locked, fallback to clksrc=%d\n",
1200 __FUNCTION__, clksrc); 1200 __func__, clksrc);
1201 } 1201 }
1202 else 1202 else
1203 clksrc = 1; /* got successful PLL lock */ 1203 clksrc = 1; /* got successful PLL lock */
@@ -1269,7 +1269,7 @@ static int vlsi_init_chip(struct pci_dev *pdev)
1269 /* start the clock and clean the registers */ 1269 /* start the clock and clean the registers */
1270 1270
1271 if (vlsi_start_clock(pdev)) { 1271 if (vlsi_start_clock(pdev)) {
1272 IRDA_ERROR("%s: no valid clock source\n", __FUNCTION__); 1272 IRDA_ERROR("%s: no valid clock source\n", __func__);
1273 return -1; 1273 return -1;
1274 } 1274 }
1275 iobase = ndev->base_addr; 1275 iobase = ndev->base_addr;
@@ -1386,7 +1386,7 @@ static void vlsi_tx_timeout(struct net_device *ndev)
1386 vlsi_irda_dev_t *idev = ndev->priv; 1386 vlsi_irda_dev_t *idev = ndev->priv;
1387 1387
1388 1388
1389 vlsi_reg_debug(ndev->base_addr, __FUNCTION__); 1389 vlsi_reg_debug(ndev->base_addr, __func__);
1390 vlsi_ring_debug(idev->tx_ring); 1390 vlsi_ring_debug(idev->tx_ring);
1391 1391
1392 if (netif_running(ndev)) 1392 if (netif_running(ndev))
@@ -1401,7 +1401,7 @@ static void vlsi_tx_timeout(struct net_device *ndev)
1401 1401
1402 if (vlsi_start_hw(idev)) 1402 if (vlsi_start_hw(idev))
1403 IRDA_ERROR("%s: failed to restart hw - %s(%s) unusable!\n", 1403 IRDA_ERROR("%s: failed to restart hw - %s(%s) unusable!\n",
1404 __FUNCTION__, pci_name(idev->pdev), ndev->name); 1404 __func__, pci_name(idev->pdev), ndev->name);
1405 else 1405 else
1406 netif_start_queue(ndev); 1406 netif_start_queue(ndev);
1407} 1407}
@@ -1446,7 +1446,7 @@ static int vlsi_ioctl(struct net_device *ndev, struct ifreq *rq, int cmd)
1446 break; 1446 break;
1447 default: 1447 default:
1448 IRDA_WARNING("%s: notsupp - cmd=%04x\n", 1448 IRDA_WARNING("%s: notsupp - cmd=%04x\n",
1449 __FUNCTION__, cmd); 1449 __func__, cmd);
1450 ret = -EOPNOTSUPP; 1450 ret = -EOPNOTSUPP;
1451 } 1451 }
1452 1452
@@ -1491,7 +1491,7 @@ static irqreturn_t vlsi_interrupt(int irq, void *dev_instance)
1491 1491
1492 if (boguscount <= 0) 1492 if (boguscount <= 0)
1493 IRDA_MESSAGE("%s: too much work in interrupt!\n", 1493 IRDA_MESSAGE("%s: too much work in interrupt!\n",
1494 __FUNCTION__); 1494 __func__);
1495 return IRQ_RETVAL(handled); 1495 return IRQ_RETVAL(handled);
1496} 1496}
1497 1497
@@ -1504,7 +1504,7 @@ static int vlsi_open(struct net_device *ndev)
1504 char hwname[32]; 1504 char hwname[32];
1505 1505
1506 if (pci_request_regions(idev->pdev, drivername)) { 1506 if (pci_request_regions(idev->pdev, drivername)) {
1507 IRDA_WARNING("%s: io resource busy\n", __FUNCTION__); 1507 IRDA_WARNING("%s: io resource busy\n", __func__);
1508 goto errout; 1508 goto errout;
1509 } 1509 }
1510 ndev->base_addr = pci_resource_start(idev->pdev,0); 1510 ndev->base_addr = pci_resource_start(idev->pdev,0);
@@ -1519,7 +1519,7 @@ static int vlsi_open(struct net_device *ndev)
1519 if (request_irq(ndev->irq, vlsi_interrupt, IRQF_SHARED, 1519 if (request_irq(ndev->irq, vlsi_interrupt, IRQF_SHARED,
1520 drivername, ndev)) { 1520 drivername, ndev)) {
1521 IRDA_WARNING("%s: couldn't get IRQ: %d\n", 1521 IRDA_WARNING("%s: couldn't get IRQ: %d\n",
1522 __FUNCTION__, ndev->irq); 1522 __func__, ndev->irq);
1523 goto errout_io; 1523 goto errout_io;
1524 } 1524 }
1525 1525
@@ -1540,7 +1540,7 @@ static int vlsi_open(struct net_device *ndev)
1540 1540
1541 netif_start_queue(ndev); 1541 netif_start_queue(ndev);
1542 1542
1543 IRDA_MESSAGE("%s: device %s operational\n", __FUNCTION__, ndev->name); 1543 IRDA_MESSAGE("%s: device %s operational\n", __func__, ndev->name);
1544 1544
1545 return 0; 1545 return 0;
1546 1546
@@ -1574,7 +1574,7 @@ static int vlsi_close(struct net_device *ndev)
1574 1574
1575 pci_release_regions(idev->pdev); 1575 pci_release_regions(idev->pdev);
1576 1576
1577 IRDA_MESSAGE("%s: device %s stopped\n", __FUNCTION__, ndev->name); 1577 IRDA_MESSAGE("%s: device %s stopped\n", __func__, ndev->name);
1578 1578
1579 return 0; 1579 return 0;
1580} 1580}
@@ -1593,7 +1593,7 @@ static int vlsi_irda_init(struct net_device *ndev)
1593 1593
1594 if (pci_set_dma_mask(pdev,DMA_MASK_USED_BY_HW) 1594 if (pci_set_dma_mask(pdev,DMA_MASK_USED_BY_HW)
1595 || pci_set_dma_mask(pdev,DMA_MASK_MSTRPAGE)) { 1595 || pci_set_dma_mask(pdev,DMA_MASK_MSTRPAGE)) {
1596 IRDA_ERROR("%s: aborting due to PCI BM-DMA address limitations\n", __FUNCTION__); 1596 IRDA_ERROR("%s: aborting due to PCI BM-DMA address limitations\n", __func__);
1597 return -1; 1597 return -1;
1598 } 1598 }
1599 1599
@@ -1645,14 +1645,14 @@ vlsi_irda_probe(struct pci_dev *pdev, const struct pci_device_id *id)
1645 1645
1646 if ( !pci_resource_start(pdev,0) 1646 if ( !pci_resource_start(pdev,0)
1647 || !(pci_resource_flags(pdev,0) & IORESOURCE_IO) ) { 1647 || !(pci_resource_flags(pdev,0) & IORESOURCE_IO) ) {
1648 IRDA_ERROR("%s: bar 0 invalid", __FUNCTION__); 1648 IRDA_ERROR("%s: bar 0 invalid", __func__);
1649 goto out_disable; 1649 goto out_disable;
1650 } 1650 }
1651 1651
1652 ndev = alloc_irdadev(sizeof(*idev)); 1652 ndev = alloc_irdadev(sizeof(*idev));
1653 if (ndev==NULL) { 1653 if (ndev==NULL) {
1654 IRDA_ERROR("%s: Unable to allocate device memory.\n", 1654 IRDA_ERROR("%s: Unable to allocate device memory.\n",
1655 __FUNCTION__); 1655 __func__);
1656 goto out_disable; 1656 goto out_disable;
1657 } 1657 }
1658 1658
@@ -1667,7 +1667,7 @@ vlsi_irda_probe(struct pci_dev *pdev, const struct pci_device_id *id)
1667 goto out_freedev; 1667 goto out_freedev;
1668 1668
1669 if (register_netdev(ndev) < 0) { 1669 if (register_netdev(ndev) < 0) {
1670 IRDA_ERROR("%s: register_netdev failed\n", __FUNCTION__); 1670 IRDA_ERROR("%s: register_netdev failed\n", __func__);
1671 goto out_freedev; 1671 goto out_freedev;
1672 } 1672 }
1673 1673
@@ -1678,7 +1678,7 @@ vlsi_irda_probe(struct pci_dev *pdev, const struct pci_device_id *id)
1678 vlsi_proc_root, VLSI_PROC_FOPS, ndev); 1678 vlsi_proc_root, VLSI_PROC_FOPS, ndev);
1679 if (!ent) { 1679 if (!ent) {
1680 IRDA_WARNING("%s: failed to create proc entry\n", 1680 IRDA_WARNING("%s: failed to create proc entry\n",
1681 __FUNCTION__); 1681 __func__);
1682 } else { 1682 } else {
1683 ent->size = 0; 1683 ent->size = 0;
1684 } 1684 }
@@ -1745,7 +1745,7 @@ static int vlsi_irda_suspend(struct pci_dev *pdev, pm_message_t state)
1745 1745
1746 if (!ndev) { 1746 if (!ndev) {
1747 IRDA_ERROR("%s - %s: no netdevice \n", 1747 IRDA_ERROR("%s - %s: no netdevice \n",
1748 __FUNCTION__, pci_name(pdev)); 1748 __func__, pci_name(pdev));
1749 return 0; 1749 return 0;
1750 } 1750 }
1751 idev = ndev->priv; 1751 idev = ndev->priv;
@@ -1756,7 +1756,7 @@ static int vlsi_irda_suspend(struct pci_dev *pdev, pm_message_t state)
1756 pdev->current_state = state.event; 1756 pdev->current_state = state.event;
1757 } 1757 }
1758 else 1758 else
1759 IRDA_ERROR("%s - %s: invalid suspend request %u -> %u\n", __FUNCTION__, pci_name(pdev), pdev->current_state, state.event); 1759 IRDA_ERROR("%s - %s: invalid suspend request %u -> %u\n", __func__, pci_name(pdev), pdev->current_state, state.event);
1760 mutex_unlock(&idev->mtx); 1760 mutex_unlock(&idev->mtx);
1761 return 0; 1761 return 0;
1762 } 1762 }
@@ -1784,7 +1784,7 @@ static int vlsi_irda_resume(struct pci_dev *pdev)
1784 1784
1785 if (!ndev) { 1785 if (!ndev) {
1786 IRDA_ERROR("%s - %s: no netdevice \n", 1786 IRDA_ERROR("%s - %s: no netdevice \n",
1787 __FUNCTION__, pci_name(pdev)); 1787 __func__, pci_name(pdev));
1788 return 0; 1788 return 0;
1789 } 1789 }
1790 idev = ndev->priv; 1790 idev = ndev->priv;
@@ -1792,7 +1792,7 @@ static int vlsi_irda_resume(struct pci_dev *pdev)
1792 if (pdev->current_state == 0) { 1792 if (pdev->current_state == 0) {
1793 mutex_unlock(&idev->mtx); 1793 mutex_unlock(&idev->mtx);
1794 IRDA_WARNING("%s - %s: already resumed\n", 1794 IRDA_WARNING("%s - %s: already resumed\n",
1795 __FUNCTION__, pci_name(pdev)); 1795 __func__, pci_name(pdev));
1796 return 0; 1796 return 0;
1797 } 1797 }
1798 1798
@@ -1811,7 +1811,7 @@ static int vlsi_irda_resume(struct pci_dev *pdev)
1811 * now we explicitly set pdev->current_state = 0 after enabling the 1811 * now we explicitly set pdev->current_state = 0 after enabling the
1812 * device and independently resume_ok should catch any garbage config. 1812 * device and independently resume_ok should catch any garbage config.
1813 */ 1813 */
1814 IRDA_WARNING("%s - hm, nothing to resume?\n", __FUNCTION__); 1814 IRDA_WARNING("%s - hm, nothing to resume?\n", __func__);
1815 mutex_unlock(&idev->mtx); 1815 mutex_unlock(&idev->mtx);
1816 return 0; 1816 return 0;
1817 } 1817 }
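
The vlsi_set_baud() hunk above keeps the driver's existing fallback path: an undefined rate is logged, coerced to 9600, and then handled by deliberate switch fall-through into the SIR cases. A simplified standalone sketch of that pattern (rates and messages are illustrative, not taken verbatim from the driver):

#include <stdio.h>

static int set_baud(int baudrate)
{
	switch (baudrate) {
	default:
		fprintf(stderr, "undefined baudrate %d - fallback to 9600!\n",
			baudrate);
		baudrate = 9600;
		/* fall through */
	case 2400:
	case 9600:
	case 115200:
		printf("SIR mode at %d baud\n", baudrate);
		return 0;
	case 4000000:
		printf("FIR mode at 4 Mb/s\n");
		return 0;
	}
}

int main(void)
{
	set_baud(31337);	/* hits the fallback path */
	set_baud(4000000);
	return 0;
}
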
diff --git a/drivers/net/irda/vlsi_ir.h b/drivers/net/irda/vlsi_ir.h
index c8b9c74eea52..9b1884329fba 100644
--- a/drivers/net/irda/vlsi_ir.h
+++ b/drivers/net/irda/vlsi_ir.h
@@ -617,7 +617,7 @@ static inline void rd_set_addr_status(struct ring_descr *rd, dma_addr_t a, u8 s)
617 */ 617 */
618 618
619 if ((a & ~DMA_MASK_MSTRPAGE)>>24 != MSTRPAGE_VALUE) { 619 if ((a & ~DMA_MASK_MSTRPAGE)>>24 != MSTRPAGE_VALUE) {
620 IRDA_ERROR("%s: pci busaddr inconsistency!\n", __FUNCTION__); 620 IRDA_ERROR("%s: pci busaddr inconsistency!\n", __func__);
621 dump_stack(); 621 dump_stack();
622 return; 622 return;
623 } 623 }
diff --git a/drivers/net/irda/w83977af_ir.c b/drivers/net/irda/w83977af_ir.c
index 9fd2451b0fb2..002a6d769f21 100644
--- a/drivers/net/irda/w83977af_ir.c
+++ b/drivers/net/irda/w83977af_ir.c
@@ -114,7 +114,7 @@ static int __init w83977af_init(void)
114{ 114{
115 int i; 115 int i;
116 116
117 IRDA_DEBUG(0, "%s()\n", __FUNCTION__ ); 117 IRDA_DEBUG(0, "%s()\n", __func__ );
118 118
119 for (i=0; (io[i] < 2000) && (i < ARRAY_SIZE(dev_self)); i++) { 119 for (i=0; (io[i] < 2000) && (i < ARRAY_SIZE(dev_self)); i++) {
120 if (w83977af_open(i, io[i], irq[i], dma[i]) == 0) 120 if (w83977af_open(i, io[i], irq[i], dma[i]) == 0)
@@ -133,7 +133,7 @@ static void __exit w83977af_cleanup(void)
133{ 133{
134 int i; 134 int i;
135 135
136 IRDA_DEBUG(4, "%s()\n", __FUNCTION__ ); 136 IRDA_DEBUG(4, "%s()\n", __func__ );
137 137
138 for (i=0; i < ARRAY_SIZE(dev_self); i++) { 138 for (i=0; i < ARRAY_SIZE(dev_self); i++) {
139 if (dev_self[i]) 139 if (dev_self[i])
@@ -154,12 +154,12 @@ int w83977af_open(int i, unsigned int iobase, unsigned int irq,
154 struct w83977af_ir *self; 154 struct w83977af_ir *self;
155 int err; 155 int err;
156 156
157 IRDA_DEBUG(0, "%s()\n", __FUNCTION__ ); 157 IRDA_DEBUG(0, "%s()\n", __func__ );
158 158
159 /* Lock the port that we need */ 159 /* Lock the port that we need */
160 if (!request_region(iobase, CHIP_IO_EXTENT, driver_name)) { 160 if (!request_region(iobase, CHIP_IO_EXTENT, driver_name)) {
161 IRDA_DEBUG(0, "%s(), can't get iobase of 0x%03x\n", 161 IRDA_DEBUG(0, "%s(), can't get iobase of 0x%03x\n",
162 __FUNCTION__ , iobase); 162 __func__ , iobase);
163 return -ENODEV; 163 return -ENODEV;
164 } 164 }
165 165
@@ -241,7 +241,7 @@ int w83977af_open(int i, unsigned int iobase, unsigned int irq,
241 241
242 err = register_netdev(dev); 242 err = register_netdev(dev);
243 if (err) { 243 if (err) {
244 IRDA_ERROR("%s(), register_netdevice() failed!\n", __FUNCTION__); 244 IRDA_ERROR("%s(), register_netdevice() failed!\n", __func__);
245 goto err_out3; 245 goto err_out3;
246 } 246 }
247 IRDA_MESSAGE("IrDA: Registered device %s\n", dev->name); 247 IRDA_MESSAGE("IrDA: Registered device %s\n", dev->name);
@@ -273,7 +273,7 @@ static int w83977af_close(struct w83977af_ir *self)
273{ 273{
274 int iobase; 274 int iobase;
275 275
276 IRDA_DEBUG(0, "%s()\n", __FUNCTION__ ); 276 IRDA_DEBUG(0, "%s()\n", __func__ );
277 277
278 iobase = self->io.fir_base; 278 iobase = self->io.fir_base;
279 279
@@ -294,7 +294,7 @@ static int w83977af_close(struct w83977af_ir *self)
294 294
295 /* Release the PORT that this driver is using */ 295 /* Release the PORT that this driver is using */
296 IRDA_DEBUG(0 , "%s(), Releasing Region %03x\n", 296 IRDA_DEBUG(0 , "%s(), Releasing Region %03x\n",
297 __FUNCTION__ , self->io.fir_base); 297 __func__ , self->io.fir_base);
298 release_region(self->io.fir_base, self->io.fir_ext); 298 release_region(self->io.fir_base, self->io.fir_ext);
299 299
300 if (self->tx_buff.head) 300 if (self->tx_buff.head)
@@ -316,7 +316,7 @@ int w83977af_probe( int iobase, int irq, int dma)
316 int i; 316 int i;
317 317
318 for (i=0; i < 2; i++) { 318 for (i=0; i < 2; i++) {
319 IRDA_DEBUG( 0, "%s()\n", __FUNCTION__ ); 319 IRDA_DEBUG( 0, "%s()\n", __func__ );
320#ifdef CONFIG_USE_W977_PNP 320#ifdef CONFIG_USE_W977_PNP
321 /* Enter PnP configuration mode */ 321 /* Enter PnP configuration mode */
322 w977_efm_enter(efbase[i]); 322 w977_efm_enter(efbase[i]);
@@ -403,7 +403,7 @@ int w83977af_probe( int iobase, int irq, int dma)
403 return 0; 403 return 0;
404 } else { 404 } else {
405 /* Try next extended function register address */ 405 /* Try next extended function register address */
406 IRDA_DEBUG( 0, "%s(), Wrong chip version", __FUNCTION__ ); 406 IRDA_DEBUG( 0, "%s(), Wrong chip version", __func__ );
407 } 407 }
408 } 408 }
409 return -1; 409 return -1;
@@ -439,19 +439,19 @@ void w83977af_change_speed(struct w83977af_ir *self, __u32 speed)
439 case 115200: outb(0x01, iobase+ABLL); break; 439 case 115200: outb(0x01, iobase+ABLL); break;
440 case 576000: 440 case 576000:
441 ir_mode = HCR_MIR_576; 441 ir_mode = HCR_MIR_576;
442 IRDA_DEBUG(0, "%s(), handling baud of 576000\n", __FUNCTION__ ); 442 IRDA_DEBUG(0, "%s(), handling baud of 576000\n", __func__ );
443 break; 443 break;
444 case 1152000: 444 case 1152000:
445 ir_mode = HCR_MIR_1152; 445 ir_mode = HCR_MIR_1152;
446 IRDA_DEBUG(0, "%s(), handling baud of 1152000\n", __FUNCTION__ ); 446 IRDA_DEBUG(0, "%s(), handling baud of 1152000\n", __func__ );
447 break; 447 break;
448 case 4000000: 448 case 4000000:
449 ir_mode = HCR_FIR; 449 ir_mode = HCR_FIR;
450 IRDA_DEBUG(0, "%s(), handling baud of 4000000\n", __FUNCTION__ ); 450 IRDA_DEBUG(0, "%s(), handling baud of 4000000\n", __func__ );
451 break; 451 break;
452 default: 452 default:
453 ir_mode = HCR_FIR; 453 ir_mode = HCR_FIR;
454 IRDA_DEBUG(0, "%s(), unknown baud rate of %d\n", __FUNCTION__ , speed); 454 IRDA_DEBUG(0, "%s(), unknown baud rate of %d\n", __func__ , speed);
455 break; 455 break;
456 } 456 }
457 457
@@ -501,7 +501,7 @@ int w83977af_hard_xmit(struct sk_buff *skb, struct net_device *dev)
501 501
502 iobase = self->io.fir_base; 502 iobase = self->io.fir_base;
503 503
504 IRDA_DEBUG(4, "%s(%ld), skb->len=%d\n", __FUNCTION__ , jiffies, 504 IRDA_DEBUG(4, "%s(%ld), skb->len=%d\n", __func__ , jiffies,
505 (int) skb->len); 505 (int) skb->len);
506 506
507 /* Lock transmit buffer */ 507 /* Lock transmit buffer */
@@ -549,7 +549,7 @@ int w83977af_hard_xmit(struct sk_buff *skb, struct net_device *dev)
549 outb(ICR_ETMRI, iobase+ICR); 549 outb(ICR_ETMRI, iobase+ICR);
550 } else { 550 } else {
551#endif 551#endif
552 IRDA_DEBUG(4, "%s(%ld), mtt=%d\n", __FUNCTION__ , jiffies, mtt); 552 IRDA_DEBUG(4, "%s(%ld), mtt=%d\n", __func__ , jiffies, mtt);
553 if (mtt) 553 if (mtt)
554 udelay(mtt); 554 udelay(mtt);
555 555
@@ -591,7 +591,7 @@ static void w83977af_dma_write(struct w83977af_ir *self, int iobase)
591 unsigned long flags; 591 unsigned long flags;
592 __u8 hcr; 592 __u8 hcr;
593#endif 593#endif
594 IRDA_DEBUG(4, "%s(), len=%d\n", __FUNCTION__ , self->tx_buff.len); 594 IRDA_DEBUG(4, "%s(), len=%d\n", __func__ , self->tx_buff.len);
595 595
596 /* Save current set */ 596 /* Save current set */
597 set = inb(iobase+SSR); 597 set = inb(iobase+SSR);
@@ -643,7 +643,7 @@ static int w83977af_pio_write(int iobase, __u8 *buf, int len, int fifo_size)
643 int actual = 0; 643 int actual = 0;
644 __u8 set; 644 __u8 set;
645 645
646 IRDA_DEBUG(4, "%s()\n", __FUNCTION__ ); 646 IRDA_DEBUG(4, "%s()\n", __func__ );
647 647
648 /* Save current bank */ 648 /* Save current bank */
649 set = inb(iobase+SSR); 649 set = inb(iobase+SSR);
@@ -651,11 +651,11 @@ static int w83977af_pio_write(int iobase, __u8 *buf, int len, int fifo_size)
651 switch_bank(iobase, SET0); 651 switch_bank(iobase, SET0);
652 if (!(inb_p(iobase+USR) & USR_TSRE)) { 652 if (!(inb_p(iobase+USR) & USR_TSRE)) {
653 IRDA_DEBUG(4, 653 IRDA_DEBUG(4,
654 "%s(), warning, FIFO not empty yet!\n", __FUNCTION__ ); 654 "%s(), warning, FIFO not empty yet!\n", __func__ );
655 655
656 fifo_size -= 17; 656 fifo_size -= 17;
657 IRDA_DEBUG(4, "%s(), %d bytes left in tx fifo\n", 657 IRDA_DEBUG(4, "%s(), %d bytes left in tx fifo\n",
658 __FUNCTION__ , fifo_size); 658 __func__ , fifo_size);
659 } 659 }
660 660
661 /* Fill FIFO with current frame */ 661 /* Fill FIFO with current frame */
@@ -665,7 +665,7 @@ static int w83977af_pio_write(int iobase, __u8 *buf, int len, int fifo_size)
665 } 665 }
666 666
667 IRDA_DEBUG(4, "%s(), fifo_size %d ; %d sent of %d\n", 667 IRDA_DEBUG(4, "%s(), fifo_size %d ; %d sent of %d\n",
668 __FUNCTION__ , fifo_size, actual, len); 668 __func__ , fifo_size, actual, len);
669 669
670 /* Restore bank */ 670 /* Restore bank */
671 outb(set, iobase+SSR); 671 outb(set, iobase+SSR);
@@ -685,7 +685,7 @@ static void w83977af_dma_xmit_complete(struct w83977af_ir *self)
685 int iobase; 685 int iobase;
686 __u8 set; 686 __u8 set;
687 687
688 IRDA_DEBUG(4, "%s(%ld)\n", __FUNCTION__ , jiffies); 688 IRDA_DEBUG(4, "%s(%ld)\n", __func__ , jiffies);
689 689
690 IRDA_ASSERT(self != NULL, return;); 690 IRDA_ASSERT(self != NULL, return;);
691 691
@@ -700,7 +700,7 @@ static void w83977af_dma_xmit_complete(struct w83977af_ir *self)
700 700
701 /* Check for underrun! */ 701 /* Check for underrun! */
702 if (inb(iobase+AUDR) & AUDR_UNDR) { 702 if (inb(iobase+AUDR) & AUDR_UNDR) {
703 IRDA_DEBUG(0, "%s(), Transmit underrun!\n", __FUNCTION__ ); 703 IRDA_DEBUG(0, "%s(), Transmit underrun!\n", __func__ );
704 704
705 self->stats.tx_errors++; 705 self->stats.tx_errors++;
706 self->stats.tx_fifo_errors++; 706 self->stats.tx_fifo_errors++;
@@ -741,7 +741,7 @@ int w83977af_dma_receive(struct w83977af_ir *self)
741#endif 741#endif
742 IRDA_ASSERT(self != NULL, return -1;); 742 IRDA_ASSERT(self != NULL, return -1;);
743 743
744 IRDA_DEBUG(4, "%s\n", __FUNCTION__ ); 744 IRDA_DEBUG(4, "%s\n", __func__ );
745 745
746 iobase= self->io.fir_base; 746 iobase= self->io.fir_base;
747 747
@@ -812,7 +812,7 @@ int w83977af_dma_receive_complete(struct w83977af_ir *self)
812 __u8 set; 812 __u8 set;
813 __u8 status; 813 __u8 status;
814 814
815 IRDA_DEBUG(4, "%s\n", __FUNCTION__ ); 815 IRDA_DEBUG(4, "%s\n", __func__ );
816 816
817 st_fifo = &self->st_fifo; 817 st_fifo = &self->st_fifo;
818 818
@@ -892,7 +892,7 @@ int w83977af_dma_receive_complete(struct w83977af_ir *self)
892 skb = dev_alloc_skb(len+1); 892 skb = dev_alloc_skb(len+1);
893 if (skb == NULL) { 893 if (skb == NULL) {
894 printk(KERN_INFO 894 printk(KERN_INFO
895 "%s(), memory squeeze, dropping frame.\n", __FUNCTION__); 895 "%s(), memory squeeze, dropping frame.\n", __func__);
896 /* Restore set register */ 896 /* Restore set register */
897 outb(set, iobase+SSR); 897 outb(set, iobase+SSR);
898 898
@@ -943,7 +943,7 @@ static void w83977af_pio_receive(struct w83977af_ir *self)
943 __u8 byte = 0x00; 943 __u8 byte = 0x00;
944 int iobase; 944 int iobase;
945 945
946 IRDA_DEBUG(4, "%s()\n", __FUNCTION__ ); 946 IRDA_DEBUG(4, "%s()\n", __func__ );
947 947
948 IRDA_ASSERT(self != NULL, return;); 948 IRDA_ASSERT(self != NULL, return;);
949 949
@@ -970,7 +970,7 @@ static __u8 w83977af_sir_interrupt(struct w83977af_ir *self, int isr)
970 __u8 set; 970 __u8 set;
971 int iobase; 971 int iobase;
972 972
973 IRDA_DEBUG(4, "%s(), isr=%#x\n", __FUNCTION__ , isr); 973 IRDA_DEBUG(4, "%s(), isr=%#x\n", __func__ , isr);
974 974
975 iobase = self->io.fir_base; 975 iobase = self->io.fir_base;
976 /* Transmit FIFO low on data */ 976 /* Transmit FIFO low on data */
@@ -1007,7 +1007,7 @@ static __u8 w83977af_sir_interrupt(struct w83977af_ir *self, int isr)
1007 /* Check if we need to change the speed? */ 1007 /* Check if we need to change the speed? */
1008 if (self->new_speed) { 1008 if (self->new_speed) {
1009 IRDA_DEBUG(2, 1009 IRDA_DEBUG(2,
1010 "%s(), Changing speed!\n", __FUNCTION__ ); 1010 "%s(), Changing speed!\n", __func__ );
1011 w83977af_change_speed(self, self->new_speed); 1011 w83977af_change_speed(self, self->new_speed);
1012 self->new_speed = 0; 1012 self->new_speed = 0;
1013 } 1013 }
@@ -1189,7 +1189,7 @@ static int w83977af_net_open(struct net_device *dev)
1189 char hwname[32]; 1189 char hwname[32];
1190 __u8 set; 1190 __u8 set;
1191 1191
1192 IRDA_DEBUG(0, "%s()\n", __FUNCTION__ ); 1192 IRDA_DEBUG(0, "%s()\n", __func__ );
1193 1193
1194 IRDA_ASSERT(dev != NULL, return -1;); 1194 IRDA_ASSERT(dev != NULL, return -1;);
1195 self = (struct w83977af_ir *) dev->priv; 1195 self = (struct w83977af_ir *) dev->priv;
@@ -1252,7 +1252,7 @@ static int w83977af_net_close(struct net_device *dev)
1252 int iobase; 1252 int iobase;
1253 __u8 set; 1253 __u8 set;
1254 1254
1255 IRDA_DEBUG(0, "%s()\n", __FUNCTION__ ); 1255 IRDA_DEBUG(0, "%s()\n", __func__ );
1256 1256
1257 IRDA_ASSERT(dev != NULL, return -1;); 1257 IRDA_ASSERT(dev != NULL, return -1;);
1258 1258
@@ -1307,7 +1307,7 @@ static int w83977af_net_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
1307 1307
1308 IRDA_ASSERT(self != NULL, return -1;); 1308 IRDA_ASSERT(self != NULL, return -1;);
1309 1309
1310 IRDA_DEBUG(2, "%s(), %s, (cmd=0x%X)\n", __FUNCTION__ , dev->name, cmd); 1310 IRDA_DEBUG(2, "%s(), %s, (cmd=0x%X)\n", __func__ , dev->name, cmd);
1311 1311
1312 spin_lock_irqsave(&self->lock, flags); 1312 spin_lock_irqsave(&self->lock, flags);
1313 1313
diff --git a/drivers/net/ixp2000/ixp2400-msf.c b/drivers/net/ixp2000/ixp2400-msf.c
index 9ec38eebfb56..f5ffd7e05d26 100644
--- a/drivers/net/ixp2000/ixp2400-msf.c
+++ b/drivers/net/ixp2000/ixp2400-msf.c
@@ -13,8 +13,8 @@
13 13
14#include <linux/kernel.h> 14#include <linux/kernel.h>
15#include <linux/init.h> 15#include <linux/init.h>
16#include <asm/hardware.h> 16#include <mach/hardware.h>
17#include <asm/arch/ixp2000-regs.h> 17#include <mach/ixp2000-regs.h>
18#include <asm/delay.h> 18#include <asm/delay.h>
19#include <asm/io.h> 19#include <asm/io.h>
20#include "ixp2400-msf.h" 20#include "ixp2400-msf.h"
diff --git a/drivers/net/ixp2000/ixpdev.c b/drivers/net/ixp2000/ixpdev.c
index 7111c65f0b30..7b70c66504a0 100644
--- a/drivers/net/ixp2000/ixpdev.c
+++ b/drivers/net/ixp2000/ixpdev.c
@@ -16,7 +16,6 @@
16#include <linux/init.h> 16#include <linux/init.h>
17#include <linux/moduleparam.h> 17#include <linux/moduleparam.h>
18#include <asm/hardware/uengine.h> 18#include <asm/hardware/uengine.h>
19#include <asm/mach-types.h>
20#include <asm/io.h> 19#include <asm/io.h>
21#include "ixp2400_rx.ucode" 20#include "ixp2400_rx.ucode"
22#include "ixp2400_tx.ucode" 21#include "ixp2400_tx.ucode"
diff --git a/drivers/net/lp486e.c b/drivers/net/lp486e.c
index 591a7e4220c7..83fa9d82a004 100644
--- a/drivers/net/lp486e.c
+++ b/drivers/net/lp486e.c
@@ -1272,8 +1272,6 @@ static void set_multicast_list(struct net_device *dev) {
1272 return; 1272 return;
1273 } 1273 }
1274 if (dev->mc_count == 0 && !(dev->flags & (IFF_PROMISC | IFF_ALLMULTI))) { 1274 if (dev->mc_count == 0 && !(dev->flags & (IFF_PROMISC | IFF_ALLMULTI))) {
1275 if (dev->flags & IFF_ALLMULTI)
1276 dev->flags |= IFF_PROMISC;
1277 lp->i596_config[8] &= ~0x01; 1275 lp->i596_config[8] &= ~0x01;
1278 } else { 1276 } else {
1279 lp->i596_config[8] |= 0x01; 1277 lp->i596_config[8] |= 0x01;
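
The lp486e.c hunk above drops two lines that could never execute: they sit inside a branch taken only when neither IFF_PROMISC nor IFF_ALLMULTI is set, so the inner IFF_ALLMULTI test is always false (and a driver should not be rewriting dev->flags there anyway). A standalone sketch of why the branch is dead, using the flag values from linux/if.h:

#include <stdio.h>

#define IFF_PROMISC	0x100
#define IFF_ALLMULTI	0x200

int main(void)
{
	unsigned int flags = IFF_ALLMULTI;	/* try any combination */
	int mc_count = 0;

	if (mc_count == 0 && !(flags & (IFF_PROMISC | IFF_ALLMULTI))) {
		if (flags & IFF_ALLMULTI)	/* unreachable: the outer test
						 * already excluded ALLMULTI */
			printf("would have forced promiscuous mode\n");
		printf("multicast filtering disabled\n");
	} else {
		printf("multicast filtering left enabled\n");
	}
	return 0;
}
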
diff --git a/drivers/net/macb.c b/drivers/net/macb.c
index daba82bbcb56..84c77f1f9a5c 100644
--- a/drivers/net/macb.c
+++ b/drivers/net/macb.c
@@ -21,8 +21,8 @@
21#include <linux/platform_device.h> 21#include <linux/platform_device.h>
22#include <linux/phy.h> 22#include <linux/phy.h>
23 23
24#include <asm/arch/board.h> 24#include <mach/board.h>
25#include <asm/arch/cpu.h> 25#include <mach/cpu.h>
26 26
27#include "macb.h" 27#include "macb.h"
28 28
diff --git a/drivers/net/meth.c b/drivers/net/meth.c
index 4cb364e67dc6..0a97c26df6ab 100644
--- a/drivers/net/meth.c
+++ b/drivers/net/meth.c
@@ -100,7 +100,7 @@ static inline void load_eaddr(struct net_device *dev)
100 DPRINTK("Loading MAC Address: %s\n", print_mac(mac, dev->dev_addr)); 100 DPRINTK("Loading MAC Address: %s\n", print_mac(mac, dev->dev_addr));
101 macaddr = 0; 101 macaddr = 0;
102 for (i = 0; i < 6; i++) 102 for (i = 0; i < 6; i++)
103 macaddr |= dev->dev_addr[i] << ((5 - i) * 8); 103 macaddr |= (u64)dev->dev_addr[i] << ((5 - i) * 8);
104 104
105 mace->eth.mac_addr = macaddr; 105 mace->eth.mac_addr = macaddr;
106} 106}
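
The meth.c hunk above adds a 64-bit cast before the shift. Without it, dev_addr[i] is promoted to a 32-bit int and shifting it by 32 or 40 bits is undefined behaviour, so the top two bytes of the MAC address never reach the 64-bit register. A standalone sketch of the corrected arithmetic (the example address is made up):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	const uint8_t dev_addr[6] = { 0x02, 0x11, 0x22, 0x33, 0x44, 0x55 };
	uint64_t macaddr = 0;
	int i;

	for (i = 0; i < 6; i++)
		macaddr |= (uint64_t)dev_addr[i] << ((5 - i) * 8);

	/* Prints 021122334455; dropping the cast would shift a promoted
	 * int by 40 and 32 bits for i = 0 and i = 1, which is undefined. */
	printf("%012llx\n", (unsigned long long)macaddr);
	return 0;
}
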
diff --git a/drivers/net/mv643xx_eth.c b/drivers/net/mv643xx_eth.c
index 8a97a0066a88..46819af3b062 100644
--- a/drivers/net/mv643xx_eth.c
+++ b/drivers/net/mv643xx_eth.c
@@ -55,7 +55,7 @@
55#include <asm/system.h> 55#include <asm/system.h>
56 56
57static char mv643xx_eth_driver_name[] = "mv643xx_eth"; 57static char mv643xx_eth_driver_name[] = "mv643xx_eth";
58static char mv643xx_eth_driver_version[] = "1.1"; 58static char mv643xx_eth_driver_version[] = "1.2";
59 59
60#define MV643XX_ETH_CHECKSUM_OFFLOAD_TX 60#define MV643XX_ETH_CHECKSUM_OFFLOAD_TX
61#define MV643XX_ETH_NAPI 61#define MV643XX_ETH_NAPI
@@ -90,12 +90,21 @@ static char mv643xx_eth_driver_version[] = "1.1";
90#define PORT_SERIAL_CONTROL(p) (0x043c + ((p) << 10)) 90#define PORT_SERIAL_CONTROL(p) (0x043c + ((p) << 10))
91#define PORT_STATUS(p) (0x0444 + ((p) << 10)) 91#define PORT_STATUS(p) (0x0444 + ((p) << 10))
92#define TX_FIFO_EMPTY 0x00000400 92#define TX_FIFO_EMPTY 0x00000400
93#define TX_IN_PROGRESS 0x00000080
94#define PORT_SPEED_MASK 0x00000030
95#define PORT_SPEED_1000 0x00000010
96#define PORT_SPEED_100 0x00000020
97#define PORT_SPEED_10 0x00000000
98#define FLOW_CONTROL_ENABLED 0x00000008
99#define FULL_DUPLEX 0x00000004
100#define LINK_UP 0x00000002
93#define TXQ_COMMAND(p) (0x0448 + ((p) << 10)) 101#define TXQ_COMMAND(p) (0x0448 + ((p) << 10))
94#define TXQ_FIX_PRIO_CONF(p) (0x044c + ((p) << 10)) 102#define TXQ_FIX_PRIO_CONF(p) (0x044c + ((p) << 10))
95#define TX_BW_RATE(p) (0x0450 + ((p) << 10)) 103#define TX_BW_RATE(p) (0x0450 + ((p) << 10))
96#define TX_BW_MTU(p) (0x0458 + ((p) << 10)) 104#define TX_BW_MTU(p) (0x0458 + ((p) << 10))
97#define TX_BW_BURST(p) (0x045c + ((p) << 10)) 105#define TX_BW_BURST(p) (0x045c + ((p) << 10))
98#define INT_CAUSE(p) (0x0460 + ((p) << 10)) 106#define INT_CAUSE(p) (0x0460 + ((p) << 10))
107#define INT_TX_END_0 0x00080000
99#define INT_TX_END 0x07f80000 108#define INT_TX_END 0x07f80000
100#define INT_RX 0x0007fbfc 109#define INT_RX 0x0007fbfc
101#define INT_EXT 0x00000002 110#define INT_EXT 0x00000002
@@ -127,21 +136,21 @@ static char mv643xx_eth_driver_version[] = "1.1";
127/* 136/*
128 * SDMA configuration register. 137 * SDMA configuration register.
129 */ 138 */
130#define RX_BURST_SIZE_4_64BIT (2 << 1) 139#define RX_BURST_SIZE_16_64BIT (4 << 1)
131#define BLM_RX_NO_SWAP (1 << 4) 140#define BLM_RX_NO_SWAP (1 << 4)
132#define BLM_TX_NO_SWAP (1 << 5) 141#define BLM_TX_NO_SWAP (1 << 5)
133#define TX_BURST_SIZE_4_64BIT (2 << 22) 142#define TX_BURST_SIZE_16_64BIT (4 << 22)
134 143
135#if defined(__BIG_ENDIAN) 144#if defined(__BIG_ENDIAN)
136#define PORT_SDMA_CONFIG_DEFAULT_VALUE \ 145#define PORT_SDMA_CONFIG_DEFAULT_VALUE \
137 RX_BURST_SIZE_4_64BIT | \ 146 RX_BURST_SIZE_16_64BIT | \
138 TX_BURST_SIZE_4_64BIT 147 TX_BURST_SIZE_16_64BIT
139#elif defined(__LITTLE_ENDIAN) 148#elif defined(__LITTLE_ENDIAN)
140#define PORT_SDMA_CONFIG_DEFAULT_VALUE \ 149#define PORT_SDMA_CONFIG_DEFAULT_VALUE \
141 RX_BURST_SIZE_4_64BIT | \ 150 RX_BURST_SIZE_16_64BIT | \
142 BLM_RX_NO_SWAP | \ 151 BLM_RX_NO_SWAP | \
143 BLM_TX_NO_SWAP | \ 152 BLM_TX_NO_SWAP | \
144 TX_BURST_SIZE_4_64BIT 153 TX_BURST_SIZE_16_64BIT
145#else 154#else
146#error One of __BIG_ENDIAN or __LITTLE_ENDIAN must be defined 155#error One of __BIG_ENDIAN or __LITTLE_ENDIAN must be defined
147#endif 156#endif
@@ -153,9 +162,7 @@ static char mv643xx_eth_driver_version[] = "1.1";
153#define SET_MII_SPEED_TO_100 (1 << 24) 162#define SET_MII_SPEED_TO_100 (1 << 24)
154#define SET_GMII_SPEED_TO_1000 (1 << 23) 163#define SET_GMII_SPEED_TO_1000 (1 << 23)
155#define SET_FULL_DUPLEX_MODE (1 << 21) 164#define SET_FULL_DUPLEX_MODE (1 << 21)
156#define MAX_RX_PACKET_1522BYTE (1 << 17)
157#define MAX_RX_PACKET_9700BYTE (5 << 17) 165#define MAX_RX_PACKET_9700BYTE (5 << 17)
158#define MAX_RX_PACKET_MASK (7 << 17)
159#define DISABLE_AUTO_NEG_SPEED_GMII (1 << 13) 166#define DISABLE_AUTO_NEG_SPEED_GMII (1 << 13)
160#define DO_NOT_FORCE_LINK_FAIL (1 << 10) 167#define DO_NOT_FORCE_LINK_FAIL (1 << 10)
161#define SERIAL_PORT_CONTROL_RESERVED (1 << 9) 168#define SERIAL_PORT_CONTROL_RESERVED (1 << 9)
@@ -228,6 +235,8 @@ struct tx_desc {
228#define GEN_IP_V4_CHECKSUM 0x00040000 235#define GEN_IP_V4_CHECKSUM 0x00040000
229#define GEN_TCP_UDP_CHECKSUM 0x00020000 236#define GEN_TCP_UDP_CHECKSUM 0x00020000
230#define UDP_FRAME 0x00010000 237#define UDP_FRAME 0x00010000
238#define MAC_HDR_EXTRA_4_BYTES 0x00008000
239#define MAC_HDR_EXTRA_8_BYTES 0x00000200
231 240
232#define TX_IHL_SHIFT 11 241#define TX_IHL_SHIFT 11
233 242
@@ -404,6 +413,17 @@ static void rxq_disable(struct rx_queue *rxq)
404 udelay(10); 413 udelay(10);
405} 414}
406 415
416static void txq_reset_hw_ptr(struct tx_queue *txq)
417{
418 struct mv643xx_eth_private *mp = txq_to_mp(txq);
419 int off = TXQ_CURRENT_DESC_PTR(mp->port_num, txq->index);
420 u32 addr;
421
422 addr = (u32)txq->tx_desc_dma;
423 addr += txq->tx_curr_desc * sizeof(struct tx_desc);
424 wrl(mp, off, addr);
425}
426
407static void txq_enable(struct tx_queue *txq) 427static void txq_enable(struct tx_queue *txq)
408{ 428{
409 struct mv643xx_eth_private *mp = txq_to_mp(txq); 429 struct mv643xx_eth_private *mp = txq_to_mp(txq);
@@ -614,6 +634,12 @@ static int mv643xx_eth_poll(struct napi_struct *napi, int budget)
614 for (i = 0; i < 8; i++) 634 for (i = 0; i < 8; i++)
615 if (mp->txq_mask & (1 << i)) 635 if (mp->txq_mask & (1 << i))
616 txq_reclaim(mp->txq + i, 0); 636 txq_reclaim(mp->txq + i, 0);
637
638 if (netif_carrier_ok(mp->dev)) {
639 spin_lock(&mp->lock);
640 __txq_maybe_wake(mp->txq + mp->txq_primary);
641 spin_unlock(&mp->lock);
642 }
617 } 643 }
618#endif 644#endif
619 645
@@ -706,6 +732,7 @@ static inline __be16 sum16_as_be(__sum16 sum)
706 732
707static void txq_submit_skb(struct tx_queue *txq, struct sk_buff *skb) 733static void txq_submit_skb(struct tx_queue *txq, struct sk_buff *skb)
708{ 734{
735 struct mv643xx_eth_private *mp = txq_to_mp(txq);
709 int nr_frags = skb_shinfo(skb)->nr_frags; 736 int nr_frags = skb_shinfo(skb)->nr_frags;
710 int tx_index; 737 int tx_index;
711 struct tx_desc *desc; 738 struct tx_desc *desc;
@@ -732,12 +759,36 @@ static void txq_submit_skb(struct tx_queue *txq, struct sk_buff *skb)
732 desc->buf_ptr = dma_map_single(NULL, skb->data, length, DMA_TO_DEVICE); 759 desc->buf_ptr = dma_map_single(NULL, skb->data, length, DMA_TO_DEVICE);
733 760
734 if (skb->ip_summed == CHECKSUM_PARTIAL) { 761 if (skb->ip_summed == CHECKSUM_PARTIAL) {
735 BUG_ON(skb->protocol != htons(ETH_P_IP)); 762 int mac_hdr_len;
763
764 BUG_ON(skb->protocol != htons(ETH_P_IP) &&
765 skb->protocol != htons(ETH_P_8021Q));
736 766
737 cmd_sts |= GEN_TCP_UDP_CHECKSUM | 767 cmd_sts |= GEN_TCP_UDP_CHECKSUM |
738 GEN_IP_V4_CHECKSUM | 768 GEN_IP_V4_CHECKSUM |
739 ip_hdr(skb)->ihl << TX_IHL_SHIFT; 769 ip_hdr(skb)->ihl << TX_IHL_SHIFT;
740 770
771 mac_hdr_len = (void *)ip_hdr(skb) - (void *)skb->data;
772 switch (mac_hdr_len - ETH_HLEN) {
773 case 0:
774 break;
775 case 4:
776 cmd_sts |= MAC_HDR_EXTRA_4_BYTES;
777 break;
778 case 8:
779 cmd_sts |= MAC_HDR_EXTRA_8_BYTES;
780 break;
781 case 12:
782 cmd_sts |= MAC_HDR_EXTRA_4_BYTES;
783 cmd_sts |= MAC_HDR_EXTRA_8_BYTES;
784 break;
785 default:
786 if (net_ratelimit())
787 dev_printk(KERN_ERR, &txq_to_mp(txq)->dev->dev,
788 "mac header length is %d?!\n", mac_hdr_len);
789 break;
790 }
791
741 switch (ip_hdr(skb)->protocol) { 792 switch (ip_hdr(skb)->protocol) {
742 case IPPROTO_UDP: 793 case IPPROTO_UDP:
743 cmd_sts |= UDP_FRAME; 794 cmd_sts |= UDP_FRAME;
@@ -759,6 +810,10 @@ static void txq_submit_skb(struct tx_queue *txq, struct sk_buff *skb)
759 wmb(); 810 wmb();
760 desc->cmd_sts = cmd_sts; 811 desc->cmd_sts = cmd_sts;
761 812
813 /* clear TX_END interrupt status */
814 wrl(mp, INT_CAUSE(mp->port_num), ~(INT_TX_END_0 << txq->index));
815 rdl(mp, INT_CAUSE(mp->port_num));
816
762 /* ensure all descriptors are written before poking hardware */ 817 /* ensure all descriptors are written before poking hardware */
763 wmb(); 818 wmb();
764 txq_enable(txq); 819 txq_enable(txq);
@@ -1112,10 +1167,28 @@ static int mv643xx_eth_get_settings(struct net_device *dev, struct ethtool_cmd *
1112 1167
1113static int mv643xx_eth_get_settings_phyless(struct net_device *dev, struct ethtool_cmd *cmd) 1168static int mv643xx_eth_get_settings_phyless(struct net_device *dev, struct ethtool_cmd *cmd)
1114{ 1169{
1170 struct mv643xx_eth_private *mp = netdev_priv(dev);
1171 u32 port_status;
1172
1173 port_status = rdl(mp, PORT_STATUS(mp->port_num));
1174
1115 cmd->supported = SUPPORTED_MII; 1175 cmd->supported = SUPPORTED_MII;
1116 cmd->advertising = ADVERTISED_MII; 1176 cmd->advertising = ADVERTISED_MII;
1117 cmd->speed = SPEED_1000; 1177 switch (port_status & PORT_SPEED_MASK) {
1118 cmd->duplex = DUPLEX_FULL; 1178 case PORT_SPEED_10:
1179 cmd->speed = SPEED_10;
1180 break;
1181 case PORT_SPEED_100:
1182 cmd->speed = SPEED_100;
1183 break;
1184 case PORT_SPEED_1000:
1185 cmd->speed = SPEED_1000;
1186 break;
1187 default:
1188 cmd->speed = -1;
1189 break;
1190 }
1191 cmd->duplex = (port_status & FULL_DUPLEX) ? DUPLEX_FULL : DUPLEX_HALF;
1119 cmd->port = PORT_MII; 1192 cmd->port = PORT_MII;
1120 cmd->phy_address = 0; 1193 cmd->phy_address = 0;
1121 cmd->transceiver = XCVR_INTERNAL; 1194 cmd->transceiver = XCVR_INTERNAL;
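
The mv643xx_eth.c hunk above stops hard-coding 1000 Mb/s full duplex for PHY-less ports and instead decodes the live PORT_STATUS register, using the PORT_SPEED_* and FULL_DUPLEX bits added near the top of this file's diff. A standalone sketch of that decoding (the register value is a made-up sample):

#include <stdio.h>

#define PORT_SPEED_MASK		0x00000030
#define PORT_SPEED_1000		0x00000010
#define PORT_SPEED_100		0x00000020
#define PORT_SPEED_10		0x00000000
#define FULL_DUPLEX		0x00000004
#define LINK_UP			0x00000002

int main(void)
{
	unsigned int port_status = PORT_SPEED_100 | FULL_DUPLEX | LINK_UP;
	int speed;

	switch (port_status & PORT_SPEED_MASK) {
	case PORT_SPEED_10:
		speed = 10;
		break;
	case PORT_SPEED_100:
		speed = 100;
		break;
	case PORT_SPEED_1000:
		speed = 1000;
		break;
	default:
		speed = -1;
		break;
	}

	printf("link %s, %d Mb/s, %s duplex\n",
	       (port_status & LINK_UP) ? "up" : "down",
	       speed,
	       (port_status & FULL_DUPLEX) ? "full" : "half");
	return 0;
}
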
@@ -1539,8 +1612,11 @@ static int txq_init(struct mv643xx_eth_private *mp, int index)
1539 1612
1540 tx_desc = (struct tx_desc *)txq->tx_desc_area; 1613 tx_desc = (struct tx_desc *)txq->tx_desc_area;
1541 for (i = 0; i < txq->tx_ring_size; i++) { 1614 for (i = 0; i < txq->tx_ring_size; i++) {
1615 struct tx_desc *txd = tx_desc + i;
1542 int nexti = (i + 1) % txq->tx_ring_size; 1616 int nexti = (i + 1) % txq->tx_ring_size;
1543 tx_desc[i].next_desc_ptr = txq->tx_desc_dma + 1617
1618 txd->cmd_sts = 0;
1619 txd->next_desc_ptr = txq->tx_desc_dma +
1544 nexti * sizeof(struct tx_desc); 1620 nexti * sizeof(struct tx_desc);
1545 } 1621 }
1546 1622
@@ -1577,8 +1653,11 @@ static void txq_reclaim(struct tx_queue *txq, int force)
1577 desc = &txq->tx_desc_area[tx_index]; 1653 desc = &txq->tx_desc_area[tx_index];
1578 cmd_sts = desc->cmd_sts; 1654 cmd_sts = desc->cmd_sts;
1579 1655
1580 if (!force && (cmd_sts & BUFFER_OWNED_BY_DMA)) 1656 if (cmd_sts & BUFFER_OWNED_BY_DMA) {
1581 break; 1657 if (!force)
1658 break;
1659 desc->cmd_sts = cmd_sts & ~BUFFER_OWNED_BY_DMA;
1660 }
1582 1661
1583 txq->tx_used_desc = (tx_index + 1) % txq->tx_ring_size; 1662 txq->tx_used_desc = (tx_index + 1) % txq->tx_ring_size;
1584 txq->tx_desc_count--; 1663 txq->tx_desc_count--;
@@ -1632,49 +1711,61 @@ static void txq_deinit(struct tx_queue *txq)
1632 1711
1633 1712
1634/* netdev ops and related ***************************************************/ 1713/* netdev ops and related ***************************************************/
1635static void update_pscr(struct mv643xx_eth_private *mp, int speed, int duplex) 1714static void handle_link_event(struct mv643xx_eth_private *mp)
1636{ 1715{
1637 u32 pscr_o; 1716 struct net_device *dev = mp->dev;
1638 u32 pscr_n; 1717 u32 port_status;
1639 1718 int speed;
1640 pscr_o = rdl(mp, PORT_SERIAL_CONTROL(mp->port_num)); 1719 int duplex;
1720 int fc;
1721
1722 port_status = rdl(mp, PORT_STATUS(mp->port_num));
1723 if (!(port_status & LINK_UP)) {
1724 if (netif_carrier_ok(dev)) {
1725 int i;
1641 1726
1642 /* clear speed, duplex and rx buffer size fields */ 1727 printk(KERN_INFO "%s: link down\n", dev->name);
1643 pscr_n = pscr_o & ~(SET_MII_SPEED_TO_100 |
1644 SET_GMII_SPEED_TO_1000 |
1645 SET_FULL_DUPLEX_MODE |
1646 MAX_RX_PACKET_MASK);
1647 1728
1648 if (speed == SPEED_1000) { 1729 netif_carrier_off(dev);
1649 pscr_n |= SET_GMII_SPEED_TO_1000 | MAX_RX_PACKET_9700BYTE; 1730 netif_stop_queue(dev);
1650 } else {
1651 if (speed == SPEED_100)
1652 pscr_n |= SET_MII_SPEED_TO_100;
1653 pscr_n |= MAX_RX_PACKET_1522BYTE;
1654 }
1655 1731
1656 if (duplex == DUPLEX_FULL) 1732 for (i = 0; i < 8; i++) {
1657 pscr_n |= SET_FULL_DUPLEX_MODE; 1733 struct tx_queue *txq = mp->txq + i;
1658 1734
1659 if (pscr_n != pscr_o) { 1735 if (mp->txq_mask & (1 << i)) {
1660 if ((pscr_o & SERIAL_PORT_ENABLE) == 0) 1736 txq_reclaim(txq, 1);
1661 wrl(mp, PORT_SERIAL_CONTROL(mp->port_num), pscr_n); 1737 txq_reset_hw_ptr(txq);
1662 else { 1738 }
1663 int i; 1739 }
1740 }
1741 return;
1742 }
1664 1743
1665 for (i = 0; i < 8; i++) 1744 switch (port_status & PORT_SPEED_MASK) {
1666 if (mp->txq_mask & (1 << i)) 1745 case PORT_SPEED_10:
1667 txq_disable(mp->txq + i); 1746 speed = 10;
1747 break;
1748 case PORT_SPEED_100:
1749 speed = 100;
1750 break;
1751 case PORT_SPEED_1000:
1752 speed = 1000;
1753 break;
1754 default:
1755 speed = -1;
1756 break;
1757 }
1758 duplex = (port_status & FULL_DUPLEX) ? 1 : 0;
1759 fc = (port_status & FLOW_CONTROL_ENABLED) ? 1 : 0;
1668 1760
1669 pscr_o &= ~SERIAL_PORT_ENABLE; 1761 printk(KERN_INFO "%s: link up, %d Mb/s, %s duplex, "
1670 wrl(mp, PORT_SERIAL_CONTROL(mp->port_num), pscr_o); 1762 "flow control %sabled\n", dev->name,
1671 wrl(mp, PORT_SERIAL_CONTROL(mp->port_num), pscr_n); 1763 speed, duplex ? "full" : "half",
1672 wrl(mp, PORT_SERIAL_CONTROL(mp->port_num), pscr_n); 1764 fc ? "en" : "dis");
1673 1765
1674 for (i = 0; i < 8; i++) 1766 if (!netif_carrier_ok(dev)) {
1675 if (mp->txq_mask & (1 << i)) 1767 netif_carrier_on(dev);
1676 txq_enable(mp->txq + i); 1768 netif_wake_queue(dev);
1677 }
1678 } 1769 }
1679} 1770}
1680 1771
@@ -1684,7 +1775,6 @@ static irqreturn_t mv643xx_eth_irq(int irq, void *dev_id)
1684 struct mv643xx_eth_private *mp = netdev_priv(dev); 1775 struct mv643xx_eth_private *mp = netdev_priv(dev);
1685 u32 int_cause; 1776 u32 int_cause;
1686 u32 int_cause_ext; 1777 u32 int_cause_ext;
1687 u32 txq_active;
1688 1778
1689 int_cause = rdl(mp, INT_CAUSE(mp->port_num)) & 1779 int_cause = rdl(mp, INT_CAUSE(mp->port_num)) &
1690 (INT_TX_END | INT_RX | INT_EXT); 1780 (INT_TX_END | INT_RX | INT_EXT);
@@ -1698,30 +1788,8 @@ static irqreturn_t mv643xx_eth_irq(int irq, void *dev_id)
1698 wrl(mp, INT_CAUSE_EXT(mp->port_num), ~int_cause_ext); 1788 wrl(mp, INT_CAUSE_EXT(mp->port_num), ~int_cause_ext);
1699 } 1789 }
1700 1790
1701 if (int_cause_ext & (INT_EXT_PHY | INT_EXT_LINK)) { 1791 if (int_cause_ext & (INT_EXT_PHY | INT_EXT_LINK))
1702 if (mp->phy_addr == -1 || mii_link_ok(&mp->mii)) { 1792 handle_link_event(mp);
1703 int i;
1704
1705 if (mp->phy_addr != -1) {
1706 struct ethtool_cmd cmd;
1707
1708 mii_ethtool_gset(&mp->mii, &cmd);
1709 update_pscr(mp, cmd.speed, cmd.duplex);
1710 }
1711
1712 for (i = 0; i < 8; i++)
1713 if (mp->txq_mask & (1 << i))
1714 txq_enable(mp->txq + i);
1715
1716 if (!netif_carrier_ok(dev)) {
1717 netif_carrier_on(dev);
1718 __txq_maybe_wake(mp->txq + mp->txq_primary);
1719 }
1720 } else if (netif_carrier_ok(dev)) {
1721 netif_stop_queue(dev);
1722 netif_carrier_off(dev);
1723 }
1724 }
1725 1793
1726 /* 1794 /*
1727 * RxBuffer or RxError set for any of the 8 queues? 1795 * RxBuffer or RxError set for any of the 8 queues?
@@ -1743,8 +1811,6 @@ static irqreturn_t mv643xx_eth_irq(int irq, void *dev_id)
1743 } 1811 }
1744#endif 1812#endif
1745 1813
1746 txq_active = rdl(mp, TXQ_COMMAND(mp->port_num));
1747
1748 /* 1814 /*
1749 * TxBuffer or TxError set for any of the 8 queues? 1815 * TxBuffer or TxError set for any of the 8 queues?
1750 */ 1816 */
@@ -1754,6 +1820,16 @@ static irqreturn_t mv643xx_eth_irq(int irq, void *dev_id)
1754 for (i = 0; i < 8; i++) 1820 for (i = 0; i < 8; i++)
1755 if (mp->txq_mask & (1 << i)) 1821 if (mp->txq_mask & (1 << i))
1756 txq_reclaim(mp->txq + i, 0); 1822 txq_reclaim(mp->txq + i, 0);
1823
1824 /*
1825 * Enough space again in the primary TX queue for a
1826 * full packet?
1827 */
1828 if (netif_carrier_ok(dev)) {
1829 spin_lock(&mp->lock);
1830 __txq_maybe_wake(mp->txq + mp->txq_primary);
1831 spin_unlock(&mp->lock);
1832 }
1757 } 1833 }
1758 1834
1759 /* 1835 /*
@@ -1763,19 +1839,25 @@ static irqreturn_t mv643xx_eth_irq(int irq, void *dev_id)
1763 int i; 1839 int i;
1764 1840
1765 wrl(mp, INT_CAUSE(mp->port_num), ~(int_cause & INT_TX_END)); 1841 wrl(mp, INT_CAUSE(mp->port_num), ~(int_cause & INT_TX_END));
1842
1843 spin_lock(&mp->lock);
1766 for (i = 0; i < 8; i++) { 1844 for (i = 0; i < 8; i++) {
1767 struct tx_queue *txq = mp->txq + i; 1845 struct tx_queue *txq = mp->txq + i;
1768 if (txq->tx_desc_count && !((txq_active >> i) & 1)) 1846 u32 hw_desc_ptr;
1847 u32 expected_ptr;
1848
1849 if ((int_cause & (INT_TX_END_0 << i)) == 0)
1850 continue;
1851
1852 hw_desc_ptr =
1853 rdl(mp, TXQ_CURRENT_DESC_PTR(mp->port_num, i));
1854 expected_ptr = (u32)txq->tx_desc_dma +
1855 txq->tx_curr_desc * sizeof(struct tx_desc);
1856
1857 if (hw_desc_ptr != expected_ptr)
1769 txq_enable(txq); 1858 txq_enable(txq);
1770 } 1859 }
1771 } 1860 spin_unlock(&mp->lock);
1772
1773 /*
1774 * Enough space again in the primary TX queue for a full packet?
1775 */
1776 if (int_cause_ext & INT_EXT_TX) {
1777 struct tx_queue *txq = mp->txq + mp->txq_primary;
1778 __txq_maybe_wake(txq);
1779 } 1861 }
1780 1862
1781 return IRQ_HANDLED; 1863 return IRQ_HANDLED;
@@ -1785,14 +1867,14 @@ static void phy_reset(struct mv643xx_eth_private *mp)
1785{ 1867{
1786 unsigned int data; 1868 unsigned int data;
1787 1869
1788 smi_reg_read(mp, mp->phy_addr, 0, &data); 1870 smi_reg_read(mp, mp->phy_addr, MII_BMCR, &data);
1789 data |= 0x8000; 1871 data |= BMCR_RESET;
1790 smi_reg_write(mp, mp->phy_addr, 0, data); 1872 smi_reg_write(mp, mp->phy_addr, MII_BMCR, data);
1791 1873
1792 do { 1874 do {
1793 udelay(1); 1875 udelay(1);
1794 smi_reg_read(mp, mp->phy_addr, 0, &data); 1876 smi_reg_read(mp, mp->phy_addr, MII_BMCR, &data);
1795 } while (data & 0x8000); 1877 } while (data & BMCR_RESET);
1796} 1878}
1797 1879
1798static void port_start(struct mv643xx_eth_private *mp) 1880static void port_start(struct mv643xx_eth_private *mp)
@@ -1801,23 +1883,6 @@ static void port_start(struct mv643xx_eth_private *mp)
1801 int i; 1883 int i;
1802 1884
1803 /* 1885 /*
1804 * Configure basic link parameters.
1805 */
1806 pscr = rdl(mp, PORT_SERIAL_CONTROL(mp->port_num));
1807 pscr &= ~(SERIAL_PORT_ENABLE | FORCE_LINK_PASS);
1808 wrl(mp, PORT_SERIAL_CONTROL(mp->port_num), pscr);
1809 pscr |= DISABLE_AUTO_NEG_FOR_FLOW_CTRL |
1810 DISABLE_AUTO_NEG_SPEED_GMII |
1811 DISABLE_AUTO_NEG_FOR_DUPLEX |
1812 DO_NOT_FORCE_LINK_FAIL |
1813 SERIAL_PORT_CONTROL_RESERVED;
1814 wrl(mp, PORT_SERIAL_CONTROL(mp->port_num), pscr);
1815 pscr |= SERIAL_PORT_ENABLE;
1816 wrl(mp, PORT_SERIAL_CONTROL(mp->port_num), pscr);
1817
1818 wrl(mp, SDMA_CONFIG(mp->port_num), PORT_SDMA_CONFIG_DEFAULT_VALUE);
1819
1820 /*
1821 * Perform PHY reset, if there is a PHY. 1886 * Perform PHY reset, if there is a PHY.
1822 */ 1887 */
1823 if (mp->phy_addr != -1) { 1888 if (mp->phy_addr != -1) {
@@ -1829,21 +1894,31 @@ static void port_start(struct mv643xx_eth_private *mp)
1829 } 1894 }
1830 1895
1831 /* 1896 /*
1897 * Configure basic link parameters.
1898 */
1899 pscr = rdl(mp, PORT_SERIAL_CONTROL(mp->port_num));
1900
1901 pscr |= SERIAL_PORT_ENABLE;
1902 wrl(mp, PORT_SERIAL_CONTROL(mp->port_num), pscr);
1903
1904 pscr |= DO_NOT_FORCE_LINK_FAIL;
1905 if (mp->phy_addr == -1)
1906 pscr |= FORCE_LINK_PASS;
1907 wrl(mp, PORT_SERIAL_CONTROL(mp->port_num), pscr);
1908
1909 wrl(mp, SDMA_CONFIG(mp->port_num), PORT_SDMA_CONFIG_DEFAULT_VALUE);
1910
1911 /*
1832 * Configure TX path and queues. 1912 * Configure TX path and queues.
1833 */ 1913 */
1834 tx_set_rate(mp, 1000000000, 16777216); 1914 tx_set_rate(mp, 1000000000, 16777216);
1835 for (i = 0; i < 8; i++) { 1915 for (i = 0; i < 8; i++) {
1836 struct tx_queue *txq = mp->txq + i; 1916 struct tx_queue *txq = mp->txq + i;
1837 int off = TXQ_CURRENT_DESC_PTR(mp->port_num, i);
1838 u32 addr;
1839 1917
1840 if ((mp->txq_mask & (1 << i)) == 0) 1918 if ((mp->txq_mask & (1 << i)) == 0)
1841 continue; 1919 continue;
1842 1920
1843 addr = (u32)txq->tx_desc_dma; 1921 txq_reset_hw_ptr(txq);
1844 addr += txq->tx_curr_desc * sizeof(struct tx_desc);
1845 wrl(mp, off, addr);
1846
1847 txq_set_rate(txq, 1000000000, 16777216); 1922 txq_set_rate(txq, 1000000000, 16777216);
1848 txq_set_fixed_prio_mode(txq); 1923 txq_set_fixed_prio_mode(txq);
1849 } 1924 }
@@ -1965,6 +2040,9 @@ static int mv643xx_eth_open(struct net_device *dev)
1965 napi_enable(&mp->napi); 2040 napi_enable(&mp->napi);
1966#endif 2041#endif
1967 2042
2043 netif_carrier_off(dev);
2044 netif_stop_queue(dev);
2045
1968 port_start(mp); 2046 port_start(mp);
1969 2047
1970 set_rx_coal(mp, 0); 2048 set_rx_coal(mp, 0);
@@ -1999,8 +2077,14 @@ static void port_reset(struct mv643xx_eth_private *mp)
1999 if (mp->txq_mask & (1 << i)) 2077 if (mp->txq_mask & (1 << i))
2000 txq_disable(mp->txq + i); 2078 txq_disable(mp->txq + i);
2001 } 2079 }
2002 while (!(rdl(mp, PORT_STATUS(mp->port_num)) & TX_FIFO_EMPTY)) 2080
2081 while (1) {
2082 u32 ps = rdl(mp, PORT_STATUS(mp->port_num));
2083
2084 if ((ps & (TX_IN_PROGRESS | TX_FIFO_EMPTY)) == TX_FIFO_EMPTY)
2085 break;
2003 udelay(10); 2086 udelay(10);
2087 }
2004 2088
2005 /* Reset the Enable bit in the Configuration Register */ 2089 /* Reset the Enable bit in the Configuration Register */
2006 data = rdl(mp, PORT_SERIAL_CONTROL(mp->port_num)); 2090 data = rdl(mp, PORT_SERIAL_CONTROL(mp->port_num));
@@ -2202,7 +2286,8 @@ static int mv643xx_eth_shared_probe(struct platform_device *pdev)
2202 int ret; 2286 int ret;
2203 2287
2204 if (!mv643xx_eth_version_printed++) 2288 if (!mv643xx_eth_version_printed++)
2205 printk(KERN_NOTICE "MV-643xx 10/100/1000 Ethernet Driver\n"); 2289 printk(KERN_NOTICE "MV-643xx 10/100/1000 ethernet "
2290 "driver version %s\n", mv643xx_eth_driver_version);
2206 2291
2207 ret = -EINVAL; 2292 ret = -EINVAL;
2208 res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 2293 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
@@ -2338,14 +2423,14 @@ static int phy_detect(struct mv643xx_eth_private *mp)
2338 unsigned int data; 2423 unsigned int data;
2339 unsigned int data2; 2424 unsigned int data2;
2340 2425
2341 smi_reg_read(mp, mp->phy_addr, 0, &data); 2426 smi_reg_read(mp, mp->phy_addr, MII_BMCR, &data);
2342 smi_reg_write(mp, mp->phy_addr, 0, data ^ 0x1000); 2427 smi_reg_write(mp, mp->phy_addr, MII_BMCR, data ^ BMCR_ANENABLE);
2343 2428
2344 smi_reg_read(mp, mp->phy_addr, 0, &data2); 2429 smi_reg_read(mp, mp->phy_addr, MII_BMCR, &data2);
2345 if (((data ^ data2) & 0x1000) == 0) 2430 if (((data ^ data2) & BMCR_ANENABLE) == 0)
2346 return -ENODEV; 2431 return -ENODEV;
2347 2432
2348 smi_reg_write(mp, mp->phy_addr, 0, data); 2433 smi_reg_write(mp, mp->phy_addr, MII_BMCR, data);
2349 2434
2350 return 0; 2435 return 0;
2351} 2436}
@@ -2393,12 +2478,39 @@ static int phy_init(struct mv643xx_eth_private *mp,
2393 cmd.duplex = pd->duplex; 2478 cmd.duplex = pd->duplex;
2394 } 2479 }
2395 2480
2396 update_pscr(mp, cmd.speed, cmd.duplex);
2397 mv643xx_eth_set_settings(mp->dev, &cmd); 2481 mv643xx_eth_set_settings(mp->dev, &cmd);
2398 2482
2399 return 0; 2483 return 0;
2400} 2484}
2401 2485
2486static void init_pscr(struct mv643xx_eth_private *mp, int speed, int duplex)
2487{
2488 u32 pscr;
2489
2490 pscr = rdl(mp, PORT_SERIAL_CONTROL(mp->port_num));
2491 if (pscr & SERIAL_PORT_ENABLE) {
2492 pscr &= ~SERIAL_PORT_ENABLE;
2493 wrl(mp, PORT_SERIAL_CONTROL(mp->port_num), pscr);
2494 }
2495
2496 pscr = MAX_RX_PACKET_9700BYTE | SERIAL_PORT_CONTROL_RESERVED;
2497 if (mp->phy_addr == -1) {
2498 pscr |= DISABLE_AUTO_NEG_SPEED_GMII;
2499 if (speed == SPEED_1000)
2500 pscr |= SET_GMII_SPEED_TO_1000;
2501 else if (speed == SPEED_100)
2502 pscr |= SET_MII_SPEED_TO_100;
2503
2504 pscr |= DISABLE_AUTO_NEG_FOR_FLOW_CTRL;
2505
2506 pscr |= DISABLE_AUTO_NEG_FOR_DUPLEX;
2507 if (duplex == DUPLEX_FULL)
2508 pscr |= SET_FULL_DUPLEX_MODE;
2509 }
2510
2511 wrl(mp, PORT_SERIAL_CONTROL(mp->port_num), pscr);
2512}
2513
2402static int mv643xx_eth_probe(struct platform_device *pdev) 2514static int mv643xx_eth_probe(struct platform_device *pdev)
2403{ 2515{
2404 struct mv643xx_eth_platform_data *pd; 2516 struct mv643xx_eth_platform_data *pd;
@@ -2452,6 +2564,7 @@ static int mv643xx_eth_probe(struct platform_device *pdev)
2452 } else { 2564 } else {
2453 SET_ETHTOOL_OPS(dev, &mv643xx_eth_ethtool_ops_phyless); 2565 SET_ETHTOOL_OPS(dev, &mv643xx_eth_ethtool_ops_phyless);
2454 } 2566 }
2567 init_pscr(mp, pd->speed, pd->duplex);
2455 2568
2456 2569
2457 res = platform_get_resource(pdev, IORESOURCE_IRQ, 0); 2570 res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
@@ -2478,6 +2591,7 @@ static int mv643xx_eth_probe(struct platform_device *pdev)
2478 * have to map the buffers to ISA memory which is only 16 MB 2591 * have to map the buffers to ISA memory which is only 16 MB
2479 */ 2592 */
2480 dev->features = NETIF_F_SG | NETIF_F_IP_CSUM; 2593 dev->features = NETIF_F_SG | NETIF_F_IP_CSUM;
2594 dev->vlan_features = NETIF_F_SG | NETIF_F_IP_CSUM;
2481#endif 2595#endif
2482 2596
2483 SET_NETDEV_DEV(dev, &pdev->dev); 2597 SET_NETDEV_DEV(dev, &pdev->dev);
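The phy_reset() hunk above replaces the hard-coded 0x8000 register accesses with the MII_BMCR/BMCR_RESET names from <linux/mii.h>. For reference, a minimal sketch of that self-clearing soft-reset sequence, assuming a hypothetical example_smi_read()/example_smi_write() pair in place of the driver's SMI helpers:

static void example_phy_soft_reset(int phy_addr)
{
	unsigned int bmcr;

	/* Set the self-clearing reset bit in the basic mode control register. */
	example_smi_read(phy_addr, MII_BMCR, &bmcr);
	example_smi_write(phy_addr, MII_BMCR, bmcr | BMCR_RESET);

	/* The PHY clears BMCR_RESET once the reset has completed. */
	do {
		udelay(1);
		example_smi_read(phy_addr, MII_BMCR, &bmcr);
	} while (bmcr & BMCR_RESET);
}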
diff --git a/drivers/net/myri10ge/myri10ge.c b/drivers/net/myri10ge/myri10ge.c
index 3ab0e5289f7a..f1de38f8b742 100644
--- a/drivers/net/myri10ge/myri10ge.c
+++ b/drivers/net/myri10ge/myri10ge.c
@@ -3699,6 +3699,7 @@ static int myri10ge_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
3699 dev_err(&pdev->dev, "Error %d setting DMA mask\n", status); 3699 dev_err(&pdev->dev, "Error %d setting DMA mask\n", status);
3700 goto abort_with_netdev; 3700 goto abort_with_netdev;
3701 } 3701 }
3702 (void)pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK);
3702 mgp->cmd = dma_alloc_coherent(&pdev->dev, sizeof(*mgp->cmd), 3703 mgp->cmd = dma_alloc_coherent(&pdev->dev, sizeof(*mgp->cmd),
3703 &mgp->cmd_bus, GFP_KERNEL); 3704 &mgp->cmd_bus, GFP_KERNEL);
3704 if (mgp->cmd == NULL) 3705 if (mgp->cmd == NULL)
diff --git a/drivers/net/myri10ge/myri10ge_mcp.h b/drivers/net/myri10ge/myri10ge_mcp.h
index fdbeeee07372..993721090777 100644
--- a/drivers/net/myri10ge/myri10ge_mcp.h
+++ b/drivers/net/myri10ge/myri10ge_mcp.h
@@ -101,6 +101,8 @@ struct mcp_kreq_ether_recv {
101#define MXGEFW_ETH_SEND_3 0x2c0000 101#define MXGEFW_ETH_SEND_3 0x2c0000
102#define MXGEFW_ETH_RECV_SMALL 0x300000 102#define MXGEFW_ETH_RECV_SMALL 0x300000
103#define MXGEFW_ETH_RECV_BIG 0x340000 103#define MXGEFW_ETH_RECV_BIG 0x340000
104#define MXGEFW_ETH_SEND_GO 0x380000
105#define MXGEFW_ETH_SEND_STOP 0x3C0000
104 106
105#define MXGEFW_ETH_SEND(n) (0x200000 + (((n) & 0x03) * 0x40000)) 107#define MXGEFW_ETH_SEND(n) (0x200000 + (((n) & 0x03) * 0x40000))
106#define MXGEFW_ETH_SEND_OFFSET(n) (MXGEFW_ETH_SEND(n) - MXGEFW_ETH_SEND_4) 108#define MXGEFW_ETH_SEND_OFFSET(n) (MXGEFW_ETH_SEND(n) - MXGEFW_ETH_SEND_4)
@@ -120,6 +122,11 @@ enum myri10ge_mcp_cmd_type {
120 * MXGEFW_CMD_RESET is issued */ 122 * MXGEFW_CMD_RESET is issued */
121 123
122 MXGEFW_CMD_SET_INTRQ_DMA, 124 MXGEFW_CMD_SET_INTRQ_DMA,
125 /* data0 = LSW of the host address
126 * data1 = MSW of the host address
127 * data2 = slice number if multiple slices are used
128 */
129
123 MXGEFW_CMD_SET_BIG_BUFFER_SIZE, /* in bytes, power of 2 */ 130 MXGEFW_CMD_SET_BIG_BUFFER_SIZE, /* in bytes, power of 2 */
124 MXGEFW_CMD_SET_SMALL_BUFFER_SIZE, /* in bytes */ 131 MXGEFW_CMD_SET_SMALL_BUFFER_SIZE, /* in bytes */
125 132
@@ -129,6 +136,8 @@ enum myri10ge_mcp_cmd_type {
129 MXGEFW_CMD_GET_SEND_OFFSET, 136 MXGEFW_CMD_GET_SEND_OFFSET,
130 MXGEFW_CMD_GET_SMALL_RX_OFFSET, 137 MXGEFW_CMD_GET_SMALL_RX_OFFSET,
131 MXGEFW_CMD_GET_BIG_RX_OFFSET, 138 MXGEFW_CMD_GET_BIG_RX_OFFSET,
139 /* data0 = slice number if multiple slices are used */
140
132 MXGEFW_CMD_GET_IRQ_ACK_OFFSET, 141 MXGEFW_CMD_GET_IRQ_ACK_OFFSET,
133 MXGEFW_CMD_GET_IRQ_DEASSERT_OFFSET, 142 MXGEFW_CMD_GET_IRQ_DEASSERT_OFFSET,
134 143
@@ -200,7 +209,12 @@ enum myri10ge_mcp_cmd_type {
200 MXGEFW_CMD_SET_STATS_DMA_V2, 209 MXGEFW_CMD_SET_STATS_DMA_V2,
201 /* data0, data1 = bus addr, 210 /* data0, data1 = bus addr,
202 * data2 = sizeof(struct mcp_irq_data) from driver point of view, allows 211 * data2 = sizeof(struct mcp_irq_data) from driver point of view, allows
203 * adding new stuff to mcp_irq_data without changing the ABI */ 212 * adding new stuff to mcp_irq_data without changing the ABI
213 *
214 * If multiple slices are used, data2 contains both the size of the
215 * structure (in the lower 16 bits) and the slice number
216 * (in the upper 16 bits).
217 */
204 218
205 MXGEFW_CMD_UNALIGNED_TEST, 219 MXGEFW_CMD_UNALIGNED_TEST,
206 /* same than DMA_TEST (same args) but abort with UNALIGNED on unaligned 220 /* same than DMA_TEST (same args) but abort with UNALIGNED on unaligned
@@ -222,13 +236,18 @@ enum myri10ge_mcp_cmd_type {
222 MXGEFW_CMD_GET_MAX_RSS_QUEUES, 236 MXGEFW_CMD_GET_MAX_RSS_QUEUES,
223 MXGEFW_CMD_ENABLE_RSS_QUEUES, 237 MXGEFW_CMD_ENABLE_RSS_QUEUES,
224 /* data0 = number of slices n (0, 1, ..., n-1) to enable 238 /* data0 = number of slices n (0, 1, ..., n-1) to enable
225 * data1 = interrupt mode. 239 * data1 = interrupt mode | use of multiple transmit queues.
226 * 0=share one INTx/MSI, 1=use one MSI-X per queue. 240 * 0=share one INTx/MSI.
241 * 1=use one MSI-X per queue.
227 * If all queues share one interrupt, the driver must have set 242 * If all queues share one interrupt, the driver must have set
228 * RSS_SHARED_INTERRUPT_DMA before enabling queues. 243 * RSS_SHARED_INTERRUPT_DMA before enabling queues.
244 * 2=enable both receive and send queues.
245 * Without this bit set, only one send queue (slice 0's send queue)
246 * is enabled. The receive queues are always enabled.
229 */ 247 */
230#define MXGEFW_SLICE_INTR_MODE_SHARED 0 248#define MXGEFW_SLICE_INTR_MODE_SHARED 0x0
231#define MXGEFW_SLICE_INTR_MODE_ONE_PER_SLICE 1 249#define MXGEFW_SLICE_INTR_MODE_ONE_PER_SLICE 0x1
250#define MXGEFW_SLICE_ENABLE_MULTIPLE_TX_QUEUES 0x2
232 251
233 MXGEFW_CMD_GET_RSS_SHARED_INTERRUPT_MASK_OFFSET, 252 MXGEFW_CMD_GET_RSS_SHARED_INTERRUPT_MASK_OFFSET,
234 MXGEFW_CMD_SET_RSS_SHARED_INTERRUPT_DMA, 253 MXGEFW_CMD_SET_RSS_SHARED_INTERRUPT_DMA,
@@ -250,10 +269,13 @@ enum myri10ge_mcp_cmd_type {
250 * 2: TCP_IPV4 (required by RSS) 269 * 2: TCP_IPV4 (required by RSS)
251 * 3: IPV4 | TCP_IPV4 (required by RSS) 270 * 3: IPV4 | TCP_IPV4 (required by RSS)
252 * 4: source port 271 * 4: source port
272 * 5: source port + destination port
253 */ 273 */
254#define MXGEFW_RSS_HASH_TYPE_IPV4 0x1 274#define MXGEFW_RSS_HASH_TYPE_IPV4 0x1
255#define MXGEFW_RSS_HASH_TYPE_TCP_IPV4 0x2 275#define MXGEFW_RSS_HASH_TYPE_TCP_IPV4 0x2
256#define MXGEFW_RSS_HASH_TYPE_SRC_PORT 0x4 276#define MXGEFW_RSS_HASH_TYPE_SRC_PORT 0x4
277#define MXGEFW_RSS_HASH_TYPE_SRC_DST_PORT 0x5
278#define MXGEFW_RSS_HASH_TYPE_MAX 0x5
257 279
258 MXGEFW_CMD_GET_MAX_TSO6_HDR_SIZE, 280 MXGEFW_CMD_GET_MAX_TSO6_HDR_SIZE,
259 /* Return data = the max. size of the entire headers of a IPv6 TSO packet. 281 /* Return data = the max. size of the entire headers of a IPv6 TSO packet.
@@ -329,6 +351,20 @@ enum myri10ge_mcp_cmd_type {
329 351
330 MXGEFW_CMD_GET_DCA_OFFSET, 352 MXGEFW_CMD_GET_DCA_OFFSET,
331 /* offset of dca control for WDMAs */ 353 /* offset of dca control for WDMAs */
354
355 /* VMWare NetQueue commands */
356 MXGEFW_CMD_NETQ_GET_FILTERS_PER_QUEUE,
357 MXGEFW_CMD_NETQ_ADD_FILTER,
358 /* data0 = filter_id << 16 | queue << 8 | type */
359 /* data1 = MS4 of MAC Addr */
360 /* data2 = LS2_MAC << 16 | VLAN_tag */
361 MXGEFW_CMD_NETQ_DEL_FILTER,
362 /* data0 = filter_id */
363 MXGEFW_CMD_NETQ_QUERY1,
364 MXGEFW_CMD_NETQ_QUERY2,
365 MXGEFW_CMD_NETQ_QUERY3,
366 MXGEFW_CMD_NETQ_QUERY4,
367
332}; 368};
333 369
334enum myri10ge_mcp_cmd_status { 370enum myri10ge_mcp_cmd_status {
@@ -381,4 +417,10 @@ struct mcp_irq_data {
381 u8 valid; 417 u8 valid;
382}; 418};
383 419
420/* definitions for NETQ filter type */
421#define MXGEFW_NETQ_FILTERTYPE_NONE 0
422#define MXGEFW_NETQ_FILTERTYPE_MACADDR 1
423#define MXGEFW_NETQ_FILTERTYPE_VLAN 2
424#define MXGEFW_NETQ_FILTERTYPE_VLANMACADDR 3
425
384#endif /* __MYRI10GE_MCP_H__ */ 426#endif /* __MYRI10GE_MCP_H__ */
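The updated MXGEFW_CMD_SET_STATS_DMA_V2 comment above notes that, when multiple slices are used, data2 carries the structure size in its lower 16 bits and the slice number in its upper 16 bits. A minimal sketch of that encoding (illustrative only; the helper name is hypothetical):

static inline unsigned int example_stats_dma_data2(unsigned int struct_size,
						   unsigned int slice)
{
	/* structure size in bits 15:0, slice number in bits 31:16 */
	return (slice << 16) | (struct_size & 0xffff);
}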
diff --git a/drivers/net/myri10ge/myri10ge_mcp_gen_header.h b/drivers/net/myri10ge/myri10ge_mcp_gen_header.h
index 07d65c2cbb24..a8662ea8079a 100644
--- a/drivers/net/myri10ge/myri10ge_mcp_gen_header.h
+++ b/drivers/net/myri10ge/myri10ge_mcp_gen_header.h
@@ -35,7 +35,7 @@ struct mcp_gen_header {
35 unsigned char mcp_index; 35 unsigned char mcp_index;
36 unsigned char disable_rabbit; 36 unsigned char disable_rabbit;
37 unsigned char unaligned_tlp; 37 unsigned char unaligned_tlp;
38 unsigned char pad1; 38 unsigned char pcie_link_algo;
39 unsigned counters_addr; 39 unsigned counters_addr;
40 unsigned copy_block_info; /* for small mcps loaded with "lload -d" */ 40 unsigned copy_block_info; /* for small mcps loaded with "lload -d" */
41 unsigned short handoff_id_major; /* must be equal */ 41 unsigned short handoff_id_major; /* must be equal */
diff --git a/drivers/net/ne.c b/drivers/net/ne.c
index 2fec6122c7fa..42443d697423 100644
--- a/drivers/net/ne.c
+++ b/drivers/net/ne.c
@@ -536,7 +536,7 @@ static int __init ne_probe1(struct net_device *dev, unsigned long ioaddr)
536#ifdef CONFIG_NET_POLL_CONTROLLER 536#ifdef CONFIG_NET_POLL_CONTROLLER
537 dev->poll_controller = eip_poll; 537 dev->poll_controller = eip_poll;
538#endif 538#endif
539 NS8390_init(dev, 0); 539 NS8390p_init(dev, 0);
540 540
541 ret = register_netdev(dev); 541 ret = register_netdev(dev);
542 if (ret) 542 if (ret)
@@ -794,7 +794,7 @@ retry:
794 if (time_after(jiffies, dma_start + 2*HZ/100)) { /* 20ms */ 794 if (time_after(jiffies, dma_start + 2*HZ/100)) { /* 20ms */
795 printk(KERN_WARNING "%s: timeout waiting for Tx RDC.\n", dev->name); 795 printk(KERN_WARNING "%s: timeout waiting for Tx RDC.\n", dev->name);
796 ne_reset_8390(dev); 796 ne_reset_8390(dev);
797 NS8390_init(dev,1); 797 NS8390p_init(dev, 1);
798 break; 798 break;
799 } 799 }
800 800
@@ -855,7 +855,7 @@ static int ne_drv_resume(struct platform_device *pdev)
855 855
856 if (netif_running(dev)) { 856 if (netif_running(dev)) {
857 ne_reset_8390(dev); 857 ne_reset_8390(dev);
858 NS8390_init(dev, 1); 858 NS8390p_init(dev, 1);
859 netif_device_attach(dev); 859 netif_device_attach(dev);
860 } 860 }
861 return 0; 861 return 0;
diff --git a/drivers/net/netconsole.c b/drivers/net/netconsole.c
index e13966bb5f77..9681618c3232 100644
--- a/drivers/net/netconsole.c
+++ b/drivers/net/netconsole.c
@@ -53,7 +53,7 @@ MODULE_LICENSE("GPL");
53 53
54static char config[MAX_PARAM_LENGTH]; 54static char config[MAX_PARAM_LENGTH];
55module_param_string(netconsole, config, MAX_PARAM_LENGTH, 0); 55module_param_string(netconsole, config, MAX_PARAM_LENGTH, 0);
56MODULE_PARM_DESC(netconsole, " netconsole=[src-port]@[src-ip]/[dev],[tgt-port]@<tgt-ip>/[tgt-macaddr]\n"); 56MODULE_PARM_DESC(netconsole, " netconsole=[src-port]@[src-ip]/[dev],[tgt-port]@<tgt-ip>/[tgt-macaddr]");
57 57
58#ifndef MODULE 58#ifndef MODULE
59static int __init option_setup(char *opt) 59static int __init option_setup(char *opt)
diff --git a/drivers/net/netx-eth.c b/drivers/net/netx-eth.c
index dc442e370850..3f9af759cb90 100644
--- a/drivers/net/netx-eth.c
+++ b/drivers/net/netx-eth.c
@@ -29,12 +29,11 @@
29#include <linux/mii.h> 29#include <linux/mii.h>
30 30
31#include <asm/io.h> 31#include <asm/io.h>
32#include <asm/hardware.h> 32#include <mach/hardware.h>
33#include <asm/arch/hardware.h> 33#include <mach/netx-regs.h>
34#include <asm/arch/netx-regs.h> 34#include <mach/pfifo.h>
35#include <asm/arch/pfifo.h> 35#include <mach/xc.h>
36#include <asm/arch/xc.h> 36#include <mach/eth.h>
37#include <asm/arch/eth.h>
38 37
39/* XC Fifo Offsets */ 38/* XC Fifo Offsets */
40#define EMPTY_PTR_FIFO(xcno) (0 + ((xcno) << 3)) /* Index of the empty pointer FIFO */ 39#define EMPTY_PTR_FIFO(xcno) (0 + ((xcno) << 3)) /* Index of the empty pointer FIFO */
diff --git a/drivers/net/netxen/netxen_nic.h b/drivers/net/netxen/netxen_nic.h
index 8e736614407d..93a7b9b668d5 100644
--- a/drivers/net/netxen/netxen_nic.h
+++ b/drivers/net/netxen/netxen_nic.h
@@ -508,6 +508,8 @@ typedef enum {
508 NETXEN_BRDTYPE_P3_10000_BASE_T = 0x0027, 508 NETXEN_BRDTYPE_P3_10000_BASE_T = 0x0027,
509 NETXEN_BRDTYPE_P3_XG_LOM = 0x0028, 509 NETXEN_BRDTYPE_P3_XG_LOM = 0x0028,
510 NETXEN_BRDTYPE_P3_4_GB_MM = 0x0029, 510 NETXEN_BRDTYPE_P3_4_GB_MM = 0x0029,
511 NETXEN_BRDTYPE_P3_10G_SFP_CT = 0x002a,
512 NETXEN_BRDTYPE_P3_10G_SFP_QT = 0x002b,
511 NETXEN_BRDTYPE_P3_10G_CX4 = 0x0031, 513 NETXEN_BRDTYPE_P3_10G_CX4 = 0x0031,
512 NETXEN_BRDTYPE_P3_10G_XFP = 0x0032 514 NETXEN_BRDTYPE_P3_10G_XFP = 0x0032
513 515
@@ -1170,6 +1172,36 @@ typedef struct {
1170 nx_nic_intr_coalesce_data_t irq; 1172 nx_nic_intr_coalesce_data_t irq;
1171} nx_nic_intr_coalesce_t; 1173} nx_nic_intr_coalesce_t;
1172 1174
1175#define NX_HOST_REQUEST 0x13
1176#define NX_NIC_REQUEST 0x14
1177
1178#define NX_MAC_EVENT 0x1
1179
1180enum {
1181 NX_NIC_H2C_OPCODE_START = 0,
1182 NX_NIC_H2C_OPCODE_CONFIG_RSS,
1183 NX_NIC_H2C_OPCODE_CONFIG_RSS_TBL,
1184 NX_NIC_H2C_OPCODE_CONFIG_INTR_COALESCE,
1185 NX_NIC_H2C_OPCODE_CONFIG_LED,
1186 NX_NIC_H2C_OPCODE_CONFIG_PROMISCUOUS,
1187 NX_NIC_H2C_OPCODE_CONFIG_L2_MAC,
1188 NX_NIC_H2C_OPCODE_LRO_REQUEST,
1189 NX_NIC_H2C_OPCODE_GET_SNMP_STATS,
1190 NX_NIC_H2C_OPCODE_PROXY_START_REQUEST,
1191 NX_NIC_H2C_OPCODE_PROXY_STOP_REQUEST,
1192 NX_NIC_H2C_OPCODE_PROXY_SET_MTU,
1193 NX_NIC_H2C_OPCODE_PROXY_SET_VPORT_MISS_MODE,
1194 NX_H2P_OPCODE_GET_FINGER_PRINT_REQUEST,
1195 NX_H2P_OPCODE_INSTALL_LICENSE_REQUEST,
1196 NX_H2P_OPCODE_GET_LICENSE_CAPABILITY_REQUEST,
1197 NX_NIC_H2C_OPCODE_GET_NET_STATS,
1198 NX_NIC_H2C_OPCODE_LAST
1199};
1200
1201#define VPORT_MISS_MODE_DROP 0 /* drop all unmatched */
1202#define VPORT_MISS_MODE_ACCEPT_ALL 1 /* accept all packets */
1203#define VPORT_MISS_MODE_ACCEPT_MULTI 2 /* accept unmatched multicast */
1204
1173typedef struct { 1205typedef struct {
1174 u64 qhdr; 1206 u64 qhdr;
1175 u64 req_hdr; 1207 u64 req_hdr;
@@ -1288,7 +1320,7 @@ struct netxen_adapter {
1288 int (*disable_phy_interrupts) (struct netxen_adapter *); 1320 int (*disable_phy_interrupts) (struct netxen_adapter *);
1289 int (*macaddr_set) (struct netxen_adapter *, netxen_ethernet_macaddr_t); 1321 int (*macaddr_set) (struct netxen_adapter *, netxen_ethernet_macaddr_t);
1290 int (*set_mtu) (struct netxen_adapter *, int); 1322 int (*set_mtu) (struct netxen_adapter *, int);
1291 int (*set_promisc) (struct netxen_adapter *, netxen_niu_prom_mode_t); 1323 int (*set_promisc) (struct netxen_adapter *, u32);
1292 int (*phy_read) (struct netxen_adapter *, long reg, u32 *); 1324 int (*phy_read) (struct netxen_adapter *, long reg, u32 *);
1293 int (*phy_write) (struct netxen_adapter *, long reg, u32 val); 1325 int (*phy_write) (struct netxen_adapter *, long reg, u32 val);
1294 int (*init_port) (struct netxen_adapter *, int); 1326 int (*init_port) (struct netxen_adapter *, int);
@@ -1465,9 +1497,10 @@ int netxen_process_cmd_ring(struct netxen_adapter *adapter);
1465u32 netxen_process_rcv_ring(struct netxen_adapter *adapter, int ctx, int max); 1497u32 netxen_process_rcv_ring(struct netxen_adapter *adapter, int ctx, int max);
1466void netxen_p2_nic_set_multi(struct net_device *netdev); 1498void netxen_p2_nic_set_multi(struct net_device *netdev);
1467void netxen_p3_nic_set_multi(struct net_device *netdev); 1499void netxen_p3_nic_set_multi(struct net_device *netdev);
1500int netxen_p3_nic_set_promisc(struct netxen_adapter *adapter, u32);
1468int netxen_config_intr_coalesce(struct netxen_adapter *adapter); 1501int netxen_config_intr_coalesce(struct netxen_adapter *adapter);
1469 1502
1470u32 nx_fw_cmd_set_mtu(struct netxen_adapter *adapter, u32 mtu); 1503int nx_fw_cmd_set_mtu(struct netxen_adapter *adapter, int mtu);
1471int netxen_nic_change_mtu(struct net_device *netdev, int new_mtu); 1504int netxen_nic_change_mtu(struct net_device *netdev, int new_mtu);
1472 1505
1473int netxen_nic_set_mac(struct net_device *netdev, void *p); 1506int netxen_nic_set_mac(struct net_device *netdev, void *p);
@@ -1502,7 +1535,9 @@ static const struct netxen_brdinfo netxen_boards[] = {
1502 {NETXEN_BRDTYPE_P3_10G_SFP_PLUS, 2, "Dual XGb SFP+ LP"}, 1535 {NETXEN_BRDTYPE_P3_10G_SFP_PLUS, 2, "Dual XGb SFP+ LP"},
1503 {NETXEN_BRDTYPE_P3_10000_BASE_T, 1, "XGB 10G BaseT LP"}, 1536 {NETXEN_BRDTYPE_P3_10000_BASE_T, 1, "XGB 10G BaseT LP"},
1504 {NETXEN_BRDTYPE_P3_XG_LOM, 2, "Dual XGb LOM"}, 1537 {NETXEN_BRDTYPE_P3_XG_LOM, 2, "Dual XGb LOM"},
1505 {NETXEN_BRDTYPE_P3_4_GB_MM, 4, "Quad GB - March Madness"}, 1538 {NETXEN_BRDTYPE_P3_4_GB_MM, 4, "NX3031 Gigabit Ethernet"},
1539 {NETXEN_BRDTYPE_P3_10G_SFP_CT, 2, "NX3031 10 Gigabit Ethernet"},
1540 {NETXEN_BRDTYPE_P3_10G_SFP_QT, 2, "Quanta Dual XGb SFP+"},
1506 {NETXEN_BRDTYPE_P3_10G_CX4, 2, "Reference Dual CX4 Option"}, 1541 {NETXEN_BRDTYPE_P3_10G_CX4, 2, "Reference Dual CX4 Option"},
1507 {NETXEN_BRDTYPE_P3_10G_XFP, 1, "Reference Single XFP Option"} 1542 {NETXEN_BRDTYPE_P3_10G_XFP, 1, "Reference Single XFP Option"}
1508}; 1543};
diff --git a/drivers/net/netxen/netxen_nic_ctx.c b/drivers/net/netxen/netxen_nic_ctx.c
index 64babc59e699..64b51643c626 100644
--- a/drivers/net/netxen/netxen_nic_ctx.c
+++ b/drivers/net/netxen/netxen_nic_ctx.c
@@ -145,8 +145,8 @@ netxen_issue_cmd(struct netxen_adapter *adapter,
145 return rcode; 145 return rcode;
146} 146}
147 147
148u32 148int
149nx_fw_cmd_set_mtu(struct netxen_adapter *adapter, u32 mtu) 149nx_fw_cmd_set_mtu(struct netxen_adapter *adapter, int mtu)
150{ 150{
151 u32 rcode = NX_RCODE_SUCCESS; 151 u32 rcode = NX_RCODE_SUCCESS;
152 struct netxen_recv_context *recv_ctx = &adapter->recv_ctx[0]; 152 struct netxen_recv_context *recv_ctx = &adapter->recv_ctx[0];
@@ -160,7 +160,10 @@ nx_fw_cmd_set_mtu(struct netxen_adapter *adapter, u32 mtu)
160 0, 160 0,
161 NX_CDRP_CMD_SET_MTU); 161 NX_CDRP_CMD_SET_MTU);
162 162
163 return rcode; 163 if (rcode != NX_RCODE_SUCCESS)
164 return -EIO;
165
166 return 0;
164} 167}
165 168
166static int 169static int
diff --git a/drivers/net/netxen/netxen_nic_ethtool.c b/drivers/net/netxen/netxen_nic_ethtool.c
index 48ee06b6f4e9..4ad3e0844b99 100644
--- a/drivers/net/netxen/netxen_nic_ethtool.c
+++ b/drivers/net/netxen/netxen_nic_ethtool.c
@@ -140,18 +140,33 @@ netxen_nic_get_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
140 if (netif_running(dev)) { 140 if (netif_running(dev)) {
141 ecmd->speed = adapter->link_speed; 141 ecmd->speed = adapter->link_speed;
142 ecmd->duplex = adapter->link_duplex; 142 ecmd->duplex = adapter->link_duplex;
143 } else 143 ecmd->autoneg = adapter->link_autoneg;
144 return -EIO; /* link absent */ 144 }
145
145 } else if (adapter->ahw.board_type == NETXEN_NIC_XGBE) { 146 } else if (adapter->ahw.board_type == NETXEN_NIC_XGBE) {
146 ecmd->supported = (SUPPORTED_TP | 147 u32 val;
147 SUPPORTED_1000baseT_Full | 148
148 SUPPORTED_10000baseT_Full); 149 adapter->hw_read_wx(adapter, NETXEN_PORT_MODE_ADDR, &val, 4);
149 ecmd->advertising = (ADVERTISED_TP | 150 if (val == NETXEN_PORT_MODE_802_3_AP) {
150 ADVERTISED_1000baseT_Full | 151 ecmd->supported = SUPPORTED_1000baseT_Full;
151 ADVERTISED_10000baseT_Full); 152 ecmd->advertising = ADVERTISED_1000baseT_Full;
153 } else {
154 ecmd->supported = SUPPORTED_10000baseT_Full;
155 ecmd->advertising = ADVERTISED_10000baseT_Full;
156 }
157
152 ecmd->port = PORT_TP; 158 ecmd->port = PORT_TP;
153 159
154 ecmd->speed = SPEED_10000; 160 if (NX_IS_REVISION_P3(adapter->ahw.revision_id)) {
161 u16 pcifn = adapter->ahw.pci_func;
162
163 adapter->hw_read_wx(adapter,
164 P3_LINK_SPEED_REG(pcifn), &val, 4);
165 ecmd->speed = P3_LINK_SPEED_MHZ *
166 P3_LINK_SPEED_VAL(pcifn, val);
167 } else
168 ecmd->speed = SPEED_10000;
169
155 ecmd->duplex = DUPLEX_FULL; 170 ecmd->duplex = DUPLEX_FULL;
156 ecmd->autoneg = AUTONEG_DISABLE; 171 ecmd->autoneg = AUTONEG_DISABLE;
157 } else 172 } else
@@ -192,6 +207,8 @@ netxen_nic_get_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
192 break; 207 break;
193 case NETXEN_BRDTYPE_P2_SB31_10G: 208 case NETXEN_BRDTYPE_P2_SB31_10G:
194 case NETXEN_BRDTYPE_P3_10G_SFP_PLUS: 209 case NETXEN_BRDTYPE_P3_10G_SFP_PLUS:
210 case NETXEN_BRDTYPE_P3_10G_SFP_CT:
211 case NETXEN_BRDTYPE_P3_10G_SFP_QT:
195 case NETXEN_BRDTYPE_P3_10G_XFP: 212 case NETXEN_BRDTYPE_P3_10G_XFP:
196 ecmd->supported |= SUPPORTED_FIBRE; 213 ecmd->supported |= SUPPORTED_FIBRE;
197 ecmd->advertising |= ADVERTISED_FIBRE; 214 ecmd->advertising |= ADVERTISED_FIBRE;
diff --git a/drivers/net/netxen/netxen_nic_hdr.h b/drivers/net/netxen/netxen_nic_hdr.h
index 3ce13e451aac..e8e8d73f6ed7 100644
--- a/drivers/net/netxen/netxen_nic_hdr.h
+++ b/drivers/net/netxen/netxen_nic_hdr.h
@@ -724,6 +724,13 @@ enum {
724#define XG_LINK_STATE_P3(pcifn,val) \ 724#define XG_LINK_STATE_P3(pcifn,val) \
725 (((val) >> ((pcifn) * 4)) & XG_LINK_STATE_P3_MASK) 725 (((val) >> ((pcifn) * 4)) & XG_LINK_STATE_P3_MASK)
726 726
727#define P3_LINK_SPEED_MHZ 100
728#define P3_LINK_SPEED_MASK 0xff
729#define P3_LINK_SPEED_REG(pcifn) \
730 (CRB_PF_LINK_SPEED_1 + (((pcifn) / 4) * 4))
731#define P3_LINK_SPEED_VAL(pcifn, reg) \
732 (((reg) >> (8 * ((pcifn) & 0x3))) & P3_LINK_SPEED_MASK)
733
727#define NETXEN_CAM_RAM_BASE (NETXEN_CRB_CAM + 0x02000) 734#define NETXEN_CAM_RAM_BASE (NETXEN_CRB_CAM + 0x02000)
728#define NETXEN_CAM_RAM(reg) (NETXEN_CAM_RAM_BASE + (reg)) 735#define NETXEN_CAM_RAM(reg) (NETXEN_CAM_RAM_BASE + (reg))
729#define NETXEN_FW_VERSION_MAJOR (NETXEN_CAM_RAM(0x150)) 736#define NETXEN_FW_VERSION_MAJOR (NETXEN_CAM_RAM(0x150))
@@ -836,9 +843,11 @@ enum {
836 843
837#define PCIE_SETUP_FUNCTION (0x12040) 844#define PCIE_SETUP_FUNCTION (0x12040)
838#define PCIE_SETUP_FUNCTION2 (0x12048) 845#define PCIE_SETUP_FUNCTION2 (0x12048)
846#define PCIE_MISCCFG_RC (0x1206c)
839#define PCIE_TGT_SPLIT_CHICKEN (0x12080) 847#define PCIE_TGT_SPLIT_CHICKEN (0x12080)
840#define PCIE_CHICKEN3 (0x120c8) 848#define PCIE_CHICKEN3 (0x120c8)
841 849
850#define ISR_INT_STATE_REG (NETXEN_PCIX_PS_REG(PCIE_MISCCFG_RC))
842#define PCIE_MAX_MASTER_SPLIT (0x14048) 851#define PCIE_MAX_MASTER_SPLIT (0x14048)
843 852
844#define NETXEN_PORT_MODE_NONE 0 853#define NETXEN_PORT_MODE_NONE 0
@@ -854,6 +863,7 @@ enum {
854#define NETXEN_CAM_RAM_DMA_WATCHDOG_CTRL (0x14) 863#define NETXEN_CAM_RAM_DMA_WATCHDOG_CTRL (0x14)
855 864
856#define ISR_MSI_INT_TRIGGER(FUNC) (NETXEN_PCIX_PS_REG(PCIX_MSI_F(FUNC))) 865#define ISR_MSI_INT_TRIGGER(FUNC) (NETXEN_PCIX_PS_REG(PCIX_MSI_F(FUNC)))
866#define ISR_LEGACY_INT_TRIGGERED(VAL) (((VAL) & 0x300) == 0x200)
857 867
858/* 868/*
859 * PCI Interrupt Vector Values. 869 * PCI Interrupt Vector Values.
diff --git a/drivers/net/netxen/netxen_nic_hw.c b/drivers/net/netxen/netxen_nic_hw.c
index 96a3bc6426e2..9aa20f961618 100644
--- a/drivers/net/netxen/netxen_nic_hw.c
+++ b/drivers/net/netxen/netxen_nic_hw.c
@@ -285,14 +285,7 @@ static unsigned crb_hub_agt[64] =
285#define ADDR_IN_RANGE(addr, low, high) \ 285#define ADDR_IN_RANGE(addr, low, high) \
286 (((addr) <= (high)) && ((addr) >= (low))) 286 (((addr) <= (high)) && ((addr) >= (low)))
287 287
288#define NETXEN_MAX_MTU 8000 + NETXEN_ENET_HEADER_SIZE + NETXEN_ETH_FCS_SIZE
289#define NETXEN_MIN_MTU 64
290#define NETXEN_ETH_FCS_SIZE 4
291#define NETXEN_ENET_HEADER_SIZE 14
292#define NETXEN_WINDOW_ONE 0x2000000 /*CRB Window: bit 25 of CRB address */ 288#define NETXEN_WINDOW_ONE 0x2000000 /*CRB Window: bit 25 of CRB address */
293#define NETXEN_FIRMWARE_LEN ((16 * 1024) / 4)
294#define NETXEN_NIU_HDRSIZE (0x1 << 6)
295#define NETXEN_NIU_TLRSIZE (0x1 << 5)
296 289
297#define NETXEN_NIC_ZERO_PAUSE_ADDR 0ULL 290#define NETXEN_NIC_ZERO_PAUSE_ADDR 0ULL
298#define NETXEN_NIC_UNIT_PAUSE_ADDR 0x200ULL 291#define NETXEN_NIC_UNIT_PAUSE_ADDR 0x200ULL
@@ -541,9 +534,6 @@ netxen_send_cmd_descs(struct netxen_adapter *adapter,
541 return 0; 534 return 0;
542} 535}
543 536
544#define NIC_REQUEST 0x14
545#define NETXEN_MAC_EVENT 0x1
546
547static int nx_p3_sre_macaddr_change(struct net_device *dev, 537static int nx_p3_sre_macaddr_change(struct net_device *dev,
548 u8 *addr, unsigned op) 538 u8 *addr, unsigned op)
549{ 539{
@@ -553,8 +543,8 @@ static int nx_p3_sre_macaddr_change(struct net_device *dev,
553 int rv; 543 int rv;
554 544
555 memset(&req, 0, sizeof(nx_nic_req_t)); 545 memset(&req, 0, sizeof(nx_nic_req_t));
556 req.qhdr |= (NIC_REQUEST << 23); 546 req.qhdr |= (NX_NIC_REQUEST << 23);
557 req.req_hdr |= NETXEN_MAC_EVENT; 547 req.req_hdr |= NX_MAC_EVENT;
558 req.req_hdr |= ((u64)adapter->portnum << 16); 548 req.req_hdr |= ((u64)adapter->portnum << 16);
559 mac_req.op = op; 549 mac_req.op = op;
560 memcpy(&mac_req.mac_addr, addr, 6); 550 memcpy(&mac_req.mac_addr, addr, 6);
@@ -575,31 +565,35 @@ void netxen_p3_nic_set_multi(struct net_device *netdev)
575 nx_mac_list_t *cur, *next, *del_list, *add_list = NULL; 565 nx_mac_list_t *cur, *next, *del_list, *add_list = NULL;
576 struct dev_mc_list *mc_ptr; 566 struct dev_mc_list *mc_ptr;
577 u8 bcast_addr[ETH_ALEN] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff }; 567 u8 bcast_addr[ETH_ALEN] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
578 568 u32 mode = VPORT_MISS_MODE_DROP;
579 adapter->set_promisc(adapter, NETXEN_NIU_PROMISC_MODE);
580
581 /*
582 * Programming mac addresses will automaticly enabling L2 filtering.
583 * HW will replace timestamp with L2 conid when L2 filtering is
584 * enabled. This causes problem for LSA. Do not enabling L2 filtering
585 * until that problem is fixed.
586 */
587 if ((netdev->flags & IFF_PROMISC) ||
588 (netdev->mc_count > adapter->max_mc_count))
589 return;
590 569
591 del_list = adapter->mac_list; 570 del_list = adapter->mac_list;
592 adapter->mac_list = NULL; 571 adapter->mac_list = NULL;
593 572
594 nx_p3_nic_add_mac(adapter, netdev->dev_addr, &add_list, &del_list); 573 nx_p3_nic_add_mac(adapter, netdev->dev_addr, &add_list, &del_list);
574 nx_p3_nic_add_mac(adapter, bcast_addr, &add_list, &del_list);
575
576 if (netdev->flags & IFF_PROMISC) {
577 mode = VPORT_MISS_MODE_ACCEPT_ALL;
578 goto send_fw_cmd;
579 }
580
581 if ((netdev->flags & IFF_ALLMULTI) ||
582 (netdev->mc_count > adapter->max_mc_count)) {
583 mode = VPORT_MISS_MODE_ACCEPT_MULTI;
584 goto send_fw_cmd;
585 }
586
595 if (netdev->mc_count > 0) { 587 if (netdev->mc_count > 0) {
596 nx_p3_nic_add_mac(adapter, bcast_addr, &add_list, &del_list);
597 for (mc_ptr = netdev->mc_list; mc_ptr; 588 for (mc_ptr = netdev->mc_list; mc_ptr;
598 mc_ptr = mc_ptr->next) { 589 mc_ptr = mc_ptr->next) {
599 nx_p3_nic_add_mac(adapter, mc_ptr->dmi_addr, 590 nx_p3_nic_add_mac(adapter, mc_ptr->dmi_addr,
600 &add_list, &del_list); 591 &add_list, &del_list);
601 } 592 }
602 } 593 }
594
595send_fw_cmd:
596 adapter->set_promisc(adapter, mode);
603 for (cur = del_list; cur;) { 597 for (cur = del_list; cur;) {
604 nx_p3_sre_macaddr_change(netdev, cur->mac_addr, NETXEN_MAC_DEL); 598 nx_p3_sre_macaddr_change(netdev, cur->mac_addr, NETXEN_MAC_DEL);
605 next = cur->next; 599 next = cur->next;
@@ -615,6 +609,21 @@ void netxen_p3_nic_set_multi(struct net_device *netdev)
615 } 609 }
616} 610}
617 611
612int netxen_p3_nic_set_promisc(struct netxen_adapter *adapter, u32 mode)
613{
614 nx_nic_req_t req;
615
616 memset(&req, 0, sizeof(nx_nic_req_t));
617
618 req.qhdr |= (NX_HOST_REQUEST << 23);
619 req.req_hdr |= NX_NIC_H2C_OPCODE_PROXY_SET_VPORT_MISS_MODE;
620 req.req_hdr |= ((u64)adapter->portnum << 16);
621 req.words[0] = cpu_to_le64(mode);
622
623 return netxen_send_cmd_descs(adapter,
624 (struct cmd_desc_type0 *)&req, 1);
625}
626
618#define NETXEN_CONFIG_INTR_COALESCE 3 627#define NETXEN_CONFIG_INTR_COALESCE 3
619 628
620/* 629/*
@@ -627,7 +636,7 @@ int netxen_config_intr_coalesce(struct netxen_adapter *adapter)
627 636
628 memset(&req, 0, sizeof(nx_nic_req_t)); 637 memset(&req, 0, sizeof(nx_nic_req_t));
629 638
630 req.qhdr |= (NIC_REQUEST << 23); 639 req.qhdr |= (NX_NIC_REQUEST << 23);
631 req.req_hdr |= NETXEN_CONFIG_INTR_COALESCE; 640 req.req_hdr |= NETXEN_CONFIG_INTR_COALESCE;
632 req.req_hdr |= ((u64)adapter->portnum << 16); 641 req.req_hdr |= ((u64)adapter->portnum << 16);
633 642
@@ -653,6 +662,7 @@ int netxen_nic_change_mtu(struct net_device *netdev, int mtu)
653{ 662{
654 struct netxen_adapter *adapter = netdev_priv(netdev); 663 struct netxen_adapter *adapter = netdev_priv(netdev);
655 int max_mtu; 664 int max_mtu;
665 int rc = 0;
656 666
657 if (NX_IS_REVISION_P3(adapter->ahw.revision_id)) 667 if (NX_IS_REVISION_P3(adapter->ahw.revision_id))
658 max_mtu = P3_MAX_MTU; 668 max_mtu = P3_MAX_MTU;
@@ -666,16 +676,12 @@ int netxen_nic_change_mtu(struct net_device *netdev, int mtu)
666 } 676 }
667 677
668 if (adapter->set_mtu) 678 if (adapter->set_mtu)
669 adapter->set_mtu(adapter, mtu); 679 rc = adapter->set_mtu(adapter, mtu);
670 netdev->mtu = mtu;
671 680
672 mtu += MTU_FUDGE_FACTOR; 681 if (!rc)
673 if (NX_IS_REVISION_P3(adapter->ahw.revision_id)) 682 netdev->mtu = mtu;
674 nx_fw_cmd_set_mtu(adapter, mtu);
675 else if (adapter->set_mtu)
676 adapter->set_mtu(adapter, mtu);
677 683
678 return 0; 684 return rc;
679} 685}
680 686
681int netxen_is_flash_supported(struct netxen_adapter *adapter) 687int netxen_is_flash_supported(struct netxen_adapter *adapter)
@@ -1411,7 +1417,8 @@ static int netxen_nic_pci_mem_read_direct(struct netxen_adapter *adapter,
1411 (netxen_nic_pci_is_same_window(adapter, off+size-1) == 0)) { 1417 (netxen_nic_pci_is_same_window(adapter, off+size-1) == 0)) {
1412 write_unlock_irqrestore(&adapter->adapter_lock, flags); 1418 write_unlock_irqrestore(&adapter->adapter_lock, flags);
1413 printk(KERN_ERR "%s out of bound pci memory access. " 1419 printk(KERN_ERR "%s out of bound pci memory access. "
1414 "offset is 0x%llx\n", netxen_nic_driver_name, off); 1420 "offset is 0x%llx\n", netxen_nic_driver_name,
1421 (unsigned long long)off);
1415 return -1; 1422 return -1;
1416 } 1423 }
1417 1424
@@ -1484,7 +1491,8 @@ netxen_nic_pci_mem_write_direct(struct netxen_adapter *adapter, u64 off,
1484 (netxen_nic_pci_is_same_window(adapter, off+size-1) == 0)) { 1491 (netxen_nic_pci_is_same_window(adapter, off+size-1) == 0)) {
1485 write_unlock_irqrestore(&adapter->adapter_lock, flags); 1492 write_unlock_irqrestore(&adapter->adapter_lock, flags);
1486 printk(KERN_ERR "%s out of bound pci memory access. " 1493 printk(KERN_ERR "%s out of bound pci memory access. "
1487 "offset is 0x%llx\n", netxen_nic_driver_name, off); 1494 "offset is 0x%llx\n", netxen_nic_driver_name,
1495 (unsigned long long)off);
1488 return -1; 1496 return -1;
1489 } 1497 }
1490 1498
@@ -2016,6 +2024,8 @@ int netxen_nic_get_board_info(struct netxen_adapter *adapter)
2016 case NETXEN_BRDTYPE_P3_10G_CX4_LP: 2024 case NETXEN_BRDTYPE_P3_10G_CX4_LP:
2017 case NETXEN_BRDTYPE_P3_IMEZ: 2025 case NETXEN_BRDTYPE_P3_IMEZ:
2018 case NETXEN_BRDTYPE_P3_10G_SFP_PLUS: 2026 case NETXEN_BRDTYPE_P3_10G_SFP_PLUS:
2027 case NETXEN_BRDTYPE_P3_10G_SFP_CT:
2028 case NETXEN_BRDTYPE_P3_10G_SFP_QT:
2019 case NETXEN_BRDTYPE_P3_10G_XFP: 2029 case NETXEN_BRDTYPE_P3_10G_XFP:
2020 case NETXEN_BRDTYPE_P3_10000_BASE_T: 2030 case NETXEN_BRDTYPE_P3_10000_BASE_T:
2021 2031
@@ -2034,6 +2044,7 @@ int netxen_nic_get_board_info(struct netxen_adapter *adapter)
2034 default: 2044 default:
2035 printk("%s: Unknown(%x)\n", netxen_nic_driver_name, 2045 printk("%s: Unknown(%x)\n", netxen_nic_driver_name,
2036 boardinfo->board_type); 2046 boardinfo->board_type);
2047 rv = -ENODEV;
2037 break; 2048 break;
2038 } 2049 }
2039 2050
@@ -2044,6 +2055,7 @@ int netxen_nic_get_board_info(struct netxen_adapter *adapter)
2044 2055
2045int netxen_nic_set_mtu_gb(struct netxen_adapter *adapter, int new_mtu) 2056int netxen_nic_set_mtu_gb(struct netxen_adapter *adapter, int new_mtu)
2046{ 2057{
2058 new_mtu += MTU_FUDGE_FACTOR;
2047 netxen_nic_write_w0(adapter, 2059 netxen_nic_write_w0(adapter,
2048 NETXEN_NIU_GB_MAX_FRAME_SIZE(adapter->physical_port), 2060 NETXEN_NIU_GB_MAX_FRAME_SIZE(adapter->physical_port),
2049 new_mtu); 2061 new_mtu);
@@ -2052,7 +2064,7 @@ int netxen_nic_set_mtu_gb(struct netxen_adapter *adapter, int new_mtu)
2052 2064
2053int netxen_nic_set_mtu_xgb(struct netxen_adapter *adapter, int new_mtu) 2065int netxen_nic_set_mtu_xgb(struct netxen_adapter *adapter, int new_mtu)
2054{ 2066{
2055 new_mtu += NETXEN_NIU_HDRSIZE + NETXEN_NIU_TLRSIZE; 2067 new_mtu += MTU_FUDGE_FACTOR;
2056 if (adapter->physical_port == 0) 2068 if (adapter->physical_port == 0)
2057 netxen_nic_write_w0(adapter, NETXEN_NIU_XGE_MAX_FRAME_SIZE, 2069 netxen_nic_write_w0(adapter, NETXEN_NIU_XGE_MAX_FRAME_SIZE,
2058 new_mtu); 2070 new_mtu);
@@ -2074,12 +2086,22 @@ void netxen_nic_set_link_parameters(struct netxen_adapter *adapter)
2074 __u32 status; 2086 __u32 status;
2075 __u32 autoneg; 2087 __u32 autoneg;
2076 __u32 mode; 2088 __u32 mode;
2089 __u32 port_mode;
2077 2090
2078 netxen_nic_read_w0(adapter, NETXEN_NIU_MODE, &mode); 2091 netxen_nic_read_w0(adapter, NETXEN_NIU_MODE, &mode);
2079 if (netxen_get_niu_enable_ge(mode)) { /* Gb 10/100/1000 Mbps mode */ 2092 if (netxen_get_niu_enable_ge(mode)) { /* Gb 10/100/1000 Mbps mode */
2093
2094 adapter->hw_read_wx(adapter,
2095 NETXEN_PORT_MODE_ADDR, &port_mode, 4);
2096 if (port_mode == NETXEN_PORT_MODE_802_3_AP) {
2097 adapter->link_speed = SPEED_1000;
2098 adapter->link_duplex = DUPLEX_FULL;
2099 adapter->link_autoneg = AUTONEG_DISABLE;
2100 return;
2101 }
2102
2080 if (adapter->phy_read 2103 if (adapter->phy_read
2081 && adapter-> 2104 && adapter->phy_read(adapter,
2082 phy_read(adapter,
2083 NETXEN_NIU_GB_MII_MGMT_ADDR_PHY_STATUS, 2105 NETXEN_NIU_GB_MII_MGMT_ADDR_PHY_STATUS,
2084 &status) == 0) { 2106 &status) == 0) {
2085 if (netxen_get_phy_link(status)) { 2107 if (netxen_get_phy_link(status)) {
@@ -2109,8 +2131,7 @@ void netxen_nic_set_link_parameters(struct netxen_adapter *adapter)
2109 break; 2131 break;
2110 } 2132 }
2111 if (adapter->phy_read 2133 if (adapter->phy_read
2112 && adapter-> 2134 && adapter->phy_read(adapter,
2113 phy_read(adapter,
2114 NETXEN_NIU_GB_MII_MGMT_ADDR_AUTONEG, 2135 NETXEN_NIU_GB_MII_MGMT_ADDR_AUTONEG,
2115 &autoneg) != 0) 2136 &autoneg) != 0)
2116 adapter->link_autoneg = autoneg; 2137 adapter->link_autoneg = autoneg;
diff --git a/drivers/net/netxen/netxen_nic_hw.h b/drivers/net/netxen/netxen_nic_hw.h
index b8e0030f03d7..aae737dc77a8 100644
--- a/drivers/net/netxen/netxen_nic_hw.h
+++ b/drivers/net/netxen/netxen_nic_hw.h
@@ -419,12 +419,9 @@ typedef enum {
419#define netxen_get_niu_enable_ge(config_word) \ 419#define netxen_get_niu_enable_ge(config_word) \
420 _netxen_crb_get_bit(config_word, 1) 420 _netxen_crb_get_bit(config_word, 1)
421 421
422/* Promiscous mode options (GbE mode only) */ 422#define NETXEN_NIU_NON_PROMISC_MODE 0
423typedef enum { 423#define NETXEN_NIU_PROMISC_MODE 1
424 NETXEN_NIU_PROMISC_MODE = 0, 424#define NETXEN_NIU_ALLMULTI_MODE 2
425 NETXEN_NIU_NON_PROMISC_MODE,
426 NETXEN_NIU_ALLMULTI_MODE
427} netxen_niu_prom_mode_t;
428 425
429/* 426/*
430 * NIU GB Drop CRC Register 427 * NIU GB Drop CRC Register
@@ -471,9 +468,9 @@ typedef enum {
471 468
472/* Set promiscuous mode for a GbE interface */ 469/* Set promiscuous mode for a GbE interface */
473int netxen_niu_set_promiscuous_mode(struct netxen_adapter *adapter, 470int netxen_niu_set_promiscuous_mode(struct netxen_adapter *adapter,
474 netxen_niu_prom_mode_t mode); 471 u32 mode);
475int netxen_niu_xg_set_promiscuous_mode(struct netxen_adapter *adapter, 472int netxen_niu_xg_set_promiscuous_mode(struct netxen_adapter *adapter,
476 netxen_niu_prom_mode_t mode); 473 u32 mode);
477 474
478/* set the MAC address for a given MAC */ 475/* set the MAC address for a given MAC */
479int netxen_niu_macaddr_set(struct netxen_adapter *adapter, 476int netxen_niu_macaddr_set(struct netxen_adapter *adapter,
diff --git a/drivers/net/netxen/netxen_nic_init.c b/drivers/net/netxen/netxen_nic_init.c
index 01ab31b34a85..519fc860e17e 100644
--- a/drivers/net/netxen/netxen_nic_init.c
+++ b/drivers/net/netxen/netxen_nic_init.c
@@ -364,6 +364,11 @@ void netxen_initialize_adapter_ops(struct netxen_adapter *adapter)
364 default: 364 default:
365 break; 365 break;
366 } 366 }
367
368 if (NX_IS_REVISION_P3(adapter->ahw.revision_id)) {
369 adapter->set_mtu = nx_fw_cmd_set_mtu;
370 adapter->set_promisc = netxen_p3_nic_set_promisc;
371 }
367} 372}
368 373
369/* 374/*
diff --git a/drivers/net/netxen/netxen_nic_main.c b/drivers/net/netxen/netxen_nic_main.c
index 91d209a8f6cb..7615c715e66e 100644
--- a/drivers/net/netxen/netxen_nic_main.c
+++ b/drivers/net/netxen/netxen_nic_main.c
@@ -166,7 +166,8 @@ static void netxen_nic_disable_int(struct netxen_adapter *adapter)
166 if (!NETXEN_IS_MSI_FAMILY(adapter)) { 166 if (!NETXEN_IS_MSI_FAMILY(adapter)) {
167 do { 167 do {
168 adapter->pci_write_immediate(adapter, 168 adapter->pci_write_immediate(adapter,
169 ISR_INT_TARGET_STATUS, 0xffffffff); 169 adapter->legacy_intr.tgt_status_reg,
170 0xffffffff);
170 mask = adapter->pci_read_immediate(adapter, 171 mask = adapter->pci_read_immediate(adapter,
171 ISR_INT_VECTOR); 172 ISR_INT_VECTOR);
172 if (!(mask & 0x80)) 173 if (!(mask & 0x80))
@@ -175,7 +176,7 @@ static void netxen_nic_disable_int(struct netxen_adapter *adapter)
175 } while (--retries); 176 } while (--retries);
176 177
177 if (!retries) { 178 if (!retries) {
178 printk(KERN_NOTICE "%s: Failed to disable interrupt completely\n", 179 printk(KERN_NOTICE "%s: Failed to disable interrupt\n",
179 netxen_nic_driver_name); 180 netxen_nic_driver_name);
180 } 181 }
181 } else { 182 } else {
@@ -190,8 +191,6 @@ static void netxen_nic_enable_int(struct netxen_adapter *adapter)
190{ 191{
191 u32 mask; 192 u32 mask;
192 193
193 DPRINTK(1, INFO, "Entered ISR Enable \n");
194
195 if (adapter->intr_scheme != -1 && 194 if (adapter->intr_scheme != -1 &&
196 adapter->intr_scheme != INTR_SCHEME_PERPORT) { 195 adapter->intr_scheme != INTR_SCHEME_PERPORT) {
197 switch (adapter->ahw.board_type) { 196 switch (adapter->ahw.board_type) {
@@ -213,16 +212,13 @@ static void netxen_nic_enable_int(struct netxen_adapter *adapter)
213 212
214 if (!NETXEN_IS_MSI_FAMILY(adapter)) { 213 if (!NETXEN_IS_MSI_FAMILY(adapter)) {
215 mask = 0xbff; 214 mask = 0xbff;
216 if (adapter->intr_scheme != -1 && 215 if (adapter->intr_scheme == INTR_SCHEME_PERPORT)
217 adapter->intr_scheme != INTR_SCHEME_PERPORT) { 216 adapter->pci_write_immediate(adapter,
217 adapter->legacy_intr.tgt_mask_reg, mask);
218 else
218 adapter->pci_write_normalize(adapter, 219 adapter->pci_write_normalize(adapter,
219 CRB_INT_VECTOR, 0); 220 CRB_INT_VECTOR, 0);
220 }
221 adapter->pci_write_immediate(adapter,
222 ISR_INT_TARGET_MASK, mask);
223 } 221 }
224
225 DPRINTK(1, INFO, "Done with enable Int\n");
226} 222}
227 223
228static int nx_set_dma_mask(struct netxen_adapter *adapter, uint8_t revision_id) 224static int nx_set_dma_mask(struct netxen_adapter *adapter, uint8_t revision_id)
@@ -284,6 +280,8 @@ static void netxen_check_options(struct netxen_adapter *adapter)
284 case NETXEN_BRDTYPE_P3_10G_CX4_LP: 280 case NETXEN_BRDTYPE_P3_10G_CX4_LP:
285 case NETXEN_BRDTYPE_P3_IMEZ: 281 case NETXEN_BRDTYPE_P3_IMEZ:
286 case NETXEN_BRDTYPE_P3_10G_SFP_PLUS: 282 case NETXEN_BRDTYPE_P3_10G_SFP_PLUS:
283 case NETXEN_BRDTYPE_P3_10G_SFP_QT:
284 case NETXEN_BRDTYPE_P3_10G_SFP_CT:
287 case NETXEN_BRDTYPE_P3_10G_XFP: 285 case NETXEN_BRDTYPE_P3_10G_XFP:
288 case NETXEN_BRDTYPE_P3_10000_BASE_T: 286 case NETXEN_BRDTYPE_P3_10000_BASE_T:
289 adapter->msix_supported = !!use_msi_x; 287 adapter->msix_supported = !!use_msi_x;
@@ -301,6 +299,10 @@ static void netxen_check_options(struct netxen_adapter *adapter)
301 case NETXEN_BRDTYPE_P3_REF_QG: 299 case NETXEN_BRDTYPE_P3_REF_QG:
302 case NETXEN_BRDTYPE_P3_4_GB: 300 case NETXEN_BRDTYPE_P3_4_GB:
303 case NETXEN_BRDTYPE_P3_4_GB_MM: 301 case NETXEN_BRDTYPE_P3_4_GB_MM:
302 adapter->msix_supported = 0;
303 adapter->max_rx_desc_count = MAX_RCV_DESCRIPTORS_10G;
304 break;
305
304 case NETXEN_BRDTYPE_P2_SB35_4G: 306 case NETXEN_BRDTYPE_P2_SB35_4G:
305 case NETXEN_BRDTYPE_P2_SB31_2G: 307 case NETXEN_BRDTYPE_P2_SB31_2G:
306 adapter->msix_supported = 0; 308 adapter->msix_supported = 0;
@@ -700,13 +702,10 @@ netxen_nic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
700 adapter->status &= ~NETXEN_NETDEV_STATUS; 702 adapter->status &= ~NETXEN_NETDEV_STATUS;
701 adapter->rx_csum = 1; 703 adapter->rx_csum = 1;
702 adapter->mc_enabled = 0; 704 adapter->mc_enabled = 0;
703 if (NX_IS_REVISION_P3(revision_id)) { 705 if (NX_IS_REVISION_P3(revision_id))
704 adapter->max_mc_count = 38; 706 adapter->max_mc_count = 38;
705 adapter->max_rds_rings = 2; 707 else
706 } else {
707 adapter->max_mc_count = 16; 708 adapter->max_mc_count = 16;
708 adapter->max_rds_rings = 3;
709 }
710 709
711 netdev->open = netxen_nic_open; 710 netdev->open = netxen_nic_open;
712 netdev->stop = netxen_nic_close; 711 netdev->stop = netxen_nic_close;
@@ -779,10 +778,6 @@ netxen_nic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
779 if (adapter->portnum == 0) 778 if (adapter->portnum == 0)
780 first_driver = 1; 779 first_driver = 1;
781 } 780 }
782 adapter->crb_addr_cmd_producer = crb_cmd_producer[adapter->portnum];
783 adapter->crb_addr_cmd_consumer = crb_cmd_consumer[adapter->portnum];
784 netxen_nic_update_cmd_producer(adapter, 0);
785 netxen_nic_update_cmd_consumer(adapter, 0);
786 781
787 if (first_driver) { 782 if (first_driver) {
788 first_boot = adapter->pci_read_normalize(adapter, 783 first_boot = adapter->pci_read_normalize(adapter,
@@ -1053,6 +1048,11 @@ static int netxen_nic_open(struct net_device *netdev)
1053 return -EIO; 1048 return -EIO;
1054 } 1049 }
1055 1050
1051 if (adapter->fw_major < 4)
1052 adapter->max_rds_rings = 3;
1053 else
1054 adapter->max_rds_rings = 2;
1055
1056 err = netxen_alloc_sw_resources(adapter); 1056 err = netxen_alloc_sw_resources(adapter);
1057 if (err) { 1057 if (err) {
1058 printk(KERN_ERR "%s: Error in setting sw resources\n", 1058 printk(KERN_ERR "%s: Error in setting sw resources\n",
@@ -1074,10 +1074,10 @@ static int netxen_nic_open(struct net_device *netdev)
1074 crb_cmd_producer[adapter->portnum]; 1074 crb_cmd_producer[adapter->portnum];
1075 adapter->crb_addr_cmd_consumer = 1075 adapter->crb_addr_cmd_consumer =
1076 crb_cmd_consumer[adapter->portnum]; 1076 crb_cmd_consumer[adapter->portnum];
1077 }
1078 1077
1079 netxen_nic_update_cmd_producer(adapter, 0); 1078 netxen_nic_update_cmd_producer(adapter, 0);
1080 netxen_nic_update_cmd_consumer(adapter, 0); 1079 netxen_nic_update_cmd_consumer(adapter, 0);
1080 }
1081 1081
1082 for (ctx = 0; ctx < MAX_RCV_CTX; ++ctx) { 1082 for (ctx = 0; ctx < MAX_RCV_CTX; ++ctx) {
1083 for (ring = 0; ring < adapter->max_rds_rings; ring++) 1083 for (ring = 0; ring < adapter->max_rds_rings; ring++)
@@ -1113,9 +1113,7 @@ static int netxen_nic_open(struct net_device *netdev)
1113 netxen_nic_set_link_parameters(adapter); 1113 netxen_nic_set_link_parameters(adapter);
1114 1114
1115 netdev->set_multicast_list(netdev); 1115 netdev->set_multicast_list(netdev);
1116 if (NX_IS_REVISION_P3(adapter->ahw.revision_id)) 1116 if (adapter->set_mtu)
1117 nx_fw_cmd_set_mtu(adapter, netdev->mtu);
1118 else
1119 adapter->set_mtu(adapter, netdev->mtu); 1117 adapter->set_mtu(adapter, netdev->mtu);
1120 1118
1121 mod_timer(&adapter->watchdog_timer, jiffies); 1119 mod_timer(&adapter->watchdog_timer, jiffies);
@@ -1410,20 +1408,17 @@ static void netxen_nic_handle_phy_intr(struct netxen_adapter *adapter)
1410 1408
1411 port = adapter->physical_port; 1409 port = adapter->physical_port;
1412 1410
1413 if (adapter->ahw.board_type == NETXEN_NIC_GBE) { 1411 if (NX_IS_REVISION_P3(adapter->ahw.revision_id)) {
1414 val = adapter->pci_read_normalize(adapter, CRB_XG_STATE); 1412 val = adapter->pci_read_normalize(adapter, CRB_XG_STATE_P3);
1415 linkup = (val >> port) & 1; 1413 val = XG_LINK_STATE_P3(adapter->ahw.pci_func, val);
1414 linkup = (val == XG_LINK_UP_P3);
1416 } else { 1415 } else {
1417 if (adapter->fw_major < 4) { 1416 val = adapter->pci_read_normalize(adapter, CRB_XG_STATE);
1418 val = adapter->pci_read_normalize(adapter, 1417 if (adapter->ahw.board_type == NETXEN_NIC_GBE)
1419 CRB_XG_STATE); 1418 linkup = (val >> port) & 1;
1419 else {
1420 val = (val >> port*8) & 0xff; 1420 val = (val >> port*8) & 0xff;
1421 linkup = (val == XG_LINK_UP); 1421 linkup = (val == XG_LINK_UP);
1422 } else {
1423 val = adapter->pci_read_normalize(adapter,
1424 CRB_XG_STATE_P3);
1425 val = XG_LINK_STATE_P3(adapter->ahw.pci_func, val);
1426 linkup = (val == XG_LINK_UP_P3);
1427 } 1422 }
1428 } 1423 }
1429 1424
@@ -1535,15 +1530,33 @@ static irqreturn_t netxen_intr(int irq, void *data)
1535 struct netxen_adapter *adapter = data; 1530 struct netxen_adapter *adapter = data;
1536 u32 our_int = 0; 1531 u32 our_int = 0;
1537 1532
1538 our_int = adapter->pci_read_normalize(adapter, CRB_INT_VECTOR); 1533 u32 status = 0;
1539 /* not our interrupt */ 1534
1540 if ((our_int & (0x80 << adapter->portnum)) == 0) 1535 status = adapter->pci_read_immediate(adapter, ISR_INT_VECTOR);
1536
1537 if (!(status & adapter->legacy_intr.int_vec_bit))
1541 return IRQ_NONE; 1538 return IRQ_NONE;
1542 1539
1543 if (adapter->intr_scheme == INTR_SCHEME_PERPORT) { 1540 if (adapter->ahw.revision_id >= NX_P3_B1) {
1544 /* claim interrupt */ 1541 /* check interrupt state machine, to be sure */
1545 adapter->pci_write_normalize(adapter, CRB_INT_VECTOR, 1542 status = adapter->pci_read_immediate(adapter,
1543 ISR_INT_STATE_REG);
1544 if (!ISR_LEGACY_INT_TRIGGERED(status))
1545 return IRQ_NONE;
1546
1547 } else if (NX_IS_REVISION_P2(adapter->ahw.revision_id)) {
1548
1549 our_int = adapter->pci_read_normalize(adapter, CRB_INT_VECTOR);
1550 /* not our interrupt */
1551 if ((our_int & (0x80 << adapter->portnum)) == 0)
1552 return IRQ_NONE;
1553
1554 if (adapter->intr_scheme == INTR_SCHEME_PERPORT) {
1555 /* claim interrupt */
1556 adapter->pci_write_normalize(adapter,
1557 CRB_INT_VECTOR,
1546 our_int & ~((u32)(0x80 << adapter->portnum))); 1558 our_int & ~((u32)(0x80 << adapter->portnum)));
1559 }
1547 } 1560 }
1548 1561
1549 netxen_handle_int(adapter); 1562 netxen_handle_int(adapter);
diff --git a/drivers/net/netxen/netxen_nic_niu.c b/drivers/net/netxen/netxen_nic_niu.c
index 4cb8f4a1cf4b..27f07f6a45b1 100644
--- a/drivers/net/netxen/netxen_nic_niu.c
+++ b/drivers/net/netxen/netxen_nic_niu.c
@@ -610,6 +610,9 @@ int netxen_niu_macaddr_set(struct netxen_adapter *adapter,
610 int i; 610 int i;
611 DECLARE_MAC_BUF(mac); 611 DECLARE_MAC_BUF(mac);
612 612
613 if (NX_IS_REVISION_P3(adapter->ahw.revision_id))
614 return 0;
615
613 for (i = 0; i < 10; i++) { 616 for (i = 0; i < 10; i++) {
614 temp[0] = temp[1] = 0; 617 temp[0] = temp[1] = 0;
615 memcpy(temp + 2, addr, 2); 618 memcpy(temp + 2, addr, 2);
@@ -727,6 +730,9 @@ int netxen_niu_disable_gbe_port(struct netxen_adapter *adapter)
727 __u32 mac_cfg0; 730 __u32 mac_cfg0;
728 u32 port = adapter->physical_port; 731 u32 port = adapter->physical_port;
729 732
733 if (NX_IS_REVISION_P3(adapter->ahw.revision_id))
734 return 0;
735
730 if (port > NETXEN_NIU_MAX_GBE_PORTS) 736 if (port > NETXEN_NIU_MAX_GBE_PORTS)
731 return -EINVAL; 737 return -EINVAL;
732 mac_cfg0 = 0; 738 mac_cfg0 = 0;
@@ -743,6 +749,9 @@ int netxen_niu_disable_xg_port(struct netxen_adapter *adapter)
743 __u32 mac_cfg; 749 __u32 mac_cfg;
744 u32 port = adapter->physical_port; 750 u32 port = adapter->physical_port;
745 751
752 if (NX_IS_REVISION_P3(adapter->ahw.revision_id))
753 return 0;
754
746 if (port > NETXEN_NIU_MAX_XG_PORTS) 755 if (port > NETXEN_NIU_MAX_XG_PORTS)
747 return -EINVAL; 756 return -EINVAL;
748 757
@@ -755,7 +764,7 @@ int netxen_niu_disable_xg_port(struct netxen_adapter *adapter)
755 764
756/* Set promiscuous mode for a GbE interface */ 765/* Set promiscuous mode for a GbE interface */
757int netxen_niu_set_promiscuous_mode(struct netxen_adapter *adapter, 766int netxen_niu_set_promiscuous_mode(struct netxen_adapter *adapter,
758 netxen_niu_prom_mode_t mode) 767 u32 mode)
759{ 768{
760 __u32 reg; 769 __u32 reg;
761 u32 port = adapter->physical_port; 770 u32 port = adapter->physical_port;
@@ -819,6 +828,9 @@ int netxen_niu_xg_macaddr_set(struct netxen_adapter *adapter,
819 u8 temp[4]; 828 u8 temp[4];
820 u32 val; 829 u32 val;
821 830
831 if (NX_IS_REVISION_P3(adapter->ahw.revision_id))
832 return 0;
833
822 if ((phy < 0) || (phy > NETXEN_NIU_MAX_XG_PORTS)) 834 if ((phy < 0) || (phy > NETXEN_NIU_MAX_XG_PORTS))
823 return -EIO; 835 return -EIO;
824 836
@@ -894,7 +906,7 @@ int netxen_niu_xg_macaddr_get(struct netxen_adapter *adapter,
894#endif /* 0 */ 906#endif /* 0 */
895 907
896int netxen_niu_xg_set_promiscuous_mode(struct netxen_adapter *adapter, 908int netxen_niu_xg_set_promiscuous_mode(struct netxen_adapter *adapter,
897 netxen_niu_prom_mode_t mode) 909 u32 mode)
898{ 910{
899 __u32 reg; 911 __u32 reg;
900 u32 port = adapter->physical_port; 912 u32 port = adapter->physical_port;
diff --git a/drivers/net/netxen/netxen_nic_phan_reg.h b/drivers/net/netxen/netxen_nic_phan_reg.h
index 3bfa51b62a4f..83e5ee57bfef 100644
--- a/drivers/net/netxen/netxen_nic_phan_reg.h
+++ b/drivers/net/netxen/netxen_nic_phan_reg.h
@@ -95,8 +95,8 @@
95#define CRB_HOST_STS_PROD NETXEN_NIC_REG(0xdc) 95#define CRB_HOST_STS_PROD NETXEN_NIC_REG(0xdc)
96#define CRB_HOST_STS_CONS NETXEN_NIC_REG(0xe0) 96#define CRB_HOST_STS_CONS NETXEN_NIC_REG(0xe0)
97#define CRB_PEG_CMD_PROD NETXEN_NIC_REG(0xe4) 97#define CRB_PEG_CMD_PROD NETXEN_NIC_REG(0xe4)
98#define CRB_PEG_CMD_CONS NETXEN_NIC_REG(0xe8) 98#define CRB_PF_LINK_SPEED_1 NETXEN_NIC_REG(0xe8)
99#define CRB_HOST_BUFFER_PROD NETXEN_NIC_REG(0xec) 99#define CRB_PF_LINK_SPEED_2 NETXEN_NIC_REG(0xec)
100#define CRB_HOST_BUFFER_CONS NETXEN_NIC_REG(0xf0) 100#define CRB_HOST_BUFFER_CONS NETXEN_NIC_REG(0xf0)
101#define CRB_JUMBO_BUFFER_PROD NETXEN_NIC_REG(0xf4) 101#define CRB_JUMBO_BUFFER_PROD NETXEN_NIC_REG(0xf4)
102#define CRB_JUMBO_BUFFER_CONS NETXEN_NIC_REG(0xf8) 102#define CRB_JUMBO_BUFFER_CONS NETXEN_NIC_REG(0xf8)
diff --git a/drivers/net/ni5010.c b/drivers/net/ni5010.c
index a20005c09e07..8e0ca9f4e404 100644
--- a/drivers/net/ni5010.c
+++ b/drivers/net/ni5010.c
@@ -648,7 +648,6 @@ static void ni5010_set_multicast_list(struct net_device *dev)
648 PRINTK2((KERN_DEBUG "%s: entering set_multicast_list\n", dev->name)); 648 PRINTK2((KERN_DEBUG "%s: entering set_multicast_list\n", dev->name));
649 649
650 if (dev->flags&IFF_PROMISC || dev->flags&IFF_ALLMULTI || dev->mc_list) { 650 if (dev->flags&IFF_PROMISC || dev->flags&IFF_ALLMULTI || dev->mc_list) {
651 dev->flags |= IFF_PROMISC;
652 outb(RMD_PROMISC, EDLC_RMODE); /* Enable promiscuous mode */ 651 outb(RMD_PROMISC, EDLC_RMODE); /* Enable promiscuous mode */
653 PRINTK((KERN_DEBUG "%s: Entering promiscuous mode\n", dev->name)); 652 PRINTK((KERN_DEBUG "%s: Entering promiscuous mode\n", dev->name));
654 } else { 653 } else {
diff --git a/drivers/net/ni52.c b/drivers/net/ni52.c
index a316dcc8a06d..b9a882d362da 100644
--- a/drivers/net/ni52.c
+++ b/drivers/net/ni52.c
@@ -621,7 +621,7 @@ static int init586(struct net_device *dev)
621 if (num_addrs > len) { 621 if (num_addrs > len) {
622 printk(KERN_ERR "%s: switching to promisc. mode\n", 622 printk(KERN_ERR "%s: switching to promisc. mode\n",
623 dev->name); 623 dev->name);
624 dev->flags |= IFF_PROMISC; 624 writeb(0x01, &cfg_cmd->promisc);
625 } 625 }
626 } 626 }
627 if (dev->flags & IFF_PROMISC) 627 if (dev->flags & IFF_PROMISC)
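The ni5010 and ni52 hunks remove the same mistake: the multicast handler was writing IFF_PROMISC back into dev->flags. Those flags belong to the networking core; the handler should only read them (and the multicast list) and program the hardware receive filter to match. A sketch of that one-way flow, with a made-up register write standing in for the chip access:

#include <linux/netdevice.h>

#define EX_RX_PROMISC           0x01    /* hypothetical receive-mode values */
#define EX_RX_ALL_MULTICAST     0x02
#define EX_RX_NORMAL            0x00

static void example_hw_set_rx_mode(u8 mode)
{
        /* stand-in for the real register write, e.g. outb(mode, EDLC_RMODE) */
}

static void example_set_multicast_list(struct net_device *dev)
{
        if (dev->flags & IFF_PROMISC)
                example_hw_set_rx_mode(EX_RX_PROMISC);
        else if ((dev->flags & IFF_ALLMULTI) || dev->mc_list)
                example_hw_set_rx_mode(EX_RX_ALL_MULTICAST);
        else
                example_hw_set_rx_mode(EX_RX_NORMAL);
        /* dev->flags is never written here; only user space and the core change it */
}

The ni5010 case still escalates to hardware promiscuous reception when the multicast list is non-empty; the fix is only that it no longer pretends, via the flag, that the interface itself was switched to promiscuous mode.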
diff --git a/drivers/net/niu.c b/drivers/net/niu.c
index 8ee7d7bb951b..e4765b713aba 100644
--- a/drivers/net/niu.c
+++ b/drivers/net/niu.c
@@ -6417,7 +6417,7 @@ static int niu_ethflow_to_class(int flow_type, u64 *class)
6417 *class = CLASS_CODE_SCTP_IPV6; 6417 *class = CLASS_CODE_SCTP_IPV6;
6418 break; 6418 break;
6419 default: 6419 default:
6420 return -1; 6420 return 0;
6421 } 6421 }
6422 6422
6423 return 1; 6423 return 1;
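This one-line niu change makes the unsupported-flow-type case return 0 instead of -1. The function appears to be used as a boolean-style predicate (positive on success), so a caller can turn a zero result into a proper errno rather than propagating -1; a hedged sketch of that calling convention, with an illustrative caller and a stand-in for the static helper:

#include <linux/errno.h>
#include <linux/types.h>

/* stand-in for niu_ethflow_to_class(): 1 for a supported flow type, 0 otherwise */
static int example_ethflow_to_class(int flow_type, u64 *class);

static int example_set_hash_opts(int flow_type, u64 *class)
{
        if (!example_ethflow_to_class(flow_type, class))
                return -EINVAL;         /* unsupported flow type, clean error code */

        /* ... program the flow classifier for *class ... */
        return 0;
}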
diff --git a/drivers/net/ps3_gelic_wireless.c b/drivers/net/ps3_gelic_wireless.c
index 6b2dee0cf3a9..a834b52a6a2c 100644
--- a/drivers/net/ps3_gelic_wireless.c
+++ b/drivers/net/ps3_gelic_wireless.c
@@ -1024,7 +1024,7 @@ static int gelic_wl_set_encode(struct net_device *netdev,
1024 struct gelic_wl_info *wl = port_wl(netdev_priv(netdev)); 1024 struct gelic_wl_info *wl = port_wl(netdev_priv(netdev));
1025 struct iw_point *enc = &data->encoding; 1025 struct iw_point *enc = &data->encoding;
1026 __u16 flags; 1026 __u16 flags;
1027 unsigned int irqflag; 1027 unsigned long irqflag;
1028 int key_index, index_specified; 1028 int key_index, index_specified;
1029 int ret = 0; 1029 int ret = 0;
1030 1030
@@ -1097,7 +1097,7 @@ static int gelic_wl_get_encode(struct net_device *netdev,
1097{ 1097{
1098 struct gelic_wl_info *wl = port_wl(netdev_priv(netdev)); 1098 struct gelic_wl_info *wl = port_wl(netdev_priv(netdev));
1099 struct iw_point *enc = &data->encoding; 1099 struct iw_point *enc = &data->encoding;
1100 unsigned int irqflag; 1100 unsigned long irqflag;
1101 unsigned int key_index, index_specified; 1101 unsigned int key_index, index_specified;
1102 int ret = 0; 1102 int ret = 0;
1103 1103
@@ -1215,7 +1215,7 @@ static int gelic_wl_set_encodeext(struct net_device *netdev,
1215 struct iw_encode_ext *ext = (struct iw_encode_ext *)extra; 1215 struct iw_encode_ext *ext = (struct iw_encode_ext *)extra;
1216 __u16 alg; 1216 __u16 alg;
1217 __u16 flags; 1217 __u16 flags;
1218 unsigned int irqflag; 1218 unsigned long irqflag;
1219 int key_index; 1219 int key_index;
1220 int ret = 0; 1220 int ret = 0;
1221 1221
@@ -1303,7 +1303,7 @@ static int gelic_wl_get_encodeext(struct net_device *netdev,
1303 struct gelic_wl_info *wl = port_wl(netdev_priv(netdev)); 1303 struct gelic_wl_info *wl = port_wl(netdev_priv(netdev));
1304 struct iw_point *enc = &data->encoding; 1304 struct iw_point *enc = &data->encoding;
1305 struct iw_encode_ext *ext = (struct iw_encode_ext *)extra; 1305 struct iw_encode_ext *ext = (struct iw_encode_ext *)extra;
1306 unsigned int irqflag; 1306 unsigned long irqflag;
1307 int key_index; 1307 int key_index;
1308 int ret = 0; 1308 int ret = 0;
1309 int max_key_len; 1309 int max_key_len;
@@ -1426,7 +1426,7 @@ static int gelic_wl_priv_set_psk(struct net_device *net_dev,
1426{ 1426{
1427 struct gelic_wl_info *wl = port_wl(netdev_priv(net_dev)); 1427 struct gelic_wl_info *wl = port_wl(netdev_priv(net_dev));
1428 unsigned int len; 1428 unsigned int len;
1429 unsigned int irqflag; 1429 unsigned long irqflag;
1430 int ret = 0; 1430 int ret = 0;
1431 1431
1432 pr_debug("%s:<- len=%d\n", __func__, data->data.length); 1432 pr_debug("%s:<- len=%d\n", __func__, data->data.length);
@@ -1467,7 +1467,7 @@ static int gelic_wl_priv_get_psk(struct net_device *net_dev,
1467{ 1467{
1468 struct gelic_wl_info *wl = port_wl(netdev_priv(net_dev)); 1468 struct gelic_wl_info *wl = port_wl(netdev_priv(net_dev));
1469 char *p; 1469 char *p;
1470 unsigned int irqflag; 1470 unsigned long irqflag;
1471 unsigned int i; 1471 unsigned int i;
1472 1472
1473 pr_debug("%s:<-\n", __func__); 1473 pr_debug("%s:<-\n", __func__);
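Every gelic hunk is the same one-word fix: the variable handed to spin_lock_irqsave()/spin_unlock_irqrestore() must be an unsigned long, because the macro saves the architecture's full flags word into it; a narrower type is simply the wrong size for that value. Minimal usage sketch:

#include <linux/spinlock.h>

static DEFINE_SPINLOCK(example_lock);

static void example_critical_section(void)
{
        unsigned long irqflag;  /* must be unsigned long, never unsigned int */

        spin_lock_irqsave(&example_lock, irqflag);
        /* ... touch state shared with the interrupt handler ... */
        spin_unlock_irqrestore(&example_lock, irqflag);
}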
diff --git a/drivers/net/qla3xxx.c b/drivers/net/qla3xxx.c
index e82b37bbd6c3..3cdd07c45b6d 100644
--- a/drivers/net/qla3xxx.c
+++ b/drivers/net/qla3xxx.c
@@ -38,7 +38,7 @@
38 38
39#define DRV_NAME "qla3xxx" 39#define DRV_NAME "qla3xxx"
40#define DRV_STRING "QLogic ISP3XXX Network Driver" 40#define DRV_STRING "QLogic ISP3XXX Network Driver"
41#define DRV_VERSION "v2.03.00-k4" 41#define DRV_VERSION "v2.03.00-k5"
42#define PFX DRV_NAME " " 42#define PFX DRV_NAME " "
43 43
44static const char ql3xxx_driver_name[] = DRV_NAME; 44static const char ql3xxx_driver_name[] = DRV_NAME;
@@ -3495,8 +3495,6 @@ static void ql_set_mac_info(struct ql3_adapter *qdev)
3495 case ISP_CONTROL_FN0_NET: 3495 case ISP_CONTROL_FN0_NET:
3496 qdev->mac_index = 0; 3496 qdev->mac_index = 0;
3497 qdev->mac_ob_opcode = OUTBOUND_MAC_IOCB | func_number; 3497 qdev->mac_ob_opcode = OUTBOUND_MAC_IOCB | func_number;
3498 qdev->tcp_ob_opcode = OUTBOUND_TCP_IOCB | func_number;
3499 qdev->update_ob_opcode = UPDATE_NCB_IOCB | func_number;
3500 qdev->mb_bit_mask = FN0_MA_BITS_MASK; 3498 qdev->mb_bit_mask = FN0_MA_BITS_MASK;
3501 qdev->PHYAddr = PORT0_PHY_ADDRESS; 3499 qdev->PHYAddr = PORT0_PHY_ADDRESS;
3502 if (port_status & PORT_STATUS_SM0) 3500 if (port_status & PORT_STATUS_SM0)
@@ -3508,8 +3506,6 @@ static void ql_set_mac_info(struct ql3_adapter *qdev)
3508 case ISP_CONTROL_FN1_NET: 3506 case ISP_CONTROL_FN1_NET:
3509 qdev->mac_index = 1; 3507 qdev->mac_index = 1;
3510 qdev->mac_ob_opcode = OUTBOUND_MAC_IOCB | func_number; 3508 qdev->mac_ob_opcode = OUTBOUND_MAC_IOCB | func_number;
3511 qdev->tcp_ob_opcode = OUTBOUND_TCP_IOCB | func_number;
3512 qdev->update_ob_opcode = UPDATE_NCB_IOCB | func_number;
3513 qdev->mb_bit_mask = FN1_MA_BITS_MASK; 3509 qdev->mb_bit_mask = FN1_MA_BITS_MASK;
3514 qdev->PHYAddr = PORT1_PHY_ADDRESS; 3510 qdev->PHYAddr = PORT1_PHY_ADDRESS;
3515 if (port_status & PORT_STATUS_SM1) 3511 if (port_status & PORT_STATUS_SM1)
@@ -3730,14 +3726,6 @@ static int ql3xxx_open(struct net_device *ndev)
3730 return (ql_adapter_up(qdev)); 3726 return (ql_adapter_up(qdev));
3731} 3727}
3732 3728
3733static void ql3xxx_set_multicast_list(struct net_device *ndev)
3734{
3735 /*
3736 * We are manually parsing the list in the net_device structure.
3737 */
3738 return;
3739}
3740
3741static int ql3xxx_set_mac_address(struct net_device *ndev, void *p) 3729static int ql3xxx_set_mac_address(struct net_device *ndev, void *p)
3742{ 3730{
3743 struct ql3_adapter *qdev = (struct ql3_adapter *)netdev_priv(ndev); 3731 struct ql3_adapter *qdev = (struct ql3_adapter *)netdev_priv(ndev);
@@ -4007,7 +3995,11 @@ static int __devinit ql3xxx_probe(struct pci_dev *pdev,
4007 ndev->open = ql3xxx_open; 3995 ndev->open = ql3xxx_open;
4008 ndev->hard_start_xmit = ql3xxx_send; 3996 ndev->hard_start_xmit = ql3xxx_send;
4009 ndev->stop = ql3xxx_close; 3997 ndev->stop = ql3xxx_close;
4010 ndev->set_multicast_list = ql3xxx_set_multicast_list; 3998 /* ndev->set_multicast_list
3999 * This device is one side of a two-function adapter
4000 * (NIC and iSCSI). Promiscuous mode setting/clearing is
4001 * not allowed from the NIC side.
4002 */
4011 SET_ETHTOOL_OPS(ndev, &ql3xxx_ethtool_ops); 4003 SET_ETHTOOL_OPS(ndev, &ql3xxx_ethtool_ops);
4012 ndev->set_mac_address = ql3xxx_set_mac_address; 4004 ndev->set_mac_address = ql3xxx_set_mac_address;
4013 ndev->tx_timeout = ql3xxx_tx_timeout; 4005 ndev->tx_timeout = ql3xxx_tx_timeout;
@@ -4040,9 +4032,6 @@ static int __devinit ql3xxx_probe(struct pci_dev *pdev,
4040 4032
4041 ndev->tx_queue_len = NUM_REQ_Q_ENTRIES; 4033 ndev->tx_queue_len = NUM_REQ_Q_ENTRIES;
4042 4034
4043 /* Turn off support for multicasting */
4044 ndev->flags &= ~IFF_MULTICAST;
4045
4046 /* Record PCI bus information. */ 4035 /* Record PCI bus information. */
4047 ql_get_board_info(qdev); 4036 ql_get_board_info(qdev);
4048 4037
diff --git a/drivers/net/qla3xxx.h b/drivers/net/qla3xxx.h
index 58a086fddec6..7113e71b15a1 100644
--- a/drivers/net/qla3xxx.h
+++ b/drivers/net/qla3xxx.h
@@ -14,24 +14,14 @@
14 14
15#define OPCODE_OB_MAC_IOCB_FN0 0x01 15#define OPCODE_OB_MAC_IOCB_FN0 0x01
16#define OPCODE_OB_MAC_IOCB_FN2 0x21 16#define OPCODE_OB_MAC_IOCB_FN2 0x21
17#define OPCODE_OB_TCP_IOCB_FN0 0x03
18#define OPCODE_OB_TCP_IOCB_FN2 0x23
19#define OPCODE_UPDATE_NCB_IOCB_FN0 0x00
20#define OPCODE_UPDATE_NCB_IOCB_FN2 0x20
21 17
22#define OPCODE_UPDATE_NCB_IOCB 0xF0
23#define OPCODE_IB_MAC_IOCB 0xF9 18#define OPCODE_IB_MAC_IOCB 0xF9
24#define OPCODE_IB_3032_MAC_IOCB 0x09 19#define OPCODE_IB_3032_MAC_IOCB 0x09
25#define OPCODE_IB_IP_IOCB 0xFA 20#define OPCODE_IB_IP_IOCB 0xFA
26#define OPCODE_IB_3032_IP_IOCB 0x0A 21#define OPCODE_IB_3032_IP_IOCB 0x0A
27#define OPCODE_IB_TCP_IOCB 0xFB
28#define OPCODE_DUMP_PROTO_IOCB 0xFE
29#define OPCODE_BUFFER_ALERT_IOCB 0xFB
30 22
31#define OPCODE_FUNC_ID_MASK 0x30 23#define OPCODE_FUNC_ID_MASK 0x30
32#define OUTBOUND_MAC_IOCB 0x01 /* plus function bits */ 24#define OUTBOUND_MAC_IOCB 0x01 /* plus function bits */
33#define OUTBOUND_TCP_IOCB 0x03 /* plus function bits */
34#define UPDATE_NCB_IOCB 0x00 /* plus function bits */
35 25
36#define FN0_MA_BITS_MASK 0x00 26#define FN0_MA_BITS_MASK 0x00
37#define FN1_MA_BITS_MASK 0x80 27#define FN1_MA_BITS_MASK 0x80
@@ -159,75 +149,6 @@ struct ob_ip_iocb_rsp {
159 __le32 reserved2; 149 __le32 reserved2;
160}; 150};
161 151
162struct ob_tcp_iocb_req {
163 u8 opcode;
164
165 u8 flags0;
166#define OB_TCP_IOCB_REQ_P 0x80
167#define OB_TCP_IOCB_REQ_CI 0x20
168#define OB_TCP_IOCB_REQ_H 0x10
169#define OB_TCP_IOCB_REQ_LN 0x08
170#define OB_TCP_IOCB_REQ_K 0x04
171#define OB_TCP_IOCB_REQ_D 0x02
172#define OB_TCP_IOCB_REQ_I 0x01
173
174 u8 flags1;
175#define OB_TCP_IOCB_REQ_OSM 0x40
176#define OB_TCP_IOCB_REQ_URG 0x20
177#define OB_TCP_IOCB_REQ_ACK 0x10
178#define OB_TCP_IOCB_REQ_PSH 0x08
179#define OB_TCP_IOCB_REQ_RST 0x04
180#define OB_TCP_IOCB_REQ_SYN 0x02
181#define OB_TCP_IOCB_REQ_FIN 0x01
182
183 u8 options_len;
184#define OB_TCP_IOCB_REQ_OMASK 0xF0
185#define OB_TCP_IOCB_REQ_SHIFT 4
186
187 __le32 transaction_id;
188 __le32 data_len;
189 __le32 hncb_ptr_low;
190 __le32 hncb_ptr_high;
191 __le32 buf_addr0_low;
192 __le32 buf_addr0_high;
193 __le32 buf_0_len;
194 __le32 buf_addr1_low;
195 __le32 buf_addr1_high;
196 __le32 buf_1_len;
197 __le32 buf_addr2_low;
198 __le32 buf_addr2_high;
199 __le32 buf_2_len;
200 __le32 time_stamp;
201 __le32 reserved1;
202};
203
204struct ob_tcp_iocb_rsp {
205 u8 opcode;
206
207 u8 flags0;
208#define OB_TCP_IOCB_RSP_C 0x20
209#define OB_TCP_IOCB_RSP_H 0x10
210#define OB_TCP_IOCB_RSP_LN 0x08
211#define OB_TCP_IOCB_RSP_K 0x04
212#define OB_TCP_IOCB_RSP_D 0x02
213#define OB_TCP_IOCB_RSP_I 0x01
214
215 u8 flags1;
216#define OB_TCP_IOCB_RSP_E 0x10
217#define OB_TCP_IOCB_RSP_W 0x08
218#define OB_TCP_IOCB_RSP_P 0x04
219#define OB_TCP_IOCB_RSP_T 0x02
220#define OB_TCP_IOCB_RSP_F 0x01
221
222 u8 state;
223#define OB_TCP_IOCB_RSP_SMASK 0xF0
224#define OB_TCP_IOCB_RSP_SHIFT 4
225
226 __le32 transaction_id;
227 __le32 local_ncb_ptr;
228 __le32 reserved0;
229};
230
231struct ib_ip_iocb_rsp { 152struct ib_ip_iocb_rsp {
232 u8 opcode; 153 u8 opcode;
233#define IB_IP_IOCB_RSP_3032_V 0x80 154#define IB_IP_IOCB_RSP_3032_V 0x80
@@ -256,25 +177,6 @@ struct ib_ip_iocb_rsp {
256 __le32 ial_high; 177 __le32 ial_high;
257}; 178};
258 179
259struct ib_tcp_iocb_rsp {
260 u8 opcode;
261 u8 flags;
262#define IB_TCP_IOCB_RSP_P 0x80
263#define IB_TCP_IOCB_RSP_T 0x40
264#define IB_TCP_IOCB_RSP_D 0x20
265#define IB_TCP_IOCB_RSP_N 0x10
266#define IB_TCP_IOCB_RSP_IP 0x03
267#define IB_TCP_FLAG_MASK 0xf0
268#define IB_TCP_FLAG_IOCB_SYN 0x00
269
270#define TCP_IB_RSP_FLAGS(x) (x->flags & ~IB_TCP_FLAG_MASK)
271
272 __le16 length;
273 __le32 hncb_ref_num;
274 __le32 ial_low;
275 __le32 ial_high;
276};
277
278struct net_rsp_iocb { 180struct net_rsp_iocb {
279 u8 opcode; 181 u8 opcode;
280 u8 flags; 182 u8 flags;
@@ -1266,20 +1168,13 @@ struct ql3_adapter {
1266 u32 small_buf_release_cnt; 1168 u32 small_buf_release_cnt;
1267 u32 small_buf_total_size; 1169 u32 small_buf_total_size;
1268 1170
1269 /* ISR related, saves status for DPC. */
1270 u32 control_status;
1271
1272 struct eeprom_data nvram_data; 1171 struct eeprom_data nvram_data;
1273 struct timer_list ioctl_timer;
1274 u32 port_link_state; 1172 u32 port_link_state;
1275 u32 last_rsp_offset;
1276 1173
1277 /* 4022 specific */ 1174 /* 4022 specific */
1278 u32 mac_index; /* Driver's MAC number can be 0 or 1 for first and second networking functions respectively */ 1175 u32 mac_index; /* Driver's MAC number can be 0 or 1 for first and second networking functions respectively */
1279 u32 PHYAddr; /* Address of PHY 0x1e00 Port 0 and 0x1f00 Port 1 */ 1176 u32 PHYAddr; /* Address of PHY 0x1e00 Port 0 and 0x1f00 Port 1 */
1280 u32 mac_ob_opcode; /* Opcode to use on mac transmission */ 1177 u32 mac_ob_opcode; /* Opcode to use on mac transmission */
1281 u32 tcp_ob_opcode; /* Opcode to use on tcp transmission */
1282 u32 update_ob_opcode; /* Opcode to use for updating NCB */
1283 u32 mb_bit_mask; /* MA Bits mask to use on transmission */ 1178 u32 mb_bit_mask; /* MA Bits mask to use on transmission */
1284 u32 numPorts; 1179 u32 numPorts;
1285 struct workqueue_struct *workqueue; 1180 struct workqueue_struct *workqueue;
diff --git a/drivers/net/s2io.c b/drivers/net/s2io.c
index 86d77d05190a..a2b073097e5c 100644
--- a/drivers/net/s2io.c
+++ b/drivers/net/s2io.c
@@ -3143,7 +3143,7 @@ static void tx_intr_handler(struct fifo_info *fifo_data)
3143 pkt_cnt++; 3143 pkt_cnt++;
3144 3144
3145 /* Updating the statistics block */ 3145 /* Updating the statistics block */
3146 nic->stats.tx_bytes += skb->len; 3146 nic->dev->stats.tx_bytes += skb->len;
3147 nic->mac_control.stats_info->sw_stat.mem_freed += skb->truesize; 3147 nic->mac_control.stats_info->sw_stat.mem_freed += skb->truesize;
3148 dev_kfree_skb_irq(skb); 3148 dev_kfree_skb_irq(skb);
3149 3149
@@ -4896,25 +4896,42 @@ static struct net_device_stats *s2io_get_stats(struct net_device *dev)
4896 /* Configure Stats for immediate updt */ 4896 /* Configure Stats for immediate updt */
4897 s2io_updt_stats(sp); 4897 s2io_updt_stats(sp);
4898 4898
4899 /* Using sp->stats as a staging area, because reset (due to mtu
4900 change, for example) will clear some hardware counters */
4901 dev->stats.tx_packets +=
4902 le32_to_cpu(mac_control->stats_info->tmac_frms) -
4903 sp->stats.tx_packets;
4899 sp->stats.tx_packets = 4904 sp->stats.tx_packets =
4900 le32_to_cpu(mac_control->stats_info->tmac_frms); 4905 le32_to_cpu(mac_control->stats_info->tmac_frms);
4906 dev->stats.tx_errors +=
4907 le32_to_cpu(mac_control->stats_info->tmac_any_err_frms) -
4908 sp->stats.tx_errors;
4901 sp->stats.tx_errors = 4909 sp->stats.tx_errors =
4902 le32_to_cpu(mac_control->stats_info->tmac_any_err_frms); 4910 le32_to_cpu(mac_control->stats_info->tmac_any_err_frms);
4911 dev->stats.rx_errors +=
4912 le64_to_cpu(mac_control->stats_info->rmac_drop_frms) -
4913 sp->stats.rx_errors;
4903 sp->stats.rx_errors = 4914 sp->stats.rx_errors =
4904 le64_to_cpu(mac_control->stats_info->rmac_drop_frms); 4915 le64_to_cpu(mac_control->stats_info->rmac_drop_frms);
4916 dev->stats.multicast =
4917 le32_to_cpu(mac_control->stats_info->rmac_vld_mcst_frms) -
4918 sp->stats.multicast;
4905 sp->stats.multicast = 4919 sp->stats.multicast =
4906 le32_to_cpu(mac_control->stats_info->rmac_vld_mcst_frms); 4920 le32_to_cpu(mac_control->stats_info->rmac_vld_mcst_frms);
4921 dev->stats.rx_length_errors =
4922 le64_to_cpu(mac_control->stats_info->rmac_long_frms) -
4923 sp->stats.rx_length_errors;
4907 sp->stats.rx_length_errors = 4924 sp->stats.rx_length_errors =
4908 le64_to_cpu(mac_control->stats_info->rmac_long_frms); 4925 le64_to_cpu(mac_control->stats_info->rmac_long_frms);
4909 4926
4910 /* collect per-ring rx_packets and rx_bytes */ 4927 /* collect per-ring rx_packets and rx_bytes */
4911 sp->stats.rx_packets = sp->stats.rx_bytes = 0; 4928 dev->stats.rx_packets = dev->stats.rx_bytes = 0;
4912 for (i = 0; i < config->rx_ring_num; i++) { 4929 for (i = 0; i < config->rx_ring_num; i++) {
4913 sp->stats.rx_packets += mac_control->rings[i].rx_packets; 4930 dev->stats.rx_packets += mac_control->rings[i].rx_packets;
4914 sp->stats.rx_bytes += mac_control->rings[i].rx_bytes; 4931 dev->stats.rx_bytes += mac_control->rings[i].rx_bytes;
4915 } 4932 }
4916 4933
4917 return (&sp->stats); 4934 return (&dev->stats);
4918} 4935}
4919 4936
4920/** 4937/**
@@ -7419,7 +7436,7 @@ static int rx_osm_handler(struct ring_info *ring_data, struct RxD_t * rxdp)
7419 if (err_mask != 0x5) { 7436 if (err_mask != 0x5) {
7420 DBG_PRINT(ERR_DBG, "%s: Rx error Value: 0x%x\n", 7437 DBG_PRINT(ERR_DBG, "%s: Rx error Value: 0x%x\n",
7421 dev->name, err_mask); 7438 dev->name, err_mask);
7422 sp->stats.rx_crc_errors++; 7439 dev->stats.rx_crc_errors++;
7423 sp->mac_control.stats_info->sw_stat.mem_freed 7440 sp->mac_control.stats_info->sw_stat.mem_freed
7424 += skb->truesize; 7441 += skb->truesize;
7425 dev_kfree_skb(skb); 7442 dev_kfree_skb(skb);
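The s2io get_stats rework stops handing sp->stats straight back to the stack. Instead sp->stats holds only the most recent raw hardware readings, and each call folds the increment since the previous reading into dev->stats, which is what s2io_get_stats() now returns; that way the running totals survive events (an MTU-change reset, for example) that clear the hardware counters. The pattern, reduced to a single counter with placeholder names:

#include <linux/types.h>

struct example_nic {
        unsigned long tx_frames_total;          /* reported total (dev->stats role) */
        unsigned long tx_frames_last_read;      /* raw hardware snapshot (sp->stats role) */
};

/* placeholder for reading the raw hardware frame counter */
static unsigned long example_hw_read_tx_frames(void)
{
        return 0;       /* real code reads a device statistics register */
}

static unsigned long example_get_tx_frames(struct example_nic *nic)
{
        unsigned long raw = example_hw_read_tx_frames();

        /* fold in only what changed since the last read, then remember the raw
         * value; the sketch assumes the snapshot is zeroed whenever the hardware
         * counter itself is cleared */
        nic->tx_frames_total += raw - nic->tx_frames_last_read;
        nic->tx_frames_last_read = raw;

        return nic->tx_frames_total;
}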
diff --git a/drivers/net/sh_eth.c b/drivers/net/sh_eth.c
index c69ba1395fa9..25e62cf58d3a 100644
--- a/drivers/net/sh_eth.c
+++ b/drivers/net/sh_eth.c
@@ -1,7 +1,7 @@
1/* 1/*
2 * SuperH Ethernet device driver 2 * SuperH Ethernet device driver
3 * 3 *
4 * Copyright (C) 2006,2007 Nobuhiro Iwamatsu 4 * Copyright (C) 2006-2008 Nobuhiro Iwamatsu
5 * Copyright (C) 2008 Renesas Solutions Corp. 5 * Copyright (C) 2008 Renesas Solutions Corp.
6 * 6 *
7 * This program is free software; you can redistribute it and/or modify it 7 * This program is free software; you can redistribute it and/or modify it
@@ -34,6 +34,29 @@
34 34
35#include "sh_eth.h" 35#include "sh_eth.h"
36 36
37/* CPU <-> EDMAC endian convert */
38static inline __u32 cpu_to_edmac(struct sh_eth_private *mdp, u32 x)
39{
40 switch (mdp->edmac_endian) {
41 case EDMAC_LITTLE_ENDIAN:
42 return cpu_to_le32(x);
43 case EDMAC_BIG_ENDIAN:
44 return cpu_to_be32(x);
45 }
46 return x;
47}
48
49static inline __u32 edmac_to_cpu(struct sh_eth_private *mdp, u32 x)
50{
51 switch (mdp->edmac_endian) {
52 case EDMAC_LITTLE_ENDIAN:
53 return le32_to_cpu(x);
54 case EDMAC_BIG_ENDIAN:
55 return be32_to_cpu(x);
56 }
57 return x;
58}
59
37/* 60/*
38 * Program the hardware MAC address from dev->dev_addr. 61 * Program the hardware MAC address from dev->dev_addr.
39 */ 62 */
@@ -143,13 +166,39 @@ static struct mdiobb_ops bb_ops = {
143 .get_mdio_data = sh_get_mdio, 166 .get_mdio_data = sh_get_mdio,
144}; 167};
145 168
169/* Chip Reset */
146static void sh_eth_reset(struct net_device *ndev) 170static void sh_eth_reset(struct net_device *ndev)
147{ 171{
148 u32 ioaddr = ndev->base_addr; 172 u32 ioaddr = ndev->base_addr;
149 173
174#if defined(CONFIG_CPU_SUBTYPE_SH7763)
175 int cnt = 100;
176
177 ctrl_outl(EDSR_ENALL, ioaddr + EDSR);
178 ctrl_outl(ctrl_inl(ioaddr + EDMR) | EDMR_SRST, ioaddr + EDMR);
179 while (cnt > 0) {
180 if (!(ctrl_inl(ioaddr + EDMR) & 0x3))
181 break;
182 mdelay(1);
183 cnt--;
184 }
185 if (cnt < 0)
186 printk(KERN_ERR "Device reset fail\n");
187
188 /* Table Init */
189 ctrl_outl(0x0, ioaddr + TDLAR);
190 ctrl_outl(0x0, ioaddr + TDFAR);
191 ctrl_outl(0x0, ioaddr + TDFXR);
192 ctrl_outl(0x0, ioaddr + TDFFR);
193 ctrl_outl(0x0, ioaddr + RDLAR);
194 ctrl_outl(0x0, ioaddr + RDFAR);
195 ctrl_outl(0x0, ioaddr + RDFXR);
196 ctrl_outl(0x0, ioaddr + RDFFR);
197#else
150 ctrl_outl(ctrl_inl(ioaddr + EDMR) | EDMR_SRST, ioaddr + EDMR); 198 ctrl_outl(ctrl_inl(ioaddr + EDMR) | EDMR_SRST, ioaddr + EDMR);
151 mdelay(3); 199 mdelay(3);
152 ctrl_outl(ctrl_inl(ioaddr + EDMR) & ~EDMR_SRST, ioaddr + EDMR); 200 ctrl_outl(ctrl_inl(ioaddr + EDMR) & ~EDMR_SRST, ioaddr + EDMR);
201#endif
153} 202}
154 203
155/* free skb and descriptor buffer */ 204/* free skb and descriptor buffer */
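For SH7763 parts the soft reset is no longer a fixed mdelay(): the code asserts the reset bits via EDSR/EDMR and then polls EDMR until both reset bits clear, giving up after a bounded number of 1 ms steps before re-initialising the descriptor pointer registers. A generic sketch of that wait-for-bits-clear loop; the register access and mask are placeholders, and the sketch treats an exhausted counter (cnt reaching 0) as the timeout case:

#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/types.h>

#define EXAMPLE_RESET_TIMEOUT_MS        100

/* placeholder for a real status read such as ctrl_inl(ioaddr + EDMR) */
static u32 example_read_reset_status(void)
{
        return 0;       /* 0 here means "reset already complete" */
}

static int example_wait_for_reset(void)
{
        int cnt = EXAMPLE_RESET_TIMEOUT_MS;

        while (cnt > 0) {
                if (!(example_read_reset_status() & 0x3))       /* both reset bits clear */
                        return 0;
                mdelay(1);
                cnt--;
        }
        printk(KERN_ERR "example: device reset timed out\n");
        return -ETIMEDOUT;
}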
@@ -180,6 +229,7 @@ static void sh_eth_ring_free(struct net_device *ndev)
180/* format skb and descriptor buffer */ 229/* format skb and descriptor buffer */
181static void sh_eth_ring_format(struct net_device *ndev) 230static void sh_eth_ring_format(struct net_device *ndev)
182{ 231{
232 u32 ioaddr = ndev->base_addr, reserve = 0;
183 struct sh_eth_private *mdp = netdev_priv(ndev); 233 struct sh_eth_private *mdp = netdev_priv(ndev);
184 int i; 234 int i;
185 struct sk_buff *skb; 235 struct sk_buff *skb;
@@ -201,22 +251,41 @@ static void sh_eth_ring_format(struct net_device *ndev)
201 mdp->rx_skbuff[i] = skb; 251 mdp->rx_skbuff[i] = skb;
202 if (skb == NULL) 252 if (skb == NULL)
203 break; 253 break;
204 skb->dev = ndev; /* Mark as being used by this device. */ 254 skb->dev = ndev; /* Mark as being used by this device. */
255#if defined(CONFIG_CPU_SUBTYPE_SH7763)
256 reserve = SH7763_SKB_ALIGN
257 - ((uint32_t)skb->data & (SH7763_SKB_ALIGN-1));
258 if (reserve)
259 skb_reserve(skb, reserve);
260#else
205 skb_reserve(skb, RX_OFFSET); 261 skb_reserve(skb, RX_OFFSET);
206 262#endif
207 /* RX descriptor */ 263 /* RX descriptor */
208 rxdesc = &mdp->rx_ring[i]; 264 rxdesc = &mdp->rx_ring[i];
209 rxdesc->addr = (u32)skb->data & ~0x3UL; 265 rxdesc->addr = (u32)skb->data & ~0x3UL;
210 rxdesc->status = cpu_to_le32(RD_RACT | RD_RFP); 266 rxdesc->status = cpu_to_edmac(mdp, RD_RACT | RD_RFP);
211 267
212 /* The size of the buffer is 16 byte boundary. */ 268 /* The size of the buffer is 16 byte boundary. */
213 rxdesc->buffer_length = (mdp->rx_buf_sz + 16) & ~0x0F; 269 rxdesc->buffer_length = (mdp->rx_buf_sz + 16) & ~0x0F;
270 /* Rx descriptor address set */
271 if (i == 0) {
272 ctrl_outl((u32)rxdesc, ioaddr + RDLAR);
273#if defined(CONFIG_CPU_SUBTYPE_SH7763)
274 ctrl_outl((u32)rxdesc, ioaddr + RDFAR);
275#endif
276 }
214 } 277 }
215 278
279 /* Rx descriptor address set */
280#if defined(CONFIG_CPU_SUBTYPE_SH7763)
281 ctrl_outl((u32)rxdesc, ioaddr + RDFXR);
282 ctrl_outl(0x1, ioaddr + RDFFR);
283#endif
284
216 mdp->dirty_rx = (u32) (i - RX_RING_SIZE); 285 mdp->dirty_rx = (u32) (i - RX_RING_SIZE);
217 286
218 /* Mark the last entry as wrapping the ring. */ 287 /* Mark the last entry as wrapping the ring. */
219 rxdesc->status |= cpu_to_le32(RC_RDEL); 288 rxdesc->status |= cpu_to_edmac(mdp, RD_RDEL);
220 289
221 memset(mdp->tx_ring, 0, tx_ringsize); 290 memset(mdp->tx_ring, 0, tx_ringsize);
222 291
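On the SH7763 path the receive buffer is aligned before it is handed to the EDMAC: rather than the fixed skb_reserve(skb, RX_OFFSET), the code measures how far skb->data sits from the next SH7763_SKB_ALIGN (32-byte) boundary and reserves that many bytes. The alignment arithmetic in isolation, with the boundary as a stand-in constant:

#include <linux/skbuff.h>

#define EXAMPLE_DMA_ALIGN       32      /* stand-in for SH7763_SKB_ALIGN; power of two */

/* Advance skb->data to the next EXAMPLE_DMA_ALIGN boundary if it is not
 * already aligned.  skb_reserve() only moves pointers, so this must happen
 * before any data is placed in the buffer. */
static void example_align_rx_skb(struct sk_buff *skb)
{
        unsigned long misalign = (unsigned long)skb->data & (EXAMPLE_DMA_ALIGN - 1);

        if (misalign)
                skb_reserve(skb, EXAMPLE_DMA_ALIGN - misalign);
}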
@@ -224,11 +293,24 @@ static void sh_eth_ring_format(struct net_device *ndev)
224 for (i = 0; i < TX_RING_SIZE; i++) { 293 for (i = 0; i < TX_RING_SIZE; i++) {
225 mdp->tx_skbuff[i] = NULL; 294 mdp->tx_skbuff[i] = NULL;
226 txdesc = &mdp->tx_ring[i]; 295 txdesc = &mdp->tx_ring[i];
227 txdesc->status = cpu_to_le32(TD_TFP); 296 txdesc->status = cpu_to_edmac(mdp, TD_TFP);
228 txdesc->buffer_length = 0; 297 txdesc->buffer_length = 0;
298 if (i == 0) {
299 /* Tx descriptor address set */
300 ctrl_outl((u32)txdesc, ioaddr + TDLAR);
301#if defined(CONFIG_CPU_SUBTYPE_SH7763)
302 ctrl_outl((u32)txdesc, ioaddr + TDFAR);
303#endif
304 }
229 } 305 }
230 306
231 txdesc->status |= cpu_to_le32(TD_TDLE); 307 /* Tx descriptor address set */
308#if defined(CONFIG_CPU_SUBTYPE_SH7763)
309 ctrl_outl((u32)txdesc, ioaddr + TDFXR);
310 ctrl_outl(0x1, ioaddr + TDFFR);
311#endif
312
313 txdesc->status |= cpu_to_edmac(mdp, TD_TDLE);
232} 314}
233 315
234/* Get skb and descriptor buffer */ 316/* Get skb and descriptor buffer */
@@ -311,31 +393,43 @@ static int sh_eth_dev_init(struct net_device *ndev)
311 /* Soft Reset */ 393 /* Soft Reset */
312 sh_eth_reset(ndev); 394 sh_eth_reset(ndev);
313 395
314 ctrl_outl(RPADIR_PADS1, ioaddr + RPADIR); /* SH7712-DMA-RX-PAD2 */ 396 /* Descriptor format */
397 sh_eth_ring_format(ndev);
398 ctrl_outl(RPADIR_INIT, ioaddr + RPADIR);
315 399
316 /* all sh_eth int mask */ 400 /* all sh_eth int mask */
317 ctrl_outl(0, ioaddr + EESIPR); 401 ctrl_outl(0, ioaddr + EESIPR);
318 402
319 /* FIFO size set */ 403#if defined(CONFIG_CPU_SUBTYPE_SH7763)
404 ctrl_outl(EDMR_EL, ioaddr + EDMR);
405#else
320 ctrl_outl(0, ioaddr + EDMR); /* Endian change */ 406 ctrl_outl(0, ioaddr + EDMR); /* Endian change */
407#endif
321 408
409 /* FIFO size set */
322 ctrl_outl((FIFO_SIZE_T | FIFO_SIZE_R), ioaddr + FDR); 410 ctrl_outl((FIFO_SIZE_T | FIFO_SIZE_R), ioaddr + FDR);
323 ctrl_outl(0, ioaddr + TFTR); 411 ctrl_outl(0, ioaddr + TFTR);
324 412
413 /* Frame recv control */
325 ctrl_outl(0, ioaddr + RMCR); 414 ctrl_outl(0, ioaddr + RMCR);
326 415
327 rx_int_var = mdp->rx_int_var = DESC_I_RINT8 | DESC_I_RINT5; 416 rx_int_var = mdp->rx_int_var = DESC_I_RINT8 | DESC_I_RINT5;
328 tx_int_var = mdp->tx_int_var = DESC_I_TINT2; 417 tx_int_var = mdp->tx_int_var = DESC_I_TINT2;
329 ctrl_outl(rx_int_var | tx_int_var, ioaddr + TRSCER); 418 ctrl_outl(rx_int_var | tx_int_var, ioaddr + TRSCER);
330 419
420#if defined(CONFIG_CPU_SUBTYPE_SH7763)
421 /* Burst sycle set */
422 ctrl_outl(0x800, ioaddr + BCULR);
423#endif
424
331 ctrl_outl((FIFO_F_D_RFF | FIFO_F_D_RFD), ioaddr + FCFTR); 425 ctrl_outl((FIFO_F_D_RFF | FIFO_F_D_RFD), ioaddr + FCFTR);
332 ctrl_outl(0, ioaddr + TRIMD);
333 426
334 /* Descriptor format */ 427#if !defined(CONFIG_CPU_SUBTYPE_SH7763)
335 sh_eth_ring_format(ndev); 428 ctrl_outl(0, ioaddr + TRIMD);
429#endif
336 430
337 ctrl_outl((u32)mdp->rx_ring, ioaddr + RDLAR); 431 /* Recv frame limit set register */
338 ctrl_outl((u32)mdp->tx_ring, ioaddr + TDLAR); 432 ctrl_outl(RFLR_VALUE, ioaddr + RFLR);
339 433
340 ctrl_outl(ctrl_inl(ioaddr + EESR), ioaddr + EESR); 434 ctrl_outl(ctrl_inl(ioaddr + EESR), ioaddr + EESR);
341 ctrl_outl((DMAC_M_RFRMER | DMAC_M_ECI | 0x003fffff), ioaddr + EESIPR); 435 ctrl_outl((DMAC_M_RFRMER | DMAC_M_ECI | 0x003fffff), ioaddr + EESIPR);
@@ -345,21 +439,26 @@ static int sh_eth_dev_init(struct net_device *ndev)
345 ECMR_ZPF | (mdp->duplex ? ECMR_DM : 0) | ECMR_TE | ECMR_RE; 439 ECMR_ZPF | (mdp->duplex ? ECMR_DM : 0) | ECMR_TE | ECMR_RE;
346 440
347 ctrl_outl(val, ioaddr + ECMR); 441 ctrl_outl(val, ioaddr + ECMR);
348 ctrl_outl(ECSR_BRCRX | ECSR_PSRTO | ECSR_LCHNG | ECSR_ICD | 442
349 ECSIPR_MPDIP, ioaddr + ECSR); 443 /* E-MAC Status Register clear */
350 ctrl_outl(ECSIPR_BRCRXIP | ECSIPR_PSRTOIP | ECSIPR_LCHNGIP | 444 ctrl_outl(ECSR_INIT, ioaddr + ECSR);
351 ECSIPR_ICDIP | ECSIPR_MPDIP, ioaddr + ECSIPR); 445
446 /* E-MAC Interrupt Enable register */
447 ctrl_outl(ECSIPR_INIT, ioaddr + ECSIPR);
352 448
353 /* Set MAC address */ 449 /* Set MAC address */
354 update_mac_address(ndev); 450 update_mac_address(ndev);
355 451
356 /* mask reset */ 452 /* mask reset */
357#if defined(CONFIG_CPU_SUBTYPE_SH7710) 453#if defined(CONFIG_CPU_SUBTYPE_SH7710) || defined(CONFIG_CPU_SUBTYPE_SH7763)
358 ctrl_outl(APR_AP, ioaddr + APR); 454 ctrl_outl(APR_AP, ioaddr + APR);
359 ctrl_outl(MPR_MP, ioaddr + MPR); 455 ctrl_outl(MPR_MP, ioaddr + MPR);
360 ctrl_outl(TPAUSER_UNLIMITED, ioaddr + TPAUSER); 456 ctrl_outl(TPAUSER_UNLIMITED, ioaddr + TPAUSER);
457#endif
458#if defined(CONFIG_CPU_SUBTYPE_SH7710)
361 ctrl_outl(BCFR_UNLIMITED, ioaddr + BCFR); 459 ctrl_outl(BCFR_UNLIMITED, ioaddr + BCFR);
362#endif 460#endif
461
363 /* Setting the Rx mode will start the Rx process. */ 462 /* Setting the Rx mode will start the Rx process. */
364 ctrl_outl(EDRRR_R, ioaddr + EDRRR); 463 ctrl_outl(EDRRR_R, ioaddr + EDRRR);
365 464
@@ -379,7 +478,7 @@ static int sh_eth_txfree(struct net_device *ndev)
379 for (; mdp->cur_tx - mdp->dirty_tx > 0; mdp->dirty_tx++) { 478 for (; mdp->cur_tx - mdp->dirty_tx > 0; mdp->dirty_tx++) {
380 entry = mdp->dirty_tx % TX_RING_SIZE; 479 entry = mdp->dirty_tx % TX_RING_SIZE;
381 txdesc = &mdp->tx_ring[entry]; 480 txdesc = &mdp->tx_ring[entry];
382 if (txdesc->status & cpu_to_le32(TD_TACT)) 481 if (txdesc->status & cpu_to_edmac(mdp, TD_TACT))
383 break; 482 break;
384 /* Free the original skb. */ 483 /* Free the original skb. */
385 if (mdp->tx_skbuff[entry]) { 484 if (mdp->tx_skbuff[entry]) {
@@ -387,9 +486,9 @@ static int sh_eth_txfree(struct net_device *ndev)
387 mdp->tx_skbuff[entry] = NULL; 486 mdp->tx_skbuff[entry] = NULL;
388 freeNum++; 487 freeNum++;
389 } 488 }
390 txdesc->status = cpu_to_le32(TD_TFP); 489 txdesc->status = cpu_to_edmac(mdp, TD_TFP);
391 if (entry >= TX_RING_SIZE - 1) 490 if (entry >= TX_RING_SIZE - 1)
392 txdesc->status |= cpu_to_le32(TD_TDLE); 491 txdesc->status |= cpu_to_edmac(mdp, TD_TDLE);
393 492
394 mdp->stats.tx_packets++; 493 mdp->stats.tx_packets++;
395 mdp->stats.tx_bytes += txdesc->buffer_length; 494 mdp->stats.tx_bytes += txdesc->buffer_length;
@@ -407,11 +506,11 @@ static int sh_eth_rx(struct net_device *ndev)
407 int boguscnt = (mdp->dirty_rx + RX_RING_SIZE) - mdp->cur_rx; 506 int boguscnt = (mdp->dirty_rx + RX_RING_SIZE) - mdp->cur_rx;
408 struct sk_buff *skb; 507 struct sk_buff *skb;
409 u16 pkt_len = 0; 508 u16 pkt_len = 0;
410 u32 desc_status; 509 u32 desc_status, reserve = 0;
411 510
412 rxdesc = &mdp->rx_ring[entry]; 511 rxdesc = &mdp->rx_ring[entry];
413 while (!(rxdesc->status & cpu_to_le32(RD_RACT))) { 512 while (!(rxdesc->status & cpu_to_edmac(mdp, RD_RACT))) {
414 desc_status = le32_to_cpu(rxdesc->status); 513 desc_status = edmac_to_cpu(mdp, rxdesc->status);
415 pkt_len = rxdesc->frame_length; 514 pkt_len = rxdesc->frame_length;
416 515
417 if (--boguscnt < 0) 516 if (--boguscnt < 0)
@@ -446,7 +545,7 @@ static int sh_eth_rx(struct net_device *ndev)
446 mdp->stats.rx_packets++; 545 mdp->stats.rx_packets++;
447 mdp->stats.rx_bytes += pkt_len; 546 mdp->stats.rx_bytes += pkt_len;
448 } 547 }
449 rxdesc->status |= cpu_to_le32(RD_RACT); 548 rxdesc->status |= cpu_to_edmac(mdp, RD_RACT);
450 entry = (++mdp->cur_rx) % RX_RING_SIZE; 549 entry = (++mdp->cur_rx) % RX_RING_SIZE;
451 } 550 }
452 551
@@ -454,28 +553,38 @@ static int sh_eth_rx(struct net_device *ndev)
454 for (; mdp->cur_rx - mdp->dirty_rx > 0; mdp->dirty_rx++) { 553 for (; mdp->cur_rx - mdp->dirty_rx > 0; mdp->dirty_rx++) {
455 entry = mdp->dirty_rx % RX_RING_SIZE; 554 entry = mdp->dirty_rx % RX_RING_SIZE;
456 rxdesc = &mdp->rx_ring[entry]; 555 rxdesc = &mdp->rx_ring[entry];
556 /* The size of the buffer is 16 byte boundary. */
557 rxdesc->buffer_length = (mdp->rx_buf_sz + 16) & ~0x0F;
558
457 if (mdp->rx_skbuff[entry] == NULL) { 559 if (mdp->rx_skbuff[entry] == NULL) {
458 skb = dev_alloc_skb(mdp->rx_buf_sz); 560 skb = dev_alloc_skb(mdp->rx_buf_sz);
459 mdp->rx_skbuff[entry] = skb; 561 mdp->rx_skbuff[entry] = skb;
460 if (skb == NULL) 562 if (skb == NULL)
461 break; /* Better luck next round. */ 563 break; /* Better luck next round. */
462 skb->dev = ndev; 564 skb->dev = ndev;
565#if defined(CONFIG_CPU_SUBTYPE_SH7763)
566 reserve = SH7763_SKB_ALIGN
567 - ((uint32_t)skb->data & (SH7763_SKB_ALIGN-1));
568 if (reserve)
569 skb_reserve(skb, reserve);
570#else
463 skb_reserve(skb, RX_OFFSET); 571 skb_reserve(skb, RX_OFFSET);
572#endif
573 skb->ip_summed = CHECKSUM_NONE;
464 rxdesc->addr = (u32)skb->data & ~0x3UL; 574 rxdesc->addr = (u32)skb->data & ~0x3UL;
465 } 575 }
466 /* The size of the buffer is 16 byte boundary. */
467 rxdesc->buffer_length = (mdp->rx_buf_sz + 16) & ~0x0F;
468 if (entry >= RX_RING_SIZE - 1) 576 if (entry >= RX_RING_SIZE - 1)
469 rxdesc->status |= 577 rxdesc->status |=
470 cpu_to_le32(RD_RACT | RD_RFP | RC_RDEL); 578 cpu_to_edmac(mdp, RD_RACT | RD_RFP | RD_RDEL);
471 else 579 else
472 rxdesc->status |= 580 rxdesc->status |=
473 cpu_to_le32(RD_RACT | RD_RFP); 581 cpu_to_edmac(mdp, RD_RACT | RD_RFP);
474 } 582 }
475 583
476 /* Restart Rx engine if stopped. */ 584 /* Restart Rx engine if stopped. */
477 /* If we don't need to check status, don't. -KDU */ 585 /* If we don't need to check status, don't. -KDU */
478 ctrl_outl(EDRRR_R, ndev->base_addr + EDRRR); 586 if (!(ctrl_inl(ndev->base_addr + EDRRR) & EDRRR_R))
587 ctrl_outl(EDRRR_R, ndev->base_addr + EDRRR);
479 588
480 return 0; 589 return 0;
481} 590}
@@ -529,13 +638,14 @@ static void sh_eth_error(struct net_device *ndev, int intr_status)
529 printk(KERN_ERR "Receive Frame Overflow\n"); 638 printk(KERN_ERR "Receive Frame Overflow\n");
530 } 639 }
531 } 640 }
532 641#if !defined(CONFIG_CPU_SUBTYPE_SH7763)
533 if (intr_status & EESR_ADE) { 642 if (intr_status & EESR_ADE) {
534 if (intr_status & EESR_TDE) { 643 if (intr_status & EESR_TDE) {
535 if (intr_status & EESR_TFE) 644 if (intr_status & EESR_TFE)
536 mdp->stats.tx_fifo_errors++; 645 mdp->stats.tx_fifo_errors++;
537 } 646 }
538 } 647 }
648#endif
539 649
540 if (intr_status & EESR_RDE) { 650 if (intr_status & EESR_RDE) {
541 /* Receive Descriptor Empty int */ 651 /* Receive Descriptor Empty int */
@@ -550,8 +660,11 @@ static void sh_eth_error(struct net_device *ndev, int intr_status)
550 mdp->stats.rx_fifo_errors++; 660 mdp->stats.rx_fifo_errors++;
551 printk(KERN_ERR "Receive FIFO Overflow\n"); 661 printk(KERN_ERR "Receive FIFO Overflow\n");
552 } 662 }
553 if (intr_status & 663 if (intr_status & (EESR_TWB | EESR_TABT |
554 (EESR_TWB | EESR_TABT | EESR_ADE | EESR_TDE | EESR_TFE)) { 664#if !defined(CONFIG_CPU_SUBTYPE_SH7763)
665 EESR_ADE |
666#endif
667 EESR_TDE | EESR_TFE)) {
555 /* Tx error */ 668 /* Tx error */
556 u32 edtrr = ctrl_inl(ndev->base_addr + EDTRR); 669 u32 edtrr = ctrl_inl(ndev->base_addr + EDTRR);
557 /* dmesg */ 670 /* dmesg */
@@ -582,17 +695,23 @@ static irqreturn_t sh_eth_interrupt(int irq, void *netdev)
582 ioaddr = ndev->base_addr; 695 ioaddr = ndev->base_addr;
583 spin_lock(&mdp->lock); 696 spin_lock(&mdp->lock);
584 697
698 /* Get interrpt stat */
585 intr_status = ctrl_inl(ioaddr + EESR); 699 intr_status = ctrl_inl(ioaddr + EESR);
586 /* Clear interrupt */ 700 /* Clear interrupt */
587 ctrl_outl(intr_status, ioaddr + EESR); 701 ctrl_outl(intr_status, ioaddr + EESR);
588 702
589 if (intr_status & (EESR_FRC | EESR_RINT8 | 703 if (intr_status & (EESR_FRC | /* Frame recv*/
590 EESR_RINT5 | EESR_RINT4 | EESR_RINT3 | EESR_RINT2 | 704 EESR_RMAF | /* Multi cast address recv*/
591 EESR_RINT1)) 705 EESR_RRF | /* Bit frame recv */
706 EESR_RTLF | /* Long frame recv*/
707 EESR_RTSF | /* short frame recv */
708 EESR_PRE | /* PHY-LSI recv error */
709 EESR_CERF)){ /* recv frame CRC error */
592 sh_eth_rx(ndev); 710 sh_eth_rx(ndev);
593 if (intr_status & (EESR_FTC | 711 }
594 EESR_TINT4 | EESR_TINT3 | EESR_TINT2 | EESR_TINT1)) {
595 712
713 /* Tx Check */
714 if (intr_status & TX_CHECK) {
596 sh_eth_txfree(ndev); 715 sh_eth_txfree(ndev);
597 netif_wake_queue(ndev); 716 netif_wake_queue(ndev);
598 } 717 }
@@ -631,11 +750,32 @@ static void sh_eth_adjust_link(struct net_device *ndev)
631 if (phydev->duplex != mdp->duplex) { 750 if (phydev->duplex != mdp->duplex) {
632 new_state = 1; 751 new_state = 1;
633 mdp->duplex = phydev->duplex; 752 mdp->duplex = phydev->duplex;
753#if defined(CONFIG_CPU_SUBTYPE_SH7763)
754 if (mdp->duplex) { /* FULL */
755 ctrl_outl(ctrl_inl(ioaddr + ECMR) | ECMR_DM,
756 ioaddr + ECMR);
757 } else { /* Half */
758 ctrl_outl(ctrl_inl(ioaddr + ECMR) & ~ECMR_DM,
759 ioaddr + ECMR);
760 }
761#endif
634 } 762 }
635 763
636 if (phydev->speed != mdp->speed) { 764 if (phydev->speed != mdp->speed) {
637 new_state = 1; 765 new_state = 1;
638 mdp->speed = phydev->speed; 766 mdp->speed = phydev->speed;
767#if defined(CONFIG_CPU_SUBTYPE_SH7763)
768 switch (mdp->speed) {
769 case 10: /* 10BASE */
770 ctrl_outl(GECMR_10, ioaddr + GECMR); break;
771 case 100:/* 100BASE */
772 ctrl_outl(GECMR_100, ioaddr + GECMR); break;
773 case 1000: /* 1000BASE */
774 ctrl_outl(GECMR_1000, ioaddr + GECMR); break;
775 default:
776 break;
777 }
778#endif
639 } 779 }
640 if (mdp->link == PHY_DOWN) { 780 if (mdp->link == PHY_DOWN) {
641 ctrl_outl((ctrl_inl(ioaddr + ECMR) & ~ECMR_TXF) 781 ctrl_outl((ctrl_inl(ioaddr + ECMR) & ~ECMR_TXF)
@@ -730,7 +870,7 @@ static int sh_eth_open(struct net_device *ndev)
730 /* Set the timer to check for link beat. */ 870 /* Set the timer to check for link beat. */
731 init_timer(&mdp->timer); 871 init_timer(&mdp->timer);
732 mdp->timer.expires = (jiffies + (24 * HZ)) / 10;/* 2.4 sec. */ 872 mdp->timer.expires = (jiffies + (24 * HZ)) / 10;/* 2.4 sec. */
733 setup_timer(&mdp->timer, sh_eth_timer, ndev); 873 setup_timer(&mdp->timer, sh_eth_timer, (unsigned long)ndev);
734 874
735 return ret; 875 return ret;
736 876
@@ -814,13 +954,15 @@ static int sh_eth_start_xmit(struct sk_buff *skb, struct net_device *ndev)
814 txdesc->buffer_length = skb->len; 954 txdesc->buffer_length = skb->len;
815 955
816 if (entry >= TX_RING_SIZE - 1) 956 if (entry >= TX_RING_SIZE - 1)
817 txdesc->status |= cpu_to_le32(TD_TACT | TD_TDLE); 957 txdesc->status |= cpu_to_edmac(mdp, TD_TACT | TD_TDLE);
818 else 958 else
819 txdesc->status |= cpu_to_le32(TD_TACT); 959 txdesc->status |= cpu_to_edmac(mdp, TD_TACT);
820 960
821 mdp->cur_tx++; 961 mdp->cur_tx++;
822 962
823 ctrl_outl(EDTRR_TRNS, ndev->base_addr + EDTRR); 963 if (!(ctrl_inl(ndev->base_addr + EDTRR) & EDTRR_TRNS))
964 ctrl_outl(EDTRR_TRNS, ndev->base_addr + EDTRR);
965
824 ndev->trans_start = jiffies; 966 ndev->trans_start = jiffies;
825 967
826 return 0; 968 return 0;
@@ -877,9 +1019,15 @@ static struct net_device_stats *sh_eth_get_stats(struct net_device *ndev)
877 ctrl_outl(0, ioaddr + CDCR); /* (write clear) */ 1019 ctrl_outl(0, ioaddr + CDCR); /* (write clear) */
878 mdp->stats.tx_carrier_errors += ctrl_inl(ioaddr + LCCR); 1020 mdp->stats.tx_carrier_errors += ctrl_inl(ioaddr + LCCR);
879 ctrl_outl(0, ioaddr + LCCR); /* (write clear) */ 1021 ctrl_outl(0, ioaddr + LCCR); /* (write clear) */
1022#if defined(CONFIG_CPU_SUBTYPE_SH7763)
1023 mdp->stats.tx_carrier_errors += ctrl_inl(ioaddr + CERCR);/* CERCR */
1024 ctrl_outl(0, ioaddr + CERCR); /* (write clear) */
1025 mdp->stats.tx_carrier_errors += ctrl_inl(ioaddr + CEECR);/* CEECR */
1026 ctrl_outl(0, ioaddr + CEECR); /* (write clear) */
1027#else
880 mdp->stats.tx_carrier_errors += ctrl_inl(ioaddr + CNDCR); 1028 mdp->stats.tx_carrier_errors += ctrl_inl(ioaddr + CNDCR);
881 ctrl_outl(0, ioaddr + CNDCR); /* (write clear) */ 1029 ctrl_outl(0, ioaddr + CNDCR); /* (write clear) */
882 1030#endif
883 return &mdp->stats; 1031 return &mdp->stats;
884} 1032}
885 1033
@@ -929,8 +1077,13 @@ static void sh_eth_tsu_init(u32 ioaddr)
929 ctrl_outl(0, ioaddr + TSU_FWSL0); 1077 ctrl_outl(0, ioaddr + TSU_FWSL0);
930 ctrl_outl(0, ioaddr + TSU_FWSL1); 1078 ctrl_outl(0, ioaddr + TSU_FWSL1);
931 ctrl_outl(TSU_FWSLC_POSTENU | TSU_FWSLC_POSTENL, ioaddr + TSU_FWSLC); 1079 ctrl_outl(TSU_FWSLC_POSTENU | TSU_FWSLC_POSTENL, ioaddr + TSU_FWSLC);
1080#if defined(CONFIG_CPU_SUBTYPE_SH7763)
1081 ctrl_outl(0, ioaddr + TSU_QTAG0); /* Disable QTAG(0->1) */
1082 ctrl_outl(0, ioaddr + TSU_QTAG1); /* Disable QTAG(1->0) */
1083#else
932 ctrl_outl(0, ioaddr + TSU_QTAGM0); /* Disable QTAG(0->1) */ 1084 ctrl_outl(0, ioaddr + TSU_QTAGM0); /* Disable QTAG(0->1) */
933 ctrl_outl(0, ioaddr + TSU_QTAGM1); /* Disable QTAG(1->0) */ 1085 ctrl_outl(0, ioaddr + TSU_QTAGM1); /* Disable QTAG(1->0) */
1086#endif
934 ctrl_outl(0, ioaddr + TSU_FWSR); /* all interrupt status clear */ 1087 ctrl_outl(0, ioaddr + TSU_FWSR); /* all interrupt status clear */
935 ctrl_outl(0, ioaddr + TSU_FWINMK); /* Disable all interrupt */ 1088 ctrl_outl(0, ioaddr + TSU_FWINMK); /* Disable all interrupt */
936 ctrl_outl(0, ioaddr + TSU_TEN); /* Disable all CAM entry */ 1089 ctrl_outl(0, ioaddr + TSU_TEN); /* Disable all CAM entry */
@@ -1029,6 +1182,7 @@ static int sh_eth_drv_probe(struct platform_device *pdev)
1029 struct resource *res; 1182 struct resource *res;
1030 struct net_device *ndev = NULL; 1183 struct net_device *ndev = NULL;
1031 struct sh_eth_private *mdp; 1184 struct sh_eth_private *mdp;
1185 struct sh_eth_plat_data *pd;
1032 1186
1033 /* get base addr */ 1187 /* get base addr */
1034 res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 1188 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
@@ -1066,8 +1220,11 @@ static int sh_eth_drv_probe(struct platform_device *pdev)
1066 mdp = netdev_priv(ndev); 1220 mdp = netdev_priv(ndev);
1067 spin_lock_init(&mdp->lock); 1221 spin_lock_init(&mdp->lock);
1068 1222
1223 pd = (struct sh_eth_plat_data *)(pdev->dev.platform_data);
1069 /* get PHY ID */ 1224 /* get PHY ID */
1070 mdp->phy_id = (int)pdev->dev.platform_data; 1225 mdp->phy_id = pd->phy;
1226 /* EDMAC endian */
1227 mdp->edmac_endian = pd->edmac_endian;
1071 1228
1072 /* set function */ 1229 /* set function */
1073 ndev->open = sh_eth_open; 1230 ndev->open = sh_eth_open;
@@ -1087,12 +1244,16 @@ static int sh_eth_drv_probe(struct platform_device *pdev)
1087 1244
1088 /* First device only init */ 1245 /* First device only init */
1089 if (!devno) { 1246 if (!devno) {
1247#if defined(ARSTR)
1090 /* reset device */ 1248 /* reset device */
1091 ctrl_outl(ARSTR_ARSTR, ndev->base_addr + ARSTR); 1249 ctrl_outl(ARSTR_ARSTR, ARSTR);
1092 mdelay(1); 1250 mdelay(1);
1251#endif
1093 1252
1253#if defined(SH_TSU_ADDR)
1094 /* TSU init (Init only)*/ 1254 /* TSU init (Init only)*/
1095 sh_eth_tsu_init(SH_TSU_ADDR); 1255 sh_eth_tsu_init(SH_TSU_ADDR);
1256#endif
1096 } 1257 }
1097 1258
1098 /* network device register */ 1259 /* network device register */
@@ -1110,8 +1271,8 @@ static int sh_eth_drv_probe(struct platform_device *pdev)
1110 ndev->name, CARDNAME, (u32) ndev->base_addr); 1271 ndev->name, CARDNAME, (u32) ndev->base_addr);
1111 1272
1112 for (i = 0; i < 5; i++) 1273 for (i = 0; i < 5; i++)
1113 printk(KERN_INFO "%2.2x:", ndev->dev_addr[i]); 1274 printk("%02X:", ndev->dev_addr[i]);
1114 printk(KERN_INFO "%2.2x, IRQ %d.\n", ndev->dev_addr[i], ndev->irq); 1275 printk("%02X, IRQ %d.\n", ndev->dev_addr[i], ndev->irq);
1115 1276
1116 platform_set_drvdata(pdev, ndev); 1277 platform_set_drvdata(pdev, ndev);
1117 1278
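The broadest change in sh_eth.c is that descriptor status words are no longer hard-wired to little-endian cpu_to_le32()/le32_to_cpu(): the new cpu_to_edmac()/edmac_to_cpu() helpers pick the conversion at run time from mdp->edmac_endian, which now arrives through struct sh_eth_plat_data. A trimmed sketch of that run-time dispatch, with example names in place of the driver's private struct:

#include <linux/types.h>
#include <asm/byteorder.h>

enum example_edmac_endian { EXAMPLE_EDMAC_LITTLE, EXAMPLE_EDMAC_BIG };

struct example_priv {
        enum example_edmac_endian edmac_endian; /* set once from platform data */
};

static inline u32 example_cpu_to_edmac(struct example_priv *p, u32 x)
{
        return (p->edmac_endian == EXAMPLE_EDMAC_BIG) ?
                cpu_to_be32(x) : cpu_to_le32(x);
}

static inline u32 example_edmac_to_cpu(struct example_priv *p, u32 x)
{
        return (p->edmac_endian == EXAMPLE_EDMAC_BIG) ?
                be32_to_cpu(x) : le32_to_cpu(x);
}

A descriptor update then reads, as in the hunks above, rxdesc->status |= example_cpu_to_edmac(p, RD_RACT | RD_RFP), and the same driver binary works whether the DMA engine on a given SoC expects big- or little-endian descriptors.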
diff --git a/drivers/net/sh_eth.h b/drivers/net/sh_eth.h
index e01e1c347715..73bc7181cc18 100644
--- a/drivers/net/sh_eth.h
+++ b/drivers/net/sh_eth.h
@@ -30,120 +30,254 @@
30#include <linux/netdevice.h> 30#include <linux/netdevice.h>
31#include <linux/phy.h> 31#include <linux/phy.h>
32 32
33#include <asm/sh_eth.h>
34
33#define CARDNAME "sh-eth" 35#define CARDNAME "sh-eth"
34#define TX_TIMEOUT (5*HZ) 36#define TX_TIMEOUT (5*HZ)
35 37#define TX_RING_SIZE 64 /* Tx ring size */
36#define TX_RING_SIZE 128 /* Tx ring size */ 38#define RX_RING_SIZE 64 /* Rx ring size */
37#define RX_RING_SIZE 128 /* Rx ring size */
38#define RX_OFFSET 2 /* skb offset */
39#define ETHERSMALL 60 39#define ETHERSMALL 60
40#define PKT_BUF_SZ 1538 40#define PKT_BUF_SZ 1538
41 41
42#ifdef CONFIG_CPU_SUBTYPE_SH7763
43
44#define SH7763_SKB_ALIGN 32
42/* Chip Base Address */ 45/* Chip Base Address */
43#define SH_TSU_ADDR 0xA7000804 46# define SH_TSU_ADDR 0xFFE01800
47# define ARSTR 0xFFE01800
44 48
45/* Chip Registers */ 49/* Chip Registers */
46/* E-DMAC */ 50/* E-DMAC */
47#define EDMR 0x0000 51# define EDSR 0x000
48#define EDTRR 0x0004 52# define EDMR 0x400
49#define EDRRR 0x0008 53# define EDTRR 0x408
50#define TDLAR 0x000C 54# define EDRRR 0x410
51#define RDLAR 0x0010 55# define EESR 0x428
52#define EESR 0x0014 56# define EESIPR 0x430
53#define EESIPR 0x0018 57# define TDLAR 0x010
54#define TRSCER 0x001C 58# define TDFAR 0x014
55#define RMFCR 0x0020 59# define TDFXR 0x018
56#define TFTR 0x0024 60# define TDFFR 0x01C
57#define FDR 0x0028 61# define RDLAR 0x030
58#define RMCR 0x002C 62# define RDFAR 0x034
59#define EDOCR 0x0030 63# define RDFXR 0x038
60#define FCFTR 0x0034 64# define RDFFR 0x03C
61#define RPADIR 0x0038 65# define TRSCER 0x438
62#define TRIMD 0x003C 66# define RMFCR 0x440
63#define RBWAR 0x0040 67# define TFTR 0x448
64#define RDFAR 0x0044 68# define FDR 0x450
65#define TBRAR 0x004C 69# define RMCR 0x458
66#define TDFAR 0x0050 70# define RPADIR 0x460
71# define FCFTR 0x468
72
73/* Ether Register */
74# define ECMR 0x500
75# define ECSR 0x510
76# define ECSIPR 0x518
77# define PIR 0x520
78# define PSR 0x528
79# define PIPR 0x52C
80# define RFLR 0x508
81# define APR 0x554
82# define MPR 0x558
83# define PFTCR 0x55C
84# define PFRCR 0x560
85# define TPAUSER 0x564
86# define GECMR 0x5B0
87# define BCULR 0x5B4
88# define MAHR 0x5C0
89# define MALR 0x5C8
90# define TROCR 0x700
91# define CDCR 0x708
92# define LCCR 0x710
93# define CEFCR 0x740
94# define FRECR 0x748
95# define TSFRCR 0x750
96# define TLFRCR 0x758
97# define RFCR 0x760
98# define CERCR 0x768
99# define CEECR 0x770
100# define MAFCR 0x778
101
102/* TSU Absolute Address */
103# define TSU_CTRST 0x004
104# define TSU_FWEN0 0x010
105# define TSU_FWEN1 0x014
106# define TSU_FCM 0x18
107# define TSU_BSYSL0 0x20
108# define TSU_BSYSL1 0x24
109# define TSU_PRISL0 0x28
110# define TSU_PRISL1 0x2C
111# define TSU_FWSL0 0x30
112# define TSU_FWSL1 0x34
113# define TSU_FWSLC 0x38
114# define TSU_QTAG0 0x40
115# define TSU_QTAG1 0x44
116# define TSU_FWSR 0x50
117# define TSU_FWINMK 0x54
118# define TSU_ADQT0 0x48
119# define TSU_ADQT1 0x4C
120# define TSU_VTAG0 0x58
121# define TSU_VTAG1 0x5C
122# define TSU_ADSBSY 0x60
123# define TSU_TEN 0x64
124# define TSU_POST1 0x70
125# define TSU_POST2 0x74
126# define TSU_POST3 0x78
127# define TSU_POST4 0x7C
128# define TSU_ADRH0 0x100
129# define TSU_ADRL0 0x104
130# define TSU_ADRH31 0x1F8
131# define TSU_ADRL31 0x1FC
132
133# define TXNLCR0 0x80
134# define TXALCR0 0x84
135# define RXNLCR0 0x88
136# define RXALCR0 0x8C
137# define FWNLCR0 0x90
138# define FWALCR0 0x94
139# define TXNLCR1 0xA0
140# define TXALCR1 0xA4
141# define RXNLCR1 0xA8
142# define RXALCR1 0xAC
143# define FWNLCR1 0xB0
144# define FWALCR1 0x40
145
146#else /* CONFIG_CPU_SUBTYPE_SH7763 */
147# define RX_OFFSET 2 /* skb offset */
148#ifndef CONFIG_CPU_SUBTYPE_SH7619
149/* Chip base address */
150# define SH_TSU_ADDR 0xA7000804
151# define ARSTR 0xA7000800
152#endif
153/* Chip Registers */
154/* E-DMAC */
155# define EDMR 0x0000
156# define EDTRR 0x0004
157# define EDRRR 0x0008
158# define TDLAR 0x000C
159# define RDLAR 0x0010
160# define EESR 0x0014
161# define EESIPR 0x0018
162# define TRSCER 0x001C
163# define RMFCR 0x0020
164# define TFTR 0x0024
165# define FDR 0x0028
166# define RMCR 0x002C
167# define EDOCR 0x0030
168# define FCFTR 0x0034
169# define RPADIR 0x0038
170# define TRIMD 0x003C
171# define RBWAR 0x0040
172# define RDFAR 0x0044
173# define TBRAR 0x004C
174# define TDFAR 0x0050
175
67/* Ether Register */ 176/* Ether Register */
68#define ECMR 0x0160 177# define ECMR 0x0160
69#define ECSR 0x0164 178# define ECSR 0x0164
70#define ECSIPR 0x0168 179# define ECSIPR 0x0168
71#define PIR 0x016C 180# define PIR 0x016C
72#define MAHR 0x0170 181# define MAHR 0x0170
73#define MALR 0x0174 182# define MALR 0x0174
74#define RFLR 0x0178 183# define RFLR 0x0178
75#define PSR 0x017C 184# define PSR 0x017C
76#define TROCR 0x0180 185# define TROCR 0x0180
77#define CDCR 0x0184 186# define CDCR 0x0184
78#define LCCR 0x0188 187# define LCCR 0x0188
79#define CNDCR 0x018C 188# define CNDCR 0x018C
80#define CEFCR 0x0194 189# define CEFCR 0x0194
81#define FRECR 0x0198 190# define FRECR 0x0198
82#define TSFRCR 0x019C 191# define TSFRCR 0x019C
83#define TLFRCR 0x01A0 192# define TLFRCR 0x01A0
84#define RFCR 0x01A4 193# define RFCR 0x01A4
85#define MAFCR 0x01A8 194# define MAFCR 0x01A8
86#define IPGR 0x01B4 195# define IPGR 0x01B4
87#if defined(CONFIG_CPU_SUBTYPE_SH7710) 196# if defined(CONFIG_CPU_SUBTYPE_SH7710)
88#define APR 0x01B8 197# define APR 0x01B8
89#define MPR 0x01BC 198# define MPR 0x01BC
90#define TPAUSER 0x1C4 199# define TPAUSER 0x1C4
91#define BCFR 0x1CC 200# define BCFR 0x1CC
92#endif /* CONFIG_CPU_SH7710 */ 201# endif /* CONFIG_CPU_SH7710 */
93
94#define ARSTR 0x0800
95 202
96/* TSU */ 203/* TSU */
97#define TSU_CTRST 0x004 204# define TSU_CTRST 0x004
98#define TSU_FWEN0 0x010 205# define TSU_FWEN0 0x010
99#define TSU_FWEN1 0x014 206# define TSU_FWEN1 0x014
100#define TSU_FCM 0x018 207# define TSU_FCM 0x018
101#define TSU_BSYSL0 0x020 208# define TSU_BSYSL0 0x020
102#define TSU_BSYSL1 0x024 209# define TSU_BSYSL1 0x024
103#define TSU_PRISL0 0x028 210# define TSU_PRISL0 0x028
104#define TSU_PRISL1 0x02C 211# define TSU_PRISL1 0x02C
105#define TSU_FWSL0 0x030 212# define TSU_FWSL0 0x030
106#define TSU_FWSL1 0x034 213# define TSU_FWSL1 0x034
107#define TSU_FWSLC 0x038 214# define TSU_FWSLC 0x038
108#define TSU_QTAGM0 0x040 215# define TSU_QTAGM0 0x040
109#define TSU_QTAGM1 0x044 216# define TSU_QTAGM1 0x044
110#define TSU_ADQT0 0x048 217# define TSU_ADQT0 0x048
111#define TSU_ADQT1 0x04C 218# define TSU_ADQT1 0x04C
112#define TSU_FWSR 0x050 219# define TSU_FWSR 0x050
113#define TSU_FWINMK 0x054 220# define TSU_FWINMK 0x054
114#define TSU_ADSBSY 0x060 221# define TSU_ADSBSY 0x060
115#define TSU_TEN 0x064 222# define TSU_TEN 0x064
116#define TSU_POST1 0x070 223# define TSU_POST1 0x070
117#define TSU_POST2 0x074 224# define TSU_POST2 0x074
118#define TSU_POST3 0x078 225# define TSU_POST3 0x078
119#define TSU_POST4 0x07C 226# define TSU_POST4 0x07C
120#define TXNLCR0 0x080 227# define TXNLCR0 0x080
121#define TXALCR0 0x084 228# define TXALCR0 0x084
122#define RXNLCR0 0x088 229# define RXNLCR0 0x088
123#define RXALCR0 0x08C 230# define RXALCR0 0x08C
124#define FWNLCR0 0x090 231# define FWNLCR0 0x090
125#define FWALCR0 0x094 232# define FWALCR0 0x094
126#define TXNLCR1 0x0A0 233# define TXNLCR1 0x0A0
127#define TXALCR1 0x0A4 234# define TXALCR1 0x0A4
128#define RXNLCR1 0x0A8 235# define RXNLCR1 0x0A8
129#define RXALCR1 0x0AC 236# define RXALCR1 0x0AC
130#define FWNLCR1 0x0B0 237# define FWNLCR1 0x0B0
131#define FWALCR1 0x0B4 238# define FWALCR1 0x0B4
132 239
133#define TSU_ADRH0 0x0100 240#define TSU_ADRH0 0x0100
134#define TSU_ADRL0 0x0104 241#define TSU_ADRL0 0x0104
135#define TSU_ADRL31 0x01FC 242#define TSU_ADRL31 0x01FC
136 243
137/* Register's bits */ 244#endif /* CONFIG_CPU_SUBTYPE_SH7763 */
245
246/*
247 * Register's bits
248 */
249#ifdef CONFIG_CPU_SUBTYPE_SH7763
250/* EDSR */
251enum EDSR_BIT {
252 EDSR_ENT = 0x01, EDSR_ENR = 0x02,
253};
254#define EDSR_ENALL (EDSR_ENT|EDSR_ENR)
255
256/* GECMR */
257enum GECMR_BIT {
258 GECMR_10 = 0x0, GECMR_100 = 0x04, GECMR_1000 = 0x01,
259};
260#endif
138 261
139/* EDMR */ 262/* EDMR */
140enum DMAC_M_BIT { 263enum DMAC_M_BIT {
141 EDMR_DL1 = 0x20, EDMR_DL0 = 0x10, EDMR_SRST = 0x01, 264 EDMR_DL1 = 0x20, EDMR_DL0 = 0x10,
265#ifdef CONFIG_CPU_SUBTYPE_SH7763
266 EDMR_SRST = 0x03,
267 EMDR_DESC_R = 0x30, /* Descriptor reserve size */
268 EDMR_EL = 0x40, /* Litte endian */
269#else /* CONFIG_CPU_SUBTYPE_SH7763 */
270 EDMR_SRST = 0x01,
271#endif
142}; 272};
143 273
144/* EDTRR */ 274/* EDTRR */
145enum DMAC_T_BIT { 275enum DMAC_T_BIT {
276#ifdef CONFIG_CPU_SUBTYPE_SH7763
277 EDTRR_TRNS = 0x03,
278#else
146 EDTRR_TRNS = 0x01, 279 EDTRR_TRNS = 0x01,
280#endif
147}; 281};
148 282
149/* EDRRR*/ 283/* EDRRR*/
@@ -173,21 +307,47 @@ enum PHY_STATUS_BIT { PHY_ST_LINK = 0x01, };
173 307
174/* EESR */ 308/* EESR */
175enum EESR_BIT { 309enum EESR_BIT {
176 EESR_TWB = 0x40000000, EESR_TABT = 0x04000000, 310#ifndef CONFIG_CPU_SUBTYPE_SH7763
311 EESR_TWB = 0x40000000,
312#else
313 EESR_TWB = 0xC0000000,
314 EESR_TC1 = 0x20000000,
315 EESR_TUC = 0x10000000,
316 EESR_ROC = 0x80000000,
317#endif
318 EESR_TABT = 0x04000000,
177 EESR_RABT = 0x02000000, EESR_RFRMER = 0x01000000, 319 EESR_RABT = 0x02000000, EESR_RFRMER = 0x01000000,
178 EESR_ADE = 0x00800000, EESR_ECI = 0x00400000, 320#ifndef CONFIG_CPU_SUBTYPE_SH7763
179 EESR_FTC = 0x00200000, EESR_TDE = 0x00100000, 321 EESR_ADE = 0x00800000,
180 EESR_TFE = 0x00080000, EESR_FRC = 0x00040000, 322#endif
181 EESR_RDE = 0x00020000, EESR_RFE = 0x00010000, 323 EESR_ECI = 0x00400000,
182 EESR_TINT4 = 0x00000800, EESR_TINT3 = 0x00000400, 324 EESR_FTC = 0x00200000, EESR_TDE = 0x00100000,
183 EESR_TINT2 = 0x00000200, EESR_TINT1 = 0x00000100, 325 EESR_TFE = 0x00080000, EESR_FRC = 0x00040000,
184 EESR_RINT8 = 0x00000080, EESR_RINT5 = 0x00000010, 326 EESR_RDE = 0x00020000, EESR_RFE = 0x00010000,
185 EESR_RINT4 = 0x00000008, EESR_RINT3 = 0x00000004, 327#ifndef CONFIG_CPU_SUBTYPE_SH7763
186 EESR_RINT2 = 0x00000002, EESR_RINT1 = 0x00000001, 328 EESR_CND = 0x00000800,
187}; 329#endif
188 330 EESR_DLC = 0x00000400,
189#define EESR_ERR_CHECK (EESR_TWB | EESR_TABT | EESR_RABT | EESR_RDE \ 331 EESR_CD = 0x00000200, EESR_RTO = 0x00000100,
332 EESR_RMAF = 0x00000080, EESR_CEEF = 0x00000040,
333 EESR_CELF = 0x00000020, EESR_RRF = 0x00000010,
334 EESR_RTLF = 0x00000008, EESR_RTSF = 0x00000004,
335 EESR_PRE = 0x00000002, EESR_CERF = 0x00000001,
336};
337
338
339#ifdef CONFIG_CPU_SUBTYPE_SH7763
340# define TX_CHECK (EESR_TC1 | EESR_FTC)
341# define EESR_ERR_CHECK (EESR_TWB | EESR_TABT | EESR_RABT | EESR_RDE \
342 | EESR_RFRMER | EESR_TFE | EESR_TDE | EESR_ECI)
343# define TX_ERROR_CEHCK (EESR_TWB | EESR_TABT | EESR_TDE | EESR_TFE)
344
345#else
346# define TX_CHECK (EESR_FTC | EESR_CND | EESR_DLC | EESR_CD | EESR_RTO)
347# define EESR_ERR_CHECK (EESR_TWB | EESR_TABT | EESR_RABT | EESR_RDE \
190 | EESR_RFRMER | EESR_ADE | EESR_TFE | EESR_TDE | EESR_ECI) 348 | EESR_RFRMER | EESR_ADE | EESR_TFE | EESR_TDE | EESR_ECI)
349# define TX_ERROR_CEHCK (EESR_TWB | EESR_TABT | EESR_ADE | EESR_TDE | EESR_TFE)
350#endif
191 351
192/* EESIPR */ 352/* EESIPR */
193enum DMAC_IM_BIT { 353enum DMAC_IM_BIT {
@@ -207,8 +367,8 @@ enum DMAC_IM_BIT {
207 367
208/* Receive descriptor bit */ 368/* Receive descriptor bit */
209enum RD_STS_BIT { 369enum RD_STS_BIT {
210 RD_RACT = 0x80000000, RC_RDEL = 0x40000000, 370 RD_RACT = 0x80000000, RD_RDEL = 0x40000000,
211 RC_RFP1 = 0x20000000, RC_RFP0 = 0x10000000, 371 RD_RFP1 = 0x20000000, RD_RFP0 = 0x10000000,
212 RD_RFE = 0x08000000, RD_RFS10 = 0x00000200, 372 RD_RFE = 0x08000000, RD_RFS10 = 0x00000200,
213 RD_RFS9 = 0x00000100, RD_RFS8 = 0x00000080, 373 RD_RFS9 = 0x00000100, RD_RFS8 = 0x00000080,
214 RD_RFS7 = 0x00000040, RD_RFS6 = 0x00000020, 374 RD_RFS7 = 0x00000040, RD_RFS6 = 0x00000020,
@@ -216,9 +376,9 @@ enum RD_STS_BIT {
216 RD_RFS3 = 0x00000004, RD_RFS2 = 0x00000002, 376 RD_RFS3 = 0x00000004, RD_RFS2 = 0x00000002,
217 RD_RFS1 = 0x00000001, 377 RD_RFS1 = 0x00000001,
218}; 378};
219#define RDF1ST RC_RFP1 379#define RDF1ST RD_RFP1
220#define RDFEND RC_RFP0 380#define RDFEND RD_RFP0
221#define RD_RFP (RC_RFP1|RC_RFP0) 381#define RD_RFP (RD_RFP1|RD_RFP0)
222 382
223/* FCFTR */ 383/* FCFTR */
224enum FCFTR_BIT { 384enum FCFTR_BIT {
@@ -227,11 +387,16 @@ enum FCFTR_BIT {
227 FCFTR_RFD1 = 0x00000002, FCFTR_RFD0 = 0x00000001, 387 FCFTR_RFD1 = 0x00000002, FCFTR_RFD0 = 0x00000001,
228}; 388};
229#define FIFO_F_D_RFF (FCFTR_RFF2|FCFTR_RFF1|FCFTR_RFF0) 389#define FIFO_F_D_RFF (FCFTR_RFF2|FCFTR_RFF1|FCFTR_RFF0)
390#ifndef CONFIG_CPU_SUBTYPE_SH7619
230#define FIFO_F_D_RFD (FCFTR_RFD2|FCFTR_RFD1|FCFTR_RFD0) 391#define FIFO_F_D_RFD (FCFTR_RFD2|FCFTR_RFD1|FCFTR_RFD0)
392#else
393#define FIFO_F_D_RFD (FCFTR_RFD0)
394#endif
231 395
232/* Transfer descriptor bit */ 396/* Transfer descriptor bit */
233enum TD_STS_BIT { 397enum TD_STS_BIT {
234 TD_TACT = 0x80000000, TD_TDLE = 0x40000000, TD_TFP1 = 0x20000000, 398 TD_TACT = 0x80000000,
399 TD_TDLE = 0x40000000, TD_TFP1 = 0x20000000,
235 TD_TFP0 = 0x10000000, 400 TD_TFP0 = 0x10000000,
236}; 401};
237#define TDF1ST TD_TFP1 402#define TDF1ST TD_TFP1
@@ -242,6 +407,10 @@ enum TD_STS_BIT {
242enum RECV_RST_BIT { RMCR_RST = 0x01, }; 407enum RECV_RST_BIT { RMCR_RST = 0x01, };
243/* ECMR */ 408/* ECMR */
244enum FELIC_MODE_BIT { 409enum FELIC_MODE_BIT {
410#ifdef CONFIG_CPU_SUBTYPE_SH7763
411 ECMR_TRCCM = 0x04000000, ECMR_RCSC = 0x00800000,
412 ECMR_DPAD = 0x00200000, ECMR_RZPF = 0x00100000,
413#endif
245 ECMR_ZPF = 0x00080000, ECMR_PFR = 0x00040000, ECMR_RXF = 0x00020000, 414 ECMR_ZPF = 0x00080000, ECMR_PFR = 0x00040000, ECMR_RXF = 0x00020000,
246 ECMR_TXF = 0x00010000, ECMR_MCT = 0x00002000, ECMR_PRCEF = 0x00001000, 415 ECMR_TXF = 0x00010000, ECMR_MCT = 0x00002000, ECMR_PRCEF = 0x00001000,
247 ECMR_PMDE = 0x00000200, ECMR_RE = 0x00000040, ECMR_TE = 0x00000020, 416 ECMR_PMDE = 0x00000200, ECMR_RE = 0x00000040, ECMR_TE = 0x00000020,
@@ -249,18 +418,47 @@ enum FELIC_MODE_BIT {
249 ECMR_PRM = 0x00000001, 418 ECMR_PRM = 0x00000001,
250}; 419};
251 420
421#ifdef CONFIG_CPU_SUBTYPE_SH7763
422#define ECMR_CHG_DM (ECMR_TRCCM | ECMR_RZPF | ECMR_ZPF |\
423 ECMR_PFR | ECMR_RXF | ECMR_TXF | ECMR_MCT)
424#elif CONFIG_CPU_SUBTYPE_SH7619
425#define ECMR_CHG_DM (ECMR_ZPF | ECMR_PFR | ECMR_RXF | ECMR_TXF)
426#else
427#define ECMR_CHG_DM (ECMR_ZPF | ECMR_PFR | ECMR_RXF | ECMR_TXF | ECMR_MCT)
428#endif
429
252/* ECSR */ 430/* ECSR */
253enum ECSR_STATUS_BIT { 431enum ECSR_STATUS_BIT {
254 ECSR_BRCRX = 0x20, ECSR_PSRTO = 0x10, ECSR_LCHNG = 0x04, 432#ifndef CONFIG_CPU_SUBTYPE_SH7763
433 ECSR_BRCRX = 0x20, ECSR_PSRTO = 0x10,
434#endif
435 ECSR_LCHNG = 0x04,
255 ECSR_MPD = 0x02, ECSR_ICD = 0x01, 436 ECSR_MPD = 0x02, ECSR_ICD = 0x01,
256}; 437};
257 438
439#ifdef CONFIG_CPU_SUBTYPE_SH7763
440# define ECSR_INIT (ECSR_ICD | ECSIPR_MPDIP)
441#else
442# define ECSR_INIT (ECSR_BRCRX | ECSR_PSRTO | \
443 ECSR_LCHNG | ECSR_ICD | ECSIPR_MPDIP)
444#endif
445
258/* ECSIPR */ 446/* ECSIPR */
259enum ECSIPR_STATUS_MASK_BIT { 447enum ECSIPR_STATUS_MASK_BIT {
260 ECSIPR_BRCRXIP = 0x20, ECSIPR_PSRTOIP = 0x10, ECSIPR_LCHNGIP = 0x04, 448#ifndef CONFIG_CPU_SUBTYPE_SH7763
449 ECSIPR_BRCRXIP = 0x20, ECSIPR_PSRTOIP = 0x10,
450#endif
451 ECSIPR_LCHNGIP = 0x04,
261 ECSIPR_MPDIP = 0x02, ECSIPR_ICDIP = 0x01, 452 ECSIPR_MPDIP = 0x02, ECSIPR_ICDIP = 0x01,
262}; 453};
263 454
455#ifdef CONFIG_CPU_SUBTYPE_SH7763
456# define ECSIPR_INIT (ECSIPR_LCHNGIP | ECSIPR_ICDIP | ECSIPR_MPDIP)
457#else
458# define ECSIPR_INIT (ECSIPR_BRCRXIP | ECSIPR_PSRTOIP | ECSIPR_LCHNGIP | \
459 ECSIPR_ICDIP | ECSIPR_MPDIP)
460#endif
461
264/* APR */ 462/* APR */
265enum APR_BIT { 463enum APR_BIT {
266 APR_AP = 0x00000001, 464 APR_AP = 0x00000001,
@@ -285,9 +483,22 @@ enum RPADIR_BIT {
285 RPADIR_PADR = 0x0003f, 483 RPADIR_PADR = 0x0003f,
286}; 484};
287 485
486#if defined(CONFIG_CPU_SUBTYPE_SH7763)
487# define RPADIR_INIT (0x00)
488#else
489# define RPADIR_INIT (RPADIR_PADS1)
490#endif
491
492/* RFLR */
493#define RFLR_VALUE 0x1000
494
288/* FDR */ 495/* FDR */
289enum FIFO_SIZE_BIT { 496enum FIFO_SIZE_BIT {
497#ifndef CONFIG_CPU_SUBTYPE_SH7619
290 FIFO_SIZE_T = 0x00000700, FIFO_SIZE_R = 0x00000007, 498 FIFO_SIZE_T = 0x00000700, FIFO_SIZE_R = 0x00000007,
499#else
500 FIFO_SIZE_T = 0x00000100, FIFO_SIZE_R = 0x00000001,
501#endif
291}; 502};
292enum phy_offsets { 503enum phy_offsets {
293 PHY_CTRL = 0, PHY_STAT = 1, PHY_IDT1 = 2, PHY_IDT2 = 3, 504 PHY_CTRL = 0, PHY_STAT = 1, PHY_IDT1 = 2, PHY_IDT2 = 3,
@@ -316,7 +527,7 @@ enum PHY_ANA_BIT {
316 PHY_A_NP = 0x8000, PHY_A_ACK = 0x4000, PHY_A_RF = 0x2000, 527 PHY_A_NP = 0x8000, PHY_A_ACK = 0x4000, PHY_A_RF = 0x2000,
317 PHY_A_FCS = 0x0400, PHY_A_T4 = 0x0200, PHY_A_FDX = 0x0100, 528 PHY_A_FCS = 0x0400, PHY_A_T4 = 0x0200, PHY_A_FDX = 0x0100,
318 PHY_A_HDX = 0x0080, PHY_A_10FDX = 0x0040, PHY_A_10HDX = 0x0020, 529 PHY_A_HDX = 0x0080, PHY_A_10FDX = 0x0040, PHY_A_10HDX = 0x0020,
319 PHY_A_SEL = 0x001f, 530 PHY_A_SEL = 0x001e,
320}; 531};
321/* PHY_ANL */ 532/* PHY_ANL */
322enum PHY_ANL_BIT { 533enum PHY_ANL_BIT {
@@ -403,7 +614,7 @@ struct sh_eth_txdesc {
403#endif 614#endif
404 u32 addr; /* TD2 */ 615 u32 addr; /* TD2 */
405 u32 pad1; /* padding data */ 616 u32 pad1; /* padding data */
406}; 617} __attribute__((aligned(2), packed));
407 618
408/* 619/*
409 * The sh ether Rx buffer descriptors. 620 * The sh ether Rx buffer descriptors.
@@ -420,7 +631,7 @@ struct sh_eth_rxdesc {
420#endif 631#endif
421 u32 addr; /* RD2 */ 632 u32 addr; /* RD2 */
422 u32 pad0; /* padding data */ 633 u32 pad0; /* padding data */
423}; 634} __attribute__((aligned(2), packed));
424 635
425struct sh_eth_private { 636struct sh_eth_private {
426 dma_addr_t rx_desc_dma; 637 dma_addr_t rx_desc_dma;
@@ -435,6 +646,7 @@ struct sh_eth_private {
435 u32 cur_rx, dirty_rx; /* Producer/consumer ring indices */ 646 u32 cur_rx, dirty_rx; /* Producer/consumer ring indices */
436 u32 cur_tx, dirty_tx; 647 u32 cur_tx, dirty_tx;
437 u32 rx_buf_sz; /* Based on MTU+slack. */ 648 u32 rx_buf_sz; /* Based on MTU+slack. */
649 int edmac_endian;
438 /* MII transceiver section. */ 650 /* MII transceiver section. */
439 u32 phy_id; /* PHY ID */ 651 u32 phy_id; /* PHY ID */
440 struct mii_bus *mii_bus; /* MDIO bus control */ 652 struct mii_bus *mii_bus; /* MDIO bus control */
@@ -449,6 +661,10 @@ struct sh_eth_private {
449 struct net_device_stats tsu_stats; /* TSU forward status */ 661 struct net_device_stats tsu_stats; /* TSU forward status */
450}; 662};
451 663
664#ifdef CONFIG_CPU_SUBTYPE_SH7763
665/* SH7763 has endian control register */
666#define swaps(x, y)
667#else
452static void swaps(char *src, int len) 668static void swaps(char *src, int len)
453{ 669{
454#ifdef __LITTLE_ENDIAN__ 670#ifdef __LITTLE_ENDIAN__
@@ -460,5 +676,5 @@ static void swaps(char *src, int len)
460 *p = swab32(*p); 676 *p = swab32(*p);
461#endif 677#endif
462} 678}
463 679#endif /* CONFIG_CPU_SUBTYPE_SH7763 */
464#endif 680#endif
diff --git a/drivers/net/skfp/smt.c b/drivers/net/skfp/smt.c
index ffbfb1b79f97..805383b33d3c 100644
--- a/drivers/net/skfp/smt.c
+++ b/drivers/net/skfp/smt.c
@@ -19,6 +19,7 @@
19#include "h/smc.h" 19#include "h/smc.h"
20#include "h/smt_p.h" 20#include "h/smt_p.h"
21#include <linux/bitrev.h> 21#include <linux/bitrev.h>
22#include <linux/kernel.h>
22 23
23#define KERNEL 24#define KERNEL
24#include "h/smtstate.h" 25#include "h/smtstate.h"
@@ -1730,20 +1731,18 @@ void fddi_send_antc(struct s_smc *smc, struct fddi_addr *dest)
1730#endif 1731#endif
1731 1732
1732#ifdef DEBUG 1733#ifdef DEBUG
1733#define hextoasc(x) "0123456789abcdef"[x]
1734
1735char *addr_to_string(struct fddi_addr *addr) 1734char *addr_to_string(struct fddi_addr *addr)
1736{ 1735{
1737 int i ; 1736 int i ;
1738 static char string[6*3] = "****" ; 1737 static char string[6*3] = "****" ;
1739 1738
1740 for (i = 0 ; i < 6 ; i++) { 1739 for (i = 0 ; i < 6 ; i++) {
1741 string[i*3] = hextoasc((addr->a[i]>>4)&0xf) ; 1740 string[i * 3] = hex_asc_hi(addr->a[i]);
1742 string[i*3+1] = hextoasc((addr->a[i])&0xf) ; 1741 string[i * 3 + 1] = hex_asc_lo(addr->a[i]);
1743 string[i*3+2] = ':' ; 1742 string[i * 3 + 2] = ':';
1744 } 1743 }
1745 string[5*3+2] = 0 ; 1744 string[5 * 3 + 2] = 0;
1746 return(string) ; 1745 return(string);
1747} 1746}
1748#endif 1747#endif
1749 1748
diff --git a/drivers/net/sky2.c b/drivers/net/sky2.c
index 5257cf464f1a..7d29edcd40b4 100644
--- a/drivers/net/sky2.c
+++ b/drivers/net/sky2.c
@@ -275,86 +275,6 @@ static void sky2_power_aux(struct sky2_hw *hw)
275 PC_VAUX_ON | PC_VCC_OFF)); 275 PC_VAUX_ON | PC_VCC_OFF));
276} 276}
277 277
278static void sky2_power_state(struct sky2_hw *hw, pci_power_t state)
279{
280 u16 power_control = sky2_pci_read16(hw, hw->pm_cap + PCI_PM_CTRL);
281 int pex = pci_find_capability(hw->pdev, PCI_CAP_ID_EXP);
282 u32 reg;
283
284 sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_ON);
285
286 switch (state) {
287 case PCI_D0:
288 break;
289
290 case PCI_D1:
291 power_control |= 1;
292 break;
293
294 case PCI_D2:
295 power_control |= 2;
296 break;
297
298 case PCI_D3hot:
299 case PCI_D3cold:
300 power_control |= 3;
301 if (hw->flags & SKY2_HW_ADV_POWER_CTL) {
302 /* additional power saving measurements */
303 reg = sky2_pci_read32(hw, PCI_DEV_REG4);
304
305 /* set gating core clock for LTSSM in L1 state */
306 reg |= P_PEX_LTSSM_STAT(P_PEX_LTSSM_L1_STAT) |
307 /* auto clock gated scheme controlled by CLKREQ */
308 P_ASPM_A1_MODE_SELECT |
309 /* enable Gate Root Core Clock */
310 P_CLK_GATE_ROOT_COR_ENA;
311
312 if (pex && (hw->flags & SKY2_HW_CLK_POWER)) {
313 /* enable Clock Power Management (CLKREQ) */
314 u16 ctrl = sky2_pci_read16(hw, pex + PCI_EXP_DEVCTL);
315
316 ctrl |= PCI_EXP_DEVCTL_AUX_PME;
317 sky2_pci_write16(hw, pex + PCI_EXP_DEVCTL, ctrl);
318 } else
319 /* force CLKREQ Enable in Our4 (A1b only) */
320 reg |= P_ASPM_FORCE_CLKREQ_ENA;
321
322 /* set Mask Register for Release/Gate Clock */
323 sky2_pci_write32(hw, PCI_DEV_REG5,
324 P_REL_PCIE_EXIT_L1_ST | P_GAT_PCIE_ENTER_L1_ST |
325 P_REL_PCIE_RX_EX_IDLE | P_GAT_PCIE_RX_EL_IDLE |
326 P_REL_GPHY_LINK_UP | P_GAT_GPHY_LINK_DOWN);
327 } else
328 sky2_write8(hw, B28_Y2_ASF_STAT_CMD, Y2_ASF_CLK_HALT);
329
330 /* put CPU into reset state */
331 sky2_write8(hw, B28_Y2_ASF_STAT_CMD, HCU_CCSR_ASF_RESET);
332 if (hw->chip_id == CHIP_ID_YUKON_SUPR && hw->chip_rev == CHIP_REV_YU_SU_A0)
333 /* put CPU into halt state */
334 sky2_write8(hw, B28_Y2_ASF_STAT_CMD, HCU_CCSR_ASF_HALTED);
335
336 if (pex && !(hw->flags & SKY2_HW_RAM_BUFFER)) {
337 reg = sky2_pci_read32(hw, PCI_DEV_REG1);
338 /* force to PCIe L1 */
339 reg |= PCI_FORCE_PEX_L1;
340 sky2_pci_write32(hw, PCI_DEV_REG1, reg);
341 }
342 break;
343
344 default:
345 dev_warn(&hw->pdev->dev, PFX "Invalid power state (%d) ",
346 state);
347 return;
348 }
349
350 power_control |= PCI_PM_CTRL_PME_ENABLE;
351 /* Finally, set the new power state. */
352 sky2_pci_write32(hw, hw->pm_cap + PCI_PM_CTRL, power_control);
353
354 sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_OFF);
355 sky2_pci_read32(hw, B0_CTST);
356}
357
358static void sky2_gmac_reset(struct sky2_hw *hw, unsigned port) 278static void sky2_gmac_reset(struct sky2_hw *hw, unsigned port)
359{ 279{
360 u16 reg; 280 u16 reg;
@@ -709,6 +629,11 @@ static void sky2_phy_power_up(struct sky2_hw *hw, unsigned port)
709 sky2_pci_write32(hw, PCI_DEV_REG1, reg1); 629 sky2_pci_write32(hw, PCI_DEV_REG1, reg1);
710 sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_OFF); 630 sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_OFF);
711 sky2_pci_read32(hw, PCI_DEV_REG1); 631 sky2_pci_read32(hw, PCI_DEV_REG1);
632
633 if (hw->chip_id == CHIP_ID_YUKON_FE)
634 gm_phy_write(hw, port, PHY_MARV_CTRL, PHY_CT_ANE);
635 else if (hw->flags & SKY2_HW_ADV_POWER_CTL)
636 sky2_write8(hw, SK_REG(port, GPHY_CTRL), GPC_RST_CLR);
712} 637}
713 638
714static void sky2_phy_power_down(struct sky2_hw *hw, unsigned port) 639static void sky2_phy_power_down(struct sky2_hw *hw, unsigned port)
@@ -2855,10 +2780,6 @@ static int __devinit sky2_init(struct sky2_hw *hw)
2855 hw->flags = SKY2_HW_GIGABIT 2780 hw->flags = SKY2_HW_GIGABIT
2856 | SKY2_HW_NEWER_PHY 2781 | SKY2_HW_NEWER_PHY
2857 | SKY2_HW_ADV_POWER_CTL; 2782 | SKY2_HW_ADV_POWER_CTL;
2858
2859 /* check for Rev. A1 dev 4200 */
2860 if (sky2_read16(hw, Q_ADDR(Q_XA1, Q_WM)) == 0)
2861 hw->flags |= SKY2_HW_CLK_POWER;
2862 break; 2783 break;
2863 2784
2864 case CHIP_ID_YUKON_EX: 2785 case CHIP_ID_YUKON_EX:
@@ -2914,12 +2835,6 @@ static int __devinit sky2_init(struct sky2_hw *hw)
2914 if (hw->pmd_type == 'L' || hw->pmd_type == 'S' || hw->pmd_type == 'P') 2835 if (hw->pmd_type == 'L' || hw->pmd_type == 'S' || hw->pmd_type == 'P')
2915 hw->flags |= SKY2_HW_FIBRE_PHY; 2836 hw->flags |= SKY2_HW_FIBRE_PHY;
2916 2837
2917 hw->pm_cap = pci_find_capability(hw->pdev, PCI_CAP_ID_PM);
2918 if (hw->pm_cap == 0) {
2919 dev_err(&hw->pdev->dev, "cannot find PowerManagement capability\n");
2920 return -EIO;
2921 }
2922
2923 hw->ports = 1; 2838 hw->ports = 1;
2924 t8 = sky2_read8(hw, B2_Y2_HW_RES); 2839 t8 = sky2_read8(hw, B2_Y2_HW_RES);
2925 if ((t8 & CFG_DUAL_MAC_MSK) == CFG_DUAL_MAC_MSK) { 2840 if ((t8 & CFG_DUAL_MAC_MSK) == CFG_DUAL_MAC_MSK) {
@@ -4512,7 +4427,7 @@ static int sky2_suspend(struct pci_dev *pdev, pm_message_t state)
4512 4427
4513 pci_save_state(pdev); 4428 pci_save_state(pdev);
4514 pci_enable_wake(pdev, pci_choose_state(pdev, state), wol); 4429 pci_enable_wake(pdev, pci_choose_state(pdev, state), wol);
4515 sky2_power_state(hw, pci_choose_state(pdev, state)); 4430 pci_set_power_state(pdev, pci_choose_state(pdev, state));
4516 4431
4517 return 0; 4432 return 0;
4518} 4433}
@@ -4525,7 +4440,9 @@ static int sky2_resume(struct pci_dev *pdev)
4525 if (!hw) 4440 if (!hw)
4526 return 0; 4441 return 0;
4527 4442
4528 sky2_power_state(hw, PCI_D0); 4443 err = pci_set_power_state(pdev, PCI_D0);
4444 if (err)
4445 goto out;
4529 4446
4530 err = pci_restore_state(pdev); 4447 err = pci_restore_state(pdev);
4531 if (err) 4448 if (err)
@@ -4595,7 +4512,7 @@ static void sky2_shutdown(struct pci_dev *pdev)
4595 pci_enable_wake(pdev, PCI_D3cold, wol); 4512 pci_enable_wake(pdev, PCI_D3cold, wol);
4596 4513
4597 pci_disable_device(pdev); 4514 pci_disable_device(pdev);
4598 sky2_power_state(hw, PCI_D3hot); 4515 pci_set_power_state(pdev, PCI_D3hot);
4599} 4516}
4600 4517
4601static struct pci_driver sky2_driver = { 4518static struct pci_driver sky2_driver = {
diff --git a/drivers/net/sky2.h b/drivers/net/sky2.h
index 4d9c4a19bb85..92fb24b27d45 100644
--- a/drivers/net/sky2.h
+++ b/drivers/net/sky2.h
@@ -2072,9 +2072,7 @@ struct sky2_hw {
2072#define SKY2_HW_NEW_LE 0x00000020 /* new LSOv2 format */ 2072#define SKY2_HW_NEW_LE 0x00000020 /* new LSOv2 format */
2073#define SKY2_HW_AUTO_TX_SUM 0x00000040 /* new IP decode for Tx */ 2073#define SKY2_HW_AUTO_TX_SUM 0x00000040 /* new IP decode for Tx */
2074#define SKY2_HW_ADV_POWER_CTL 0x00000080 /* additional PHY power regs */ 2074#define SKY2_HW_ADV_POWER_CTL 0x00000080 /* additional PHY power regs */
2075#define SKY2_HW_CLK_POWER 0x00000100 /* clock power management */
2076 2075
2077 int pm_cap;
2078 u8 chip_id; 2076 u8 chip_id;
2079 u8 chip_rev; 2077 u8 chip_rev;
2080 u8 pmd_type; 2078 u8 pmd_type;
diff --git a/drivers/net/smc911x.h b/drivers/net/smc911x.h
index 76c17c28fab4..2abfc2845198 100644
--- a/drivers/net/smc911x.h
+++ b/drivers/net/smc911x.h
@@ -222,7 +222,7 @@ static inline void SMC_outsl(struct smc911x_local *lp, int reg,
222 */ 222 */
223#include <linux/dma-mapping.h> 223#include <linux/dma-mapping.h>
224#include <asm/dma.h> 224#include <asm/dma.h>
225#include <asm/arch/pxa-regs.h> 225#include <mach/pxa-regs.h>
226 226
227static dma_addr_t rx_dmabuf, tx_dmabuf; 227static dma_addr_t rx_dmabuf, tx_dmabuf;
228static int rx_dmalen, tx_dmalen; 228static int rx_dmalen, tx_dmalen;
diff --git a/drivers/net/smc91x.h b/drivers/net/smc91x.h
index 22209b6f1405..997e7f1d5c6e 100644
--- a/drivers/net/smc91x.h
+++ b/drivers/net/smc91x.h
@@ -187,7 +187,7 @@ static inline void SMC_outw(u16 val, void __iomem *ioaddr, int reg)
187 187
188#elif defined(CONFIG_SA1100_ASSABET) 188#elif defined(CONFIG_SA1100_ASSABET)
189 189
190#include <asm/arch/neponset.h> 190#include <mach/neponset.h>
191 191
192/* We can only do 8-bit reads and writes in the static memory space. */ 192/* We can only do 8-bit reads and writes in the static memory space. */
193#define SMC_CAN_USE_8BIT 1 193#define SMC_CAN_USE_8BIT 1
@@ -339,7 +339,7 @@ SMC_outw(u16 val, void __iomem *ioaddr, int reg)
339 * IOBARRIER on entry to their ISR. 339 * IOBARRIER on entry to their ISR.
340 */ 340 */
341 341
342#include <asm/arch/constants.h> /* IOBARRIER_VIRT */ 342#include <mach/constants.h> /* IOBARRIER_VIRT */
343 343
344#define SMC_CAN_USE_8BIT 0 344#define SMC_CAN_USE_8BIT 0
345#define SMC_CAN_USE_16BIT 1 345#define SMC_CAN_USE_16BIT 1
@@ -525,7 +525,7 @@ struct smc_local {
525 */ 525 */
526#include <linux/dma-mapping.h> 526#include <linux/dma-mapping.h>
527#include <asm/dma.h> 527#include <asm/dma.h>
528#include <asm/arch/pxa-regs.h> 528#include <mach/pxa-regs.h>
529 529
530#ifdef SMC_insl 530#ifdef SMC_insl
531#undef SMC_insl 531#undef SMC_insl
diff --git a/drivers/net/stnic.c b/drivers/net/stnic.c
index b65be5d70fec..2ed0bd596815 100644
--- a/drivers/net/stnic.c
+++ b/drivers/net/stnic.c
@@ -19,7 +19,7 @@
19 19
20#include <asm/system.h> 20#include <asm/system.h>
21#include <asm/io.h> 21#include <asm/io.h>
22#include <asm/se.h> 22#include <mach-se/mach/se.h>
23#include <asm/machvec.h> 23#include <asm/machvec.h>
24#ifdef CONFIG_SH_STANDARD_BIOS 24#ifdef CONFIG_SH_STANDARD_BIOS
25#include <asm/sh_bios.h> 25#include <asm/sh_bios.h>
diff --git a/drivers/net/sun3_82586.c b/drivers/net/sun3_82586.c
index 9b2a7f7bb258..e531302d95f5 100644
--- a/drivers/net/sun3_82586.c
+++ b/drivers/net/sun3_82586.c
@@ -425,14 +425,11 @@ static int init586(struct net_device *dev)
425 int len = ((char *) p->iscp - (char *) ptr - 8) / 6; 425 int len = ((char *) p->iscp - (char *) ptr - 8) / 6;
426 if(num_addrs > len) { 426 if(num_addrs > len) {
427 printk("%s: switching to promisc. mode\n",dev->name); 427 printk("%s: switching to promisc. mode\n",dev->name);
428 dev->flags|=IFF_PROMISC; 428 cfg_cmd->promisc = 1;
429 } 429 }
430 } 430 }
431 if(dev->flags&IFF_PROMISC) 431 if(dev->flags&IFF_PROMISC)
432 { 432 cfg_cmd->promisc = 1;
433 cfg_cmd->promisc=1;
434 dev->flags|=IFF_PROMISC;
435 }
436 cfg_cmd->carr_coll = 0x00; 433 cfg_cmd->carr_coll = 0x00;
437 434
438 p->scb->cbl_offset = make16(cfg_cmd); 435 p->scb->cbl_offset = make16(cfg_cmd);
diff --git a/drivers/net/tg3.c b/drivers/net/tg3.c
index 633c128a6228..d2439b85a790 100644
--- a/drivers/net/tg3.c
+++ b/drivers/net/tg3.c
@@ -1982,8 +1982,6 @@ static void tg3_power_down_phy(struct tg3 *tp)
1982static int tg3_set_power_state(struct tg3 *tp, pci_power_t state) 1982static int tg3_set_power_state(struct tg3 *tp, pci_power_t state)
1983{ 1983{
1984 u32 misc_host_ctrl; 1984 u32 misc_host_ctrl;
1985 u16 power_control, power_caps;
1986 int pm = tp->pm_cap;
1987 1985
1988 /* Make sure register accesses (indirect or otherwise) 1986 /* Make sure register accesses (indirect or otherwise)
1989 * will function correctly. 1987 * will function correctly.
@@ -1992,18 +1990,10 @@ static int tg3_set_power_state(struct tg3 *tp, pci_power_t state)
1992 TG3PCI_MISC_HOST_CTRL, 1990 TG3PCI_MISC_HOST_CTRL,
1993 tp->misc_host_ctrl); 1991 tp->misc_host_ctrl);
1994 1992
1995 pci_read_config_word(tp->pdev,
1996 pm + PCI_PM_CTRL,
1997 &power_control);
1998 power_control |= PCI_PM_CTRL_PME_STATUS;
1999 power_control &= ~(PCI_PM_CTRL_STATE_MASK);
2000 switch (state) { 1993 switch (state) {
2001 case PCI_D0: 1994 case PCI_D0:
2002 power_control |= 0; 1995 pci_enable_wake(tp->pdev, state, false);
2003 pci_write_config_word(tp->pdev, 1996 pci_set_power_state(tp->pdev, PCI_D0);
2004 pm + PCI_PM_CTRL,
2005 power_control);
2006 udelay(100); /* Delay after power state change */
2007 1997
2008 /* Switch out of Vaux if it is a NIC */ 1998 /* Switch out of Vaux if it is a NIC */
2009 if (tp->tg3_flags2 & TG3_FLG2_IS_NIC) 1999 if (tp->tg3_flags2 & TG3_FLG2_IS_NIC)
@@ -2012,26 +2002,15 @@ static int tg3_set_power_state(struct tg3 *tp, pci_power_t state)
2012 return 0; 2002 return 0;
2013 2003
2014 case PCI_D1: 2004 case PCI_D1:
2015 power_control |= 1;
2016 break;
2017
2018 case PCI_D2: 2005 case PCI_D2:
2019 power_control |= 2;
2020 break;
2021
2022 case PCI_D3hot: 2006 case PCI_D3hot:
2023 power_control |= 3;
2024 break; 2007 break;
2025 2008
2026 default: 2009 default:
2027 printk(KERN_WARNING PFX "%s: Invalid power state (%d) " 2010 printk(KERN_ERR PFX "%s: Invalid power state (D%d) requested\n",
2028 "requested.\n", 2011 tp->dev->name, state);
2029 tp->dev->name, state);
2030 return -EINVAL; 2012 return -EINVAL;
2031 } 2013 }
2032
2033 power_control |= PCI_PM_CTRL_PME_ENABLE;
2034
2035 misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL); 2014 misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
2036 tw32(TG3PCI_MISC_HOST_CTRL, 2015 tw32(TG3PCI_MISC_HOST_CTRL,
2037 misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT); 2016 misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT);
@@ -2109,8 +2088,6 @@ static int tg3_set_power_state(struct tg3 *tp, pci_power_t state)
2109 WOL_DRV_WOL | 2088 WOL_DRV_WOL |
2110 WOL_SET_MAGIC_PKT); 2089 WOL_SET_MAGIC_PKT);
2111 2090
2112 pci_read_config_word(tp->pdev, pm + PCI_PM_PMC, &power_caps);
2113
2114 if (tp->tg3_flags & TG3_FLAG_WOL_ENABLE) { 2091 if (tp->tg3_flags & TG3_FLAG_WOL_ENABLE) {
2115 u32 mac_mode; 2092 u32 mac_mode;
2116 2093
@@ -2143,8 +2120,8 @@ static int tg3_set_power_state(struct tg3 *tp, pci_power_t state)
2143 if (!(tp->tg3_flags2 & TG3_FLG2_5750_PLUS)) 2120 if (!(tp->tg3_flags2 & TG3_FLG2_5750_PLUS))
2144 tw32(MAC_LED_CTRL, tp->led_ctrl); 2121 tw32(MAC_LED_CTRL, tp->led_ctrl);
2145 2122
2146 if (((power_caps & PCI_PM_CAP_PME_D3cold) && 2123 if (pci_pme_capable(tp->pdev, state) &&
2147 (tp->tg3_flags & TG3_FLAG_WOL_ENABLE))) 2124 (tp->tg3_flags & TG3_FLAG_WOL_ENABLE))
2148 mac_mode |= MAC_MODE_MAGIC_PKT_ENABLE; 2125 mac_mode |= MAC_MODE_MAGIC_PKT_ENABLE;
2149 2126
2150 tw32_f(MAC_MODE, mac_mode); 2127 tw32_f(MAC_MODE, mac_mode);
@@ -2236,9 +2213,11 @@ static int tg3_set_power_state(struct tg3 *tp, pci_power_t state)
2236 2213
2237 tg3_write_sig_post_reset(tp, RESET_KIND_SHUTDOWN); 2214 tg3_write_sig_post_reset(tp, RESET_KIND_SHUTDOWN);
2238 2215
2216 if (tp->tg3_flags & TG3_FLAG_WOL_ENABLE)
2217 pci_enable_wake(tp->pdev, state, true);
2218
2239 /* Finally, set the new power state. */ 2219 /* Finally, set the new power state. */
2240 pci_write_config_word(tp->pdev, pm + PCI_PM_CTRL, power_control); 2220 pci_set_power_state(tp->pdev, state);
2241 udelay(100); /* Delay after power state change */
2242 2221
2243 return 0; 2222 return 0;
2244} 2223}
@@ -7708,21 +7687,11 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
7708 */ 7687 */
7709static int tg3_init_hw(struct tg3 *tp, int reset_phy) 7688static int tg3_init_hw(struct tg3 *tp, int reset_phy)
7710{ 7689{
7711 int err;
7712
7713 /* Force the chip into D0. */
7714 err = tg3_set_power_state(tp, PCI_D0);
7715 if (err)
7716 goto out;
7717
7718 tg3_switch_clocks(tp); 7690 tg3_switch_clocks(tp);
7719 7691
7720 tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0); 7692 tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
7721 7693
7722 err = tg3_reset_hw(tp, reset_phy); 7694 return tg3_reset_hw(tp, reset_phy);
7723
7724out:
7725 return err;
7726} 7695}
7727 7696
7728#define TG3_STAT_ADD32(PSTAT, REG) \ 7697#define TG3_STAT_ADD32(PSTAT, REG) \
@@ -8037,13 +8006,11 @@ static int tg3_open(struct net_device *dev)
8037 8006
8038 netif_carrier_off(tp->dev); 8007 netif_carrier_off(tp->dev);
8039 8008
8040 tg3_full_lock(tp, 0);
8041
8042 err = tg3_set_power_state(tp, PCI_D0); 8009 err = tg3_set_power_state(tp, PCI_D0);
8043 if (err) { 8010 if (err)
8044 tg3_full_unlock(tp);
8045 return err; 8011 return err;
8046 } 8012
8013 tg3_full_lock(tp, 0);
8047 8014
8048 tg3_disable_ints(tp); 8015 tg3_disable_ints(tp);
8049 tp->tg3_flags &= ~TG3_FLAG_INIT_COMPLETE; 8016 tp->tg3_flags &= ~TG3_FLAG_INIT_COMPLETE;
@@ -9065,7 +9032,8 @@ static void tg3_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
9065{ 9032{
9066 struct tg3 *tp = netdev_priv(dev); 9033 struct tg3 *tp = netdev_priv(dev);
9067 9034
9068 if (tp->tg3_flags & TG3_FLAG_WOL_CAP) 9035 if ((tp->tg3_flags & TG3_FLAG_WOL_CAP) &&
9036 device_can_wakeup(&tp->pdev->dev))
9069 wol->supported = WAKE_MAGIC; 9037 wol->supported = WAKE_MAGIC;
9070 else 9038 else
9071 wol->supported = 0; 9039 wol->supported = 0;
@@ -9078,18 +9046,22 @@ static void tg3_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
9078static int tg3_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol) 9046static int tg3_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
9079{ 9047{
9080 struct tg3 *tp = netdev_priv(dev); 9048 struct tg3 *tp = netdev_priv(dev);
9049 struct device *dp = &tp->pdev->dev;
9081 9050
9082 if (wol->wolopts & ~WAKE_MAGIC) 9051 if (wol->wolopts & ~WAKE_MAGIC)
9083 return -EINVAL; 9052 return -EINVAL;
9084 if ((wol->wolopts & WAKE_MAGIC) && 9053 if ((wol->wolopts & WAKE_MAGIC) &&
9085 !(tp->tg3_flags & TG3_FLAG_WOL_CAP)) 9054 !((tp->tg3_flags & TG3_FLAG_WOL_CAP) && device_can_wakeup(dp)))
9086 return -EINVAL; 9055 return -EINVAL;
9087 9056
9088 spin_lock_bh(&tp->lock); 9057 spin_lock_bh(&tp->lock);
9089 if (wol->wolopts & WAKE_MAGIC) 9058 if (wol->wolopts & WAKE_MAGIC) {
9090 tp->tg3_flags |= TG3_FLAG_WOL_ENABLE; 9059 tp->tg3_flags |= TG3_FLAG_WOL_ENABLE;
9091 else 9060 device_set_wakeup_enable(dp, true);
9061 } else {
9092 tp->tg3_flags &= ~TG3_FLAG_WOL_ENABLE; 9062 tp->tg3_flags &= ~TG3_FLAG_WOL_ENABLE;
9063 device_set_wakeup_enable(dp, false);
9064 }
9093 spin_unlock_bh(&tp->lock); 9065 spin_unlock_bh(&tp->lock);
9094 9066
9095 return 0; 9067 return 0;
@@ -11296,7 +11268,8 @@ static void __devinit tg3_get_eeprom_hw_cfg(struct tg3 *tp)
11296 if (val & VCPU_CFGSHDW_ASPM_DBNC) 11268 if (val & VCPU_CFGSHDW_ASPM_DBNC)
11297 tp->tg3_flags |= TG3_FLAG_ASPM_WORKAROUND; 11269 tp->tg3_flags |= TG3_FLAG_ASPM_WORKAROUND;
11298 if ((val & VCPU_CFGSHDW_WOL_ENABLE) && 11270 if ((val & VCPU_CFGSHDW_WOL_ENABLE) &&
11299 (val & VCPU_CFGSHDW_WOL_MAGPKT)) 11271 (val & VCPU_CFGSHDW_WOL_MAGPKT) &&
11272 device_may_wakeup(&tp->pdev->dev))
11300 tp->tg3_flags |= TG3_FLAG_WOL_ENABLE; 11273 tp->tg3_flags |= TG3_FLAG_WOL_ENABLE;
11301 return; 11274 return;
11302 } 11275 }
@@ -11426,8 +11399,9 @@ static void __devinit tg3_get_eeprom_hw_cfg(struct tg3 *tp)
11426 !(nic_cfg & NIC_SRAM_DATA_CFG_FIBER_WOL)) 11399 !(nic_cfg & NIC_SRAM_DATA_CFG_FIBER_WOL))
11427 tp->tg3_flags &= ~TG3_FLAG_WOL_CAP; 11400 tp->tg3_flags &= ~TG3_FLAG_WOL_CAP;
11428 11401
11429 if (tp->tg3_flags & TG3_FLAG_WOL_CAP && 11402 if ((tp->tg3_flags & TG3_FLAG_WOL_CAP) &&
11430 nic_cfg & NIC_SRAM_DATA_CFG_WOL_ENABLE) 11403 (nic_cfg & NIC_SRAM_DATA_CFG_WOL_ENABLE) &&
11404 device_may_wakeup(&tp->pdev->dev))
11431 tp->tg3_flags |= TG3_FLAG_WOL_ENABLE; 11405 tp->tg3_flags |= TG3_FLAG_WOL_ENABLE;
11432 11406
11433 if (cfg2 & (1 << 17)) 11407 if (cfg2 & (1 << 17))
@@ -13613,6 +13587,7 @@ static int tg3_suspend(struct pci_dev *pdev, pm_message_t state)
13613{ 13587{
13614 struct net_device *dev = pci_get_drvdata(pdev); 13588 struct net_device *dev = pci_get_drvdata(pdev);
13615 struct tg3 *tp = netdev_priv(dev); 13589 struct tg3 *tp = netdev_priv(dev);
13590 pci_power_t target_state;
13616 int err; 13591 int err;
13617 13592
13618 /* PCI register 4 needs to be saved whether netif_running() or not. 13593 /* PCI register 4 needs to be saved whether netif_running() or not.
@@ -13641,7 +13616,9 @@ static int tg3_suspend(struct pci_dev *pdev, pm_message_t state)
13641 tp->tg3_flags &= ~TG3_FLAG_INIT_COMPLETE; 13616 tp->tg3_flags &= ~TG3_FLAG_INIT_COMPLETE;
13642 tg3_full_unlock(tp); 13617 tg3_full_unlock(tp);
13643 13618
13644 err = tg3_set_power_state(tp, pci_choose_state(pdev, state)); 13619 target_state = pdev->pm_cap ? pci_target_state(pdev) : PCI_D3hot;
13620
13621 err = tg3_set_power_state(tp, target_state);
13645 if (err) { 13622 if (err) {
13646 int err2; 13623 int err2;
13647 13624
diff --git a/drivers/net/tokenring/3c359.c b/drivers/net/tokenring/3c359.c
index 7766cde0d63d..bf621328b601 100644
--- a/drivers/net/tokenring/3c359.c
+++ b/drivers/net/tokenring/3c359.c
@@ -95,20 +95,20 @@ MODULE_DESCRIPTION("3Com 3C359 Velocity XL Token Ring Adapter Driver \n") ;
95static int ringspeed[XL_MAX_ADAPTERS] = {0,} ; 95static int ringspeed[XL_MAX_ADAPTERS] = {0,} ;
96 96
97module_param_array(ringspeed, int, NULL, 0); 97module_param_array(ringspeed, int, NULL, 0);
98MODULE_PARM_DESC(ringspeed,"3c359: Ringspeed selection - 4,16 or 0") ; 98MODULE_PARM_DESC(ringspeed,"3c359: Ringspeed selection - 4,16 or 0") ;
99 99
100/* Packet buffer size */ 100/* Packet buffer size */
101 101
102static int pkt_buf_sz[XL_MAX_ADAPTERS] = {0,} ; 102static int pkt_buf_sz[XL_MAX_ADAPTERS] = {0,} ;
103 103
104module_param_array(pkt_buf_sz, int, NULL, 0) ; 104module_param_array(pkt_buf_sz, int, NULL, 0) ;
105MODULE_PARM_DESC(pkt_buf_sz,"3c359: Initial buffer size") ; 105MODULE_PARM_DESC(pkt_buf_sz,"3c359: Initial buffer size") ;
106/* Message Level */ 106/* Message Level */
107 107
108static int message_level[XL_MAX_ADAPTERS] = {0,} ; 108static int message_level[XL_MAX_ADAPTERS] = {0,} ;
109 109
110module_param_array(message_level, int, NULL, 0) ; 110module_param_array(message_level, int, NULL, 0) ;
111MODULE_PARM_DESC(message_level, "3c359: Level of reported messages \n") ; 111MODULE_PARM_DESC(message_level, "3c359: Level of reported messages") ;
112/* 112/*
113 * This is a real nasty way of doing this, but otherwise you 113 * This is a real nasty way of doing this, but otherwise you
114 * will be stuck with 1555 lines of hex #'s in the code. 114 * will be stuck with 1555 lines of hex #'s in the code.
diff --git a/drivers/net/usb/dm9601.c b/drivers/net/usb/dm9601.c
index f7319d326912..78df2be8a728 100644
--- a/drivers/net/usb/dm9601.c
+++ b/drivers/net/usb/dm9601.c
@@ -55,12 +55,28 @@
55 55
56static int dm_read(struct usbnet *dev, u8 reg, u16 length, void *data) 56static int dm_read(struct usbnet *dev, u8 reg, u16 length, void *data)
57{ 57{
58 void *buf;
59 int err = -ENOMEM;
60
58 devdbg(dev, "dm_read() reg=0x%02x length=%d", reg, length); 61 devdbg(dev, "dm_read() reg=0x%02x length=%d", reg, length);
59 return usb_control_msg(dev->udev, 62
60 usb_rcvctrlpipe(dev->udev, 0), 63 buf = kmalloc(length, GFP_KERNEL);
61 DM_READ_REGS, 64 if (!buf)
62 USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE, 65 goto out;
63 0, reg, data, length, USB_CTRL_SET_TIMEOUT); 66
67 err = usb_control_msg(dev->udev,
68 usb_rcvctrlpipe(dev->udev, 0),
69 DM_READ_REGS,
70 USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
71 0, reg, buf, length, USB_CTRL_SET_TIMEOUT);
72 if (err == length)
73 memcpy(data, buf, length);
74 else if (err >= 0)
75 err = -EINVAL;
76 kfree(buf);
77
78 out:
79 return err;
64} 80}
65 81
66static int dm_read_reg(struct usbnet *dev, u8 reg, u8 *value) 82static int dm_read_reg(struct usbnet *dev, u8 reg, u8 *value)
@@ -70,12 +86,28 @@ static int dm_read_reg(struct usbnet *dev, u8 reg, u8 *value)
70 86
71static int dm_write(struct usbnet *dev, u8 reg, u16 length, void *data) 87static int dm_write(struct usbnet *dev, u8 reg, u16 length, void *data)
72{ 88{
89 void *buf = NULL;
90 int err = -ENOMEM;
91
73 devdbg(dev, "dm_write() reg=0x%02x, length=%d", reg, length); 92 devdbg(dev, "dm_write() reg=0x%02x, length=%d", reg, length);
74 return usb_control_msg(dev->udev, 93
75 usb_sndctrlpipe(dev->udev, 0), 94 if (data) {
76 DM_WRITE_REGS, 95 buf = kmalloc(length, GFP_KERNEL);
77 USB_DIR_OUT | USB_TYPE_VENDOR |USB_RECIP_DEVICE, 96 if (!buf)
78 0, reg, data, length, USB_CTRL_SET_TIMEOUT); 97 goto out;
98 memcpy(buf, data, length);
99 }
100
101 err = usb_control_msg(dev->udev,
102 usb_sndctrlpipe(dev->udev, 0),
103 DM_WRITE_REGS,
104 USB_DIR_OUT | USB_TYPE_VENDOR |USB_RECIP_DEVICE,
105 0, reg, buf, length, USB_CTRL_SET_TIMEOUT);
106 kfree(buf);
107 if (err >= 0 && err < length)
108 err = -EINVAL;
109 out:
110 return err;
79} 111}
80 112
81static int dm_write_reg(struct usbnet *dev, u8 reg, u8 value) 113static int dm_write_reg(struct usbnet *dev, u8 reg, u8 value)
diff --git a/drivers/net/usb/pegasus.c b/drivers/net/usb/pegasus.c
index b588c890ea70..a84ba487c713 100644
--- a/drivers/net/usb/pegasus.c
+++ b/drivers/net/usb/pegasus.c
@@ -1285,6 +1285,21 @@ static void check_carrier(struct work_struct *work)
1285 } 1285 }
1286} 1286}
1287 1287
1288static int pegasus_blacklisted(struct usb_device *udev)
1289{
1290 struct usb_device_descriptor *udd = &udev->descriptor;
1291
1292 /* Special quirk to keep the driver from handling the Belkin Bluetooth
1293 * dongle which happens to have the same ID.
1294 */
1295 if ((udd->idVendor == VENDOR_BELKIN && udd->idProduct == 0x0121) &&
1296 (udd->bDeviceClass == USB_CLASS_WIRELESS_CONTROLLER) &&
1297 (udd->bDeviceProtocol == 1))
1298 return 1;
1299
1300 return 0;
1301}
1302
1288static int pegasus_probe(struct usb_interface *intf, 1303static int pegasus_probe(struct usb_interface *intf,
1289 const struct usb_device_id *id) 1304 const struct usb_device_id *id)
1290{ 1305{
@@ -1296,6 +1311,12 @@ static int pegasus_probe(struct usb_interface *intf,
1296 DECLARE_MAC_BUF(mac); 1311 DECLARE_MAC_BUF(mac);
1297 1312
1298 usb_get_dev(dev); 1313 usb_get_dev(dev);
1314
1315 if (pegasus_blacklisted(dev)) {
1316 res = -ENODEV;
1317 goto out;
1318 }
1319
1299 net = alloc_etherdev(sizeof(struct pegasus)); 1320 net = alloc_etherdev(sizeof(struct pegasus));
1300 if (!net) { 1321 if (!net) {
1301 dev_err(&intf->dev, "can't allocate %s\n", "device"); 1322 dev_err(&intf->dev, "can't allocate %s\n", "device");
diff --git a/drivers/net/via-velocity.c b/drivers/net/via-velocity.c
index 370ce30f2f45..007c12970065 100644
--- a/drivers/net/via-velocity.c
+++ b/drivers/net/via-velocity.c
@@ -662,6 +662,10 @@ static void velocity_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid
662 spin_unlock_irq(&vptr->lock); 662 spin_unlock_irq(&vptr->lock);
663} 663}
664 664
665static void velocity_init_rx_ring_indexes(struct velocity_info *vptr)
666{
667 vptr->rx.dirty = vptr->rx.filled = vptr->rx.curr = 0;
668}
665 669
666/** 670/**
667 * velocity_rx_reset - handle a receive reset 671 * velocity_rx_reset - handle a receive reset
@@ -677,16 +681,16 @@ static void velocity_rx_reset(struct velocity_info *vptr)
677 struct mac_regs __iomem * regs = vptr->mac_regs; 681 struct mac_regs __iomem * regs = vptr->mac_regs;
678 int i; 682 int i;
679 683
680 vptr->rd_dirty = vptr->rd_filled = vptr->rd_curr = 0; 684 velocity_init_rx_ring_indexes(vptr);
681 685
682 /* 686 /*
683 * Init state, all RD entries belong to the NIC 687 * Init state, all RD entries belong to the NIC
684 */ 688 */
685 for (i = 0; i < vptr->options.numrx; ++i) 689 for (i = 0; i < vptr->options.numrx; ++i)
686 vptr->rd_ring[i].rdesc0.len |= OWNED_BY_NIC; 690 vptr->rx.ring[i].rdesc0.len |= OWNED_BY_NIC;
687 691
688 writew(vptr->options.numrx, &regs->RBRDU); 692 writew(vptr->options.numrx, &regs->RBRDU);
689 writel(vptr->rd_pool_dma, &regs->RDBaseLo); 693 writel(vptr->rx.pool_dma, &regs->RDBaseLo);
690 writew(0, &regs->RDIdx); 694 writew(0, &regs->RDIdx);
691 writew(vptr->options.numrx - 1, &regs->RDCSize); 695 writew(vptr->options.numrx - 1, &regs->RDCSize);
692} 696}
@@ -779,15 +783,15 @@ static void velocity_init_registers(struct velocity_info *vptr,
779 783
780 vptr->int_mask = INT_MASK_DEF; 784 vptr->int_mask = INT_MASK_DEF;
781 785
782 writel(vptr->rd_pool_dma, &regs->RDBaseLo); 786 writel(vptr->rx.pool_dma, &regs->RDBaseLo);
783 writew(vptr->options.numrx - 1, &regs->RDCSize); 787 writew(vptr->options.numrx - 1, &regs->RDCSize);
784 mac_rx_queue_run(regs); 788 mac_rx_queue_run(regs);
785 mac_rx_queue_wake(regs); 789 mac_rx_queue_wake(regs);
786 790
787 writew(vptr->options.numtx - 1, &regs->TDCSize); 791 writew(vptr->options.numtx - 1, &regs->TDCSize);
788 792
789 for (i = 0; i < vptr->num_txq; i++) { 793 for (i = 0; i < vptr->tx.numq; i++) {
790 writel(vptr->td_pool_dma[i], &regs->TDBaseLo[i]); 794 writel(vptr->tx.pool_dma[i], &regs->TDBaseLo[i]);
791 mac_tx_queue_run(regs, i); 795 mac_tx_queue_run(regs, i);
792 } 796 }
793 797
@@ -1047,7 +1051,7 @@ static void __devinit velocity_init_info(struct pci_dev *pdev,
1047 1051
1048 vptr->pdev = pdev; 1052 vptr->pdev = pdev;
1049 vptr->chip_id = info->chip_id; 1053 vptr->chip_id = info->chip_id;
1050 vptr->num_txq = info->txqueue; 1054 vptr->tx.numq = info->txqueue;
1051 vptr->multicast_limit = MCAM_SIZE; 1055 vptr->multicast_limit = MCAM_SIZE;
1052 spin_lock_init(&vptr->lock); 1056 spin_lock_init(&vptr->lock);
1053 INIT_LIST_HEAD(&vptr->list); 1057 INIT_LIST_HEAD(&vptr->list);
@@ -1093,14 +1097,14 @@ static int __devinit velocity_get_pci_info(struct velocity_info *vptr, struct pc
1093} 1097}
1094 1098
1095/** 1099/**
1096 * velocity_init_rings - set up DMA rings 1100 * velocity_init_dma_rings - set up DMA rings
1097 * @vptr: Velocity to set up 1101 * @vptr: Velocity to set up
1098 * 1102 *
1099 * Allocate PCI mapped DMA rings for the receive and transmit layer 1103 * Allocate PCI mapped DMA rings for the receive and transmit layer
1100 * to use. 1104 * to use.
1101 */ 1105 */
1102 1106
1103static int velocity_init_rings(struct velocity_info *vptr) 1107static int velocity_init_dma_rings(struct velocity_info *vptr)
1104{ 1108{
1105 struct velocity_opt *opt = &vptr->options; 1109 struct velocity_opt *opt = &vptr->options;
1106 const unsigned int rx_ring_size = opt->numrx * sizeof(struct rx_desc); 1110 const unsigned int rx_ring_size = opt->numrx * sizeof(struct rx_desc);
@@ -1116,7 +1120,7 @@ static int velocity_init_rings(struct velocity_info *vptr)
1116 * pci_alloc_consistent() fulfills the requirement for 64 bytes 1120 * pci_alloc_consistent() fulfills the requirement for 64 bytes
1117 * alignment 1121 * alignment
1118 */ 1122 */
1119 pool = pci_alloc_consistent(pdev, tx_ring_size * vptr->num_txq + 1123 pool = pci_alloc_consistent(pdev, tx_ring_size * vptr->tx.numq +
1120 rx_ring_size, &pool_dma); 1124 rx_ring_size, &pool_dma);
1121 if (!pool) { 1125 if (!pool) {
1122 dev_err(&pdev->dev, "%s : DMA memory allocation failed.\n", 1126 dev_err(&pdev->dev, "%s : DMA memory allocation failed.\n",
@@ -1124,15 +1128,15 @@ static int velocity_init_rings(struct velocity_info *vptr)
1124 return -ENOMEM; 1128 return -ENOMEM;
1125 } 1129 }
1126 1130
1127 vptr->rd_ring = pool; 1131 vptr->rx.ring = pool;
1128 vptr->rd_pool_dma = pool_dma; 1132 vptr->rx.pool_dma = pool_dma;
1129 1133
1130 pool += rx_ring_size; 1134 pool += rx_ring_size;
1131 pool_dma += rx_ring_size; 1135 pool_dma += rx_ring_size;
1132 1136
1133 for (i = 0; i < vptr->num_txq; i++) { 1137 for (i = 0; i < vptr->tx.numq; i++) {
1134 vptr->td_rings[i] = pool; 1138 vptr->tx.rings[i] = pool;
1135 vptr->td_pool_dma[i] = pool_dma; 1139 vptr->tx.pool_dma[i] = pool_dma;
1136 pool += tx_ring_size; 1140 pool += tx_ring_size;
1137 pool_dma += tx_ring_size; 1141 pool_dma += tx_ring_size;
1138 } 1142 }
@@ -1141,18 +1145,18 @@ static int velocity_init_rings(struct velocity_info *vptr)
1141} 1145}
1142 1146
1143/** 1147/**
1144 * velocity_free_rings - free PCI ring pointers 1148 * velocity_free_dma_rings - free PCI ring pointers
1145 * @vptr: Velocity to free from 1149 * @vptr: Velocity to free from
1146 * 1150 *
1147 * Clean up the PCI ring buffers allocated to this velocity. 1151 * Clean up the PCI ring buffers allocated to this velocity.
1148 */ 1152 */
1149 1153
1150static void velocity_free_rings(struct velocity_info *vptr) 1154static void velocity_free_dma_rings(struct velocity_info *vptr)
1151{ 1155{
1152 const int size = vptr->options.numrx * sizeof(struct rx_desc) + 1156 const int size = vptr->options.numrx * sizeof(struct rx_desc) +
1153 vptr->options.numtx * sizeof(struct tx_desc) * vptr->num_txq; 1157 vptr->options.numtx * sizeof(struct tx_desc) * vptr->tx.numq;
1154 1158
1155 pci_free_consistent(vptr->pdev, size, vptr->rd_ring, vptr->rd_pool_dma); 1159 pci_free_consistent(vptr->pdev, size, vptr->rx.ring, vptr->rx.pool_dma);
1156} 1160}
1157 1161
1158static void velocity_give_many_rx_descs(struct velocity_info *vptr) 1162static void velocity_give_many_rx_descs(struct velocity_info *vptr)
@@ -1164,44 +1168,44 @@ static void velocity_give_many_rx_descs(struct velocity_info *vptr)
1164 * RD number must be equal to 4X per hardware spec 1168 * RD number must be equal to 4X per hardware spec
1165 * (programming guide rev 1.20, p.13) 1169 * (programming guide rev 1.20, p.13)
1166 */ 1170 */
1167 if (vptr->rd_filled < 4) 1171 if (vptr->rx.filled < 4)
1168 return; 1172 return;
1169 1173
1170 wmb(); 1174 wmb();
1171 1175
1172 unusable = vptr->rd_filled & 0x0003; 1176 unusable = vptr->rx.filled & 0x0003;
1173 dirty = vptr->rd_dirty - unusable; 1177 dirty = vptr->rx.dirty - unusable;
1174 for (avail = vptr->rd_filled & 0xfffc; avail; avail--) { 1178 for (avail = vptr->rx.filled & 0xfffc; avail; avail--) {
1175 dirty = (dirty > 0) ? dirty - 1 : vptr->options.numrx - 1; 1179 dirty = (dirty > 0) ? dirty - 1 : vptr->options.numrx - 1;
1176 vptr->rd_ring[dirty].rdesc0.len |= OWNED_BY_NIC; 1180 vptr->rx.ring[dirty].rdesc0.len |= OWNED_BY_NIC;
1177 } 1181 }
1178 1182
1179 writew(vptr->rd_filled & 0xfffc, &regs->RBRDU); 1183 writew(vptr->rx.filled & 0xfffc, &regs->RBRDU);
1180 vptr->rd_filled = unusable; 1184 vptr->rx.filled = unusable;
1181} 1185}
1182 1186
1183static int velocity_rx_refill(struct velocity_info *vptr) 1187static int velocity_rx_refill(struct velocity_info *vptr)
1184{ 1188{
1185 int dirty = vptr->rd_dirty, done = 0; 1189 int dirty = vptr->rx.dirty, done = 0;
1186 1190
1187 do { 1191 do {
1188 struct rx_desc *rd = vptr->rd_ring + dirty; 1192 struct rx_desc *rd = vptr->rx.ring + dirty;
1189 1193
1190 /* Fine for an all zero Rx desc at init time as well */ 1194 /* Fine for an all zero Rx desc at init time as well */
1191 if (rd->rdesc0.len & OWNED_BY_NIC) 1195 if (rd->rdesc0.len & OWNED_BY_NIC)
1192 break; 1196 break;
1193 1197
1194 if (!vptr->rd_info[dirty].skb) { 1198 if (!vptr->rx.info[dirty].skb) {
1195 if (velocity_alloc_rx_buf(vptr, dirty) < 0) 1199 if (velocity_alloc_rx_buf(vptr, dirty) < 0)
1196 break; 1200 break;
1197 } 1201 }
1198 done++; 1202 done++;
1199 dirty = (dirty < vptr->options.numrx - 1) ? dirty + 1 : 0; 1203 dirty = (dirty < vptr->options.numrx - 1) ? dirty + 1 : 0;
1200 } while (dirty != vptr->rd_curr); 1204 } while (dirty != vptr->rx.curr);
1201 1205
1202 if (done) { 1206 if (done) {
1203 vptr->rd_dirty = dirty; 1207 vptr->rx.dirty = dirty;
1204 vptr->rd_filled += done; 1208 vptr->rx.filled += done;
1205 } 1209 }
1206 1210
1207 return done; 1211 return done;
@@ -1209,7 +1213,7 @@ static int velocity_rx_refill(struct velocity_info *vptr)
1209 1213
1210static void velocity_set_rxbufsize(struct velocity_info *vptr, int mtu) 1214static void velocity_set_rxbufsize(struct velocity_info *vptr, int mtu)
1211{ 1215{
1212 vptr->rx_buf_sz = (mtu <= ETH_DATA_LEN) ? PKT_BUF_SZ : mtu + 32; 1216 vptr->rx.buf_sz = (mtu <= ETH_DATA_LEN) ? PKT_BUF_SZ : mtu + 32;
1213} 1217}
1214 1218
1215/** 1219/**
@@ -1224,12 +1228,12 @@ static int velocity_init_rd_ring(struct velocity_info *vptr)
1224{ 1228{
1225 int ret = -ENOMEM; 1229 int ret = -ENOMEM;
1226 1230
1227 vptr->rd_info = kcalloc(vptr->options.numrx, 1231 vptr->rx.info = kcalloc(vptr->options.numrx,
1228 sizeof(struct velocity_rd_info), GFP_KERNEL); 1232 sizeof(struct velocity_rd_info), GFP_KERNEL);
1229 if (!vptr->rd_info) 1233 if (!vptr->rx.info)
1230 goto out; 1234 goto out;
1231 1235
1232 vptr->rd_filled = vptr->rd_dirty = vptr->rd_curr = 0; 1236 velocity_init_rx_ring_indexes(vptr);
1233 1237
1234 if (velocity_rx_refill(vptr) != vptr->options.numrx) { 1238 if (velocity_rx_refill(vptr) != vptr->options.numrx) {
1235 VELOCITY_PRT(MSG_LEVEL_ERR, KERN_ERR 1239 VELOCITY_PRT(MSG_LEVEL_ERR, KERN_ERR
@@ -1255,18 +1259,18 @@ static void velocity_free_rd_ring(struct velocity_info *vptr)
1255{ 1259{
1256 int i; 1260 int i;
1257 1261
1258 if (vptr->rd_info == NULL) 1262 if (vptr->rx.info == NULL)
1259 return; 1263 return;
1260 1264
1261 for (i = 0; i < vptr->options.numrx; i++) { 1265 for (i = 0; i < vptr->options.numrx; i++) {
1262 struct velocity_rd_info *rd_info = &(vptr->rd_info[i]); 1266 struct velocity_rd_info *rd_info = &(vptr->rx.info[i]);
1263 struct rx_desc *rd = vptr->rd_ring + i; 1267 struct rx_desc *rd = vptr->rx.ring + i;
1264 1268
1265 memset(rd, 0, sizeof(*rd)); 1269 memset(rd, 0, sizeof(*rd));
1266 1270
1267 if (!rd_info->skb) 1271 if (!rd_info->skb)
1268 continue; 1272 continue;
1269 pci_unmap_single(vptr->pdev, rd_info->skb_dma, vptr->rx_buf_sz, 1273 pci_unmap_single(vptr->pdev, rd_info->skb_dma, vptr->rx.buf_sz,
1270 PCI_DMA_FROMDEVICE); 1274 PCI_DMA_FROMDEVICE);
1271 rd_info->skb_dma = (dma_addr_t) NULL; 1275 rd_info->skb_dma = (dma_addr_t) NULL;
1272 1276
@@ -1274,8 +1278,8 @@ static void velocity_free_rd_ring(struct velocity_info *vptr)
1274 rd_info->skb = NULL; 1278 rd_info->skb = NULL;
1275 } 1279 }
1276 1280
1277 kfree(vptr->rd_info); 1281 kfree(vptr->rx.info);
1278 vptr->rd_info = NULL; 1282 vptr->rx.info = NULL;
1279} 1283}
1280 1284
1281/** 1285/**
@@ -1293,19 +1297,19 @@ static int velocity_init_td_ring(struct velocity_info *vptr)
1293 unsigned int j; 1297 unsigned int j;
1294 1298
1295 /* Init the TD ring entries */ 1299 /* Init the TD ring entries */
1296 for (j = 0; j < vptr->num_txq; j++) { 1300 for (j = 0; j < vptr->tx.numq; j++) {
1297 curr = vptr->td_pool_dma[j]; 1301 curr = vptr->tx.pool_dma[j];
1298 1302
1299 vptr->td_infos[j] = kcalloc(vptr->options.numtx, 1303 vptr->tx.infos[j] = kcalloc(vptr->options.numtx,
1300 sizeof(struct velocity_td_info), 1304 sizeof(struct velocity_td_info),
1301 GFP_KERNEL); 1305 GFP_KERNEL);
1302 if (!vptr->td_infos[j]) { 1306 if (!vptr->tx.infos[j]) {
1303 while(--j >= 0) 1307 while(--j >= 0)
1304 kfree(vptr->td_infos[j]); 1308 kfree(vptr->tx.infos[j]);
1305 return -ENOMEM; 1309 return -ENOMEM;
1306 } 1310 }
1307 1311
1308 vptr->td_tail[j] = vptr->td_curr[j] = vptr->td_used[j] = 0; 1312 vptr->tx.tail[j] = vptr->tx.curr[j] = vptr->tx.used[j] = 0;
1309 } 1313 }
1310 return 0; 1314 return 0;
1311} 1315}
@@ -1317,7 +1321,7 @@ static int velocity_init_td_ring(struct velocity_info *vptr)
1317static void velocity_free_td_ring_entry(struct velocity_info *vptr, 1321static void velocity_free_td_ring_entry(struct velocity_info *vptr,
1318 int q, int n) 1322 int q, int n)
1319{ 1323{
1320 struct velocity_td_info * td_info = &(vptr->td_infos[q][n]); 1324 struct velocity_td_info * td_info = &(vptr->tx.infos[q][n]);
1321 int i; 1325 int i;
1322 1326
1323 if (td_info == NULL) 1327 if (td_info == NULL)
@@ -1349,15 +1353,15 @@ static void velocity_free_td_ring(struct velocity_info *vptr)
1349{ 1353{
1350 int i, j; 1354 int i, j;
1351 1355
1352 for (j = 0; j < vptr->num_txq; j++) { 1356 for (j = 0; j < vptr->tx.numq; j++) {
1353 if (vptr->td_infos[j] == NULL) 1357 if (vptr->tx.infos[j] == NULL)
1354 continue; 1358 continue;
1355 for (i = 0; i < vptr->options.numtx; i++) { 1359 for (i = 0; i < vptr->options.numtx; i++) {
1356 velocity_free_td_ring_entry(vptr, j, i); 1360 velocity_free_td_ring_entry(vptr, j, i);
1357 1361
1358 } 1362 }
1359 kfree(vptr->td_infos[j]); 1363 kfree(vptr->tx.infos[j]);
1360 vptr->td_infos[j] = NULL; 1364 vptr->tx.infos[j] = NULL;
1361 } 1365 }
1362} 1366}
1363 1367
@@ -1374,13 +1378,13 @@ static void velocity_free_td_ring(struct velocity_info *vptr)
1374static int velocity_rx_srv(struct velocity_info *vptr, int status) 1378static int velocity_rx_srv(struct velocity_info *vptr, int status)
1375{ 1379{
1376 struct net_device_stats *stats = &vptr->stats; 1380 struct net_device_stats *stats = &vptr->stats;
1377 int rd_curr = vptr->rd_curr; 1381 int rd_curr = vptr->rx.curr;
1378 int works = 0; 1382 int works = 0;
1379 1383
1380 do { 1384 do {
1381 struct rx_desc *rd = vptr->rd_ring + rd_curr; 1385 struct rx_desc *rd = vptr->rx.ring + rd_curr;
1382 1386
1383 if (!vptr->rd_info[rd_curr].skb) 1387 if (!vptr->rx.info[rd_curr].skb)
1384 break; 1388 break;
1385 1389
1386 if (rd->rdesc0.len & OWNED_BY_NIC) 1390 if (rd->rdesc0.len & OWNED_BY_NIC)
@@ -1412,7 +1416,7 @@ static int velocity_rx_srv(struct velocity_info *vptr, int status)
1412 rd_curr = 0; 1416 rd_curr = 0;
1413 } while (++works <= 15); 1417 } while (++works <= 15);
1414 1418
1415 vptr->rd_curr = rd_curr; 1419 vptr->rx.curr = rd_curr;
1416 1420
1417 if ((works > 0) && (velocity_rx_refill(vptr) > 0)) 1421 if ((works > 0) && (velocity_rx_refill(vptr) > 0))
1418 velocity_give_many_rx_descs(vptr); 1422 velocity_give_many_rx_descs(vptr);
@@ -1510,8 +1514,8 @@ static int velocity_receive_frame(struct velocity_info *vptr, int idx)
1510{ 1514{
1511 void (*pci_action)(struct pci_dev *, dma_addr_t, size_t, int); 1515 void (*pci_action)(struct pci_dev *, dma_addr_t, size_t, int);
1512 struct net_device_stats *stats = &vptr->stats; 1516 struct net_device_stats *stats = &vptr->stats;
1513 struct velocity_rd_info *rd_info = &(vptr->rd_info[idx]); 1517 struct velocity_rd_info *rd_info = &(vptr->rx.info[idx]);
1514 struct rx_desc *rd = &(vptr->rd_ring[idx]); 1518 struct rx_desc *rd = &(vptr->rx.ring[idx]);
1515 int pkt_len = le16_to_cpu(rd->rdesc0.len) & 0x3fff; 1519 int pkt_len = le16_to_cpu(rd->rdesc0.len) & 0x3fff;
1516 struct sk_buff *skb; 1520 struct sk_buff *skb;
1517 1521
@@ -1527,7 +1531,7 @@ static int velocity_receive_frame(struct velocity_info *vptr, int idx)
1527 skb = rd_info->skb; 1531 skb = rd_info->skb;
1528 1532
1529 pci_dma_sync_single_for_cpu(vptr->pdev, rd_info->skb_dma, 1533 pci_dma_sync_single_for_cpu(vptr->pdev, rd_info->skb_dma,
1530 vptr->rx_buf_sz, PCI_DMA_FROMDEVICE); 1534 vptr->rx.buf_sz, PCI_DMA_FROMDEVICE);
1531 1535
1532 /* 1536 /*
1533 * Drop frame not meeting IEEE 802.3 1537 * Drop frame not meeting IEEE 802.3
@@ -1550,7 +1554,7 @@ static int velocity_receive_frame(struct velocity_info *vptr, int idx)
1550 rd_info->skb = NULL; 1554 rd_info->skb = NULL;
1551 } 1555 }
1552 1556
1553 pci_action(vptr->pdev, rd_info->skb_dma, vptr->rx_buf_sz, 1557 pci_action(vptr->pdev, rd_info->skb_dma, vptr->rx.buf_sz,
1554 PCI_DMA_FROMDEVICE); 1558 PCI_DMA_FROMDEVICE);
1555 1559
1556 skb_put(skb, pkt_len - 4); 1560 skb_put(skb, pkt_len - 4);
@@ -1580,10 +1584,10 @@ static int velocity_receive_frame(struct velocity_info *vptr, int idx)
1580 1584
1581static int velocity_alloc_rx_buf(struct velocity_info *vptr, int idx) 1585static int velocity_alloc_rx_buf(struct velocity_info *vptr, int idx)
1582{ 1586{
1583 struct rx_desc *rd = &(vptr->rd_ring[idx]); 1587 struct rx_desc *rd = &(vptr->rx.ring[idx]);
1584 struct velocity_rd_info *rd_info = &(vptr->rd_info[idx]); 1588 struct velocity_rd_info *rd_info = &(vptr->rx.info[idx]);
1585 1589
1586 rd_info->skb = netdev_alloc_skb(vptr->dev, vptr->rx_buf_sz + 64); 1590 rd_info->skb = dev_alloc_skb(vptr->rx.buf_sz + 64);
1587 if (rd_info->skb == NULL) 1591 if (rd_info->skb == NULL)
1588 return -ENOMEM; 1592 return -ENOMEM;
1589 1593
@@ -1592,14 +1596,15 @@ static int velocity_alloc_rx_buf(struct velocity_info *vptr, int idx)
1592 * 64byte alignment. 1596 * 64byte alignment.
1593 */ 1597 */
1594 skb_reserve(rd_info->skb, (unsigned long) rd_info->skb->data & 63); 1598 skb_reserve(rd_info->skb, (unsigned long) rd_info->skb->data & 63);
1595 rd_info->skb_dma = pci_map_single(vptr->pdev, rd_info->skb->data, vptr->rx_buf_sz, PCI_DMA_FROMDEVICE); 1599 rd_info->skb_dma = pci_map_single(vptr->pdev, rd_info->skb->data,
1600 vptr->rx.buf_sz, PCI_DMA_FROMDEVICE);
1596 1601
1597 /* 1602 /*
1598 * Fill in the descriptor to match 1603 * Fill in the descriptor to match
1599 */ 1604 */
1600 1605
1601 *((u32 *) & (rd->rdesc0)) = 0; 1606 *((u32 *) & (rd->rdesc0)) = 0;
1602 rd->size = cpu_to_le16(vptr->rx_buf_sz) | RX_INTEN; 1607 rd->size = cpu_to_le16(vptr->rx.buf_sz) | RX_INTEN;
1603 rd->pa_low = cpu_to_le32(rd_info->skb_dma); 1608 rd->pa_low = cpu_to_le32(rd_info->skb_dma);
1604 rd->pa_high = 0; 1609 rd->pa_high = 0;
1605 return 0; 1610 return 0;
@@ -1625,15 +1630,15 @@ static int velocity_tx_srv(struct velocity_info *vptr, u32 status)
1625 struct velocity_td_info *tdinfo; 1630 struct velocity_td_info *tdinfo;
1626 struct net_device_stats *stats = &vptr->stats; 1631 struct net_device_stats *stats = &vptr->stats;
1627 1632
1628 for (qnum = 0; qnum < vptr->num_txq; qnum++) { 1633 for (qnum = 0; qnum < vptr->tx.numq; qnum++) {
1629 for (idx = vptr->td_tail[qnum]; vptr->td_used[qnum] > 0; 1634 for (idx = vptr->tx.tail[qnum]; vptr->tx.used[qnum] > 0;
1630 idx = (idx + 1) % vptr->options.numtx) { 1635 idx = (idx + 1) % vptr->options.numtx) {
1631 1636
1632 /* 1637 /*
1633 * Get Tx Descriptor 1638 * Get Tx Descriptor
1634 */ 1639 */
1635 td = &(vptr->td_rings[qnum][idx]); 1640 td = &(vptr->tx.rings[qnum][idx]);
1636 tdinfo = &(vptr->td_infos[qnum][idx]); 1641 tdinfo = &(vptr->tx.infos[qnum][idx]);
1637 1642
1638 if (td->tdesc0.len & OWNED_BY_NIC) 1643 if (td->tdesc0.len & OWNED_BY_NIC)
1639 break; 1644 break;
@@ -1657,9 +1662,9 @@ static int velocity_tx_srv(struct velocity_info *vptr, u32 status)
1657 stats->tx_bytes += tdinfo->skb->len; 1662 stats->tx_bytes += tdinfo->skb->len;
1658 } 1663 }
1659 velocity_free_tx_buf(vptr, tdinfo); 1664 velocity_free_tx_buf(vptr, tdinfo);
1660 vptr->td_used[qnum]--; 1665 vptr->tx.used[qnum]--;
1661 } 1666 }
1662 vptr->td_tail[qnum] = idx; 1667 vptr->tx.tail[qnum] = idx;
1663 1668
1664 if (AVAIL_TD(vptr, qnum) < 1) { 1669 if (AVAIL_TD(vptr, qnum) < 1) {
1665 full = 1; 1670 full = 1;
@@ -1846,6 +1851,40 @@ static void velocity_free_tx_buf(struct velocity_info *vptr, struct velocity_td_
1846 tdinfo->skb = NULL; 1851 tdinfo->skb = NULL;
1847} 1852}
1848 1853
1854static int velocity_init_rings(struct velocity_info *vptr, int mtu)
1855{
1856 int ret;
1857
1858 velocity_set_rxbufsize(vptr, mtu);
1859
1860 ret = velocity_init_dma_rings(vptr);
1861 if (ret < 0)
1862 goto out;
1863
1864 ret = velocity_init_rd_ring(vptr);
1865 if (ret < 0)
1866 goto err_free_dma_rings_0;
1867
1868 ret = velocity_init_td_ring(vptr);
1869 if (ret < 0)
1870 goto err_free_rd_ring_1;
1871out:
1872 return ret;
1873
1874err_free_rd_ring_1:
1875 velocity_free_rd_ring(vptr);
1876err_free_dma_rings_0:
1877 velocity_free_dma_rings(vptr);
1878 goto out;
1879}
1880
1881static void velocity_free_rings(struct velocity_info *vptr)
1882{
1883 velocity_free_td_ring(vptr);
1884 velocity_free_rd_ring(vptr);
1885 velocity_free_dma_rings(vptr);
1886}
1887
1849/** 1888/**
1850 * velocity_open - interface activation callback 1889 * velocity_open - interface activation callback
1851 * @dev: network layer device to open 1890 * @dev: network layer device to open
@@ -1862,20 +1901,10 @@ static int velocity_open(struct net_device *dev)
1862 struct velocity_info *vptr = netdev_priv(dev); 1901 struct velocity_info *vptr = netdev_priv(dev);
1863 int ret; 1902 int ret;
1864 1903
1865 velocity_set_rxbufsize(vptr, dev->mtu); 1904 ret = velocity_init_rings(vptr, dev->mtu);
1866
1867 ret = velocity_init_rings(vptr);
1868 if (ret < 0) 1905 if (ret < 0)
1869 goto out; 1906 goto out;
1870 1907
1871 ret = velocity_init_rd_ring(vptr);
1872 if (ret < 0)
1873 goto err_free_desc_rings;
1874
1875 ret = velocity_init_td_ring(vptr);
1876 if (ret < 0)
1877 goto err_free_rd_ring;
1878
1879 /* Ensure chip is running */ 1908 /* Ensure chip is running */
1880 pci_set_power_state(vptr->pdev, PCI_D0); 1909 pci_set_power_state(vptr->pdev, PCI_D0);
1881 1910
@@ -1888,7 +1917,8 @@ static int velocity_open(struct net_device *dev)
1888 if (ret < 0) { 1917 if (ret < 0) {
1889 /* Power down the chip */ 1918 /* Power down the chip */
1890 pci_set_power_state(vptr->pdev, PCI_D3hot); 1919 pci_set_power_state(vptr->pdev, PCI_D3hot);
1891 goto err_free_td_ring; 1920 velocity_free_rings(vptr);
1921 goto out;
1892 } 1922 }
1893 1923
1894 mac_enable_int(vptr->mac_regs); 1924 mac_enable_int(vptr->mac_regs);
@@ -1896,14 +1926,6 @@ static int velocity_open(struct net_device *dev)
1896 vptr->flags |= VELOCITY_FLAGS_OPENED; 1926 vptr->flags |= VELOCITY_FLAGS_OPENED;
1897out: 1927out:
1898 return ret; 1928 return ret;
1899
1900err_free_td_ring:
1901 velocity_free_td_ring(vptr);
1902err_free_rd_ring:
1903 velocity_free_rd_ring(vptr);
1904err_free_desc_rings:
1905 velocity_free_rings(vptr);
1906 goto out;
1907} 1929}
1908 1930
1909/** 1931/**
@@ -1919,50 +1941,72 @@ err_free_desc_rings:
1919static int velocity_change_mtu(struct net_device *dev, int new_mtu) 1941static int velocity_change_mtu(struct net_device *dev, int new_mtu)
1920{ 1942{
1921 struct velocity_info *vptr = netdev_priv(dev); 1943 struct velocity_info *vptr = netdev_priv(dev);
1922 unsigned long flags;
1923 int oldmtu = dev->mtu;
1924 int ret = 0; 1944 int ret = 0;
1925 1945
1926 if ((new_mtu < VELOCITY_MIN_MTU) || new_mtu > (VELOCITY_MAX_MTU)) { 1946 if ((new_mtu < VELOCITY_MIN_MTU) || new_mtu > (VELOCITY_MAX_MTU)) {
1927 VELOCITY_PRT(MSG_LEVEL_ERR, KERN_NOTICE "%s: Invalid MTU.\n", 1947 VELOCITY_PRT(MSG_LEVEL_ERR, KERN_NOTICE "%s: Invalid MTU.\n",
1928 vptr->dev->name); 1948 vptr->dev->name);
1929 return -EINVAL; 1949 ret = -EINVAL;
1950 goto out_0;
1930 } 1951 }
1931 1952
1932 if (!netif_running(dev)) { 1953 if (!netif_running(dev)) {
1933 dev->mtu = new_mtu; 1954 dev->mtu = new_mtu;
1934 return 0; 1955 goto out_0;
1935 } 1956 }
1936 1957
1937 if (new_mtu != oldmtu) { 1958 if (dev->mtu != new_mtu) {
1959 struct velocity_info *tmp_vptr;
1960 unsigned long flags;
1961 struct rx_info rx;
1962 struct tx_info tx;
1963
1964 tmp_vptr = kzalloc(sizeof(*tmp_vptr), GFP_KERNEL);
1965 if (!tmp_vptr) {
1966 ret = -ENOMEM;
1967 goto out_0;
1968 }
1969
1970 tmp_vptr->dev = dev;
1971 tmp_vptr->pdev = vptr->pdev;
1972 tmp_vptr->options = vptr->options;
1973 tmp_vptr->tx.numq = vptr->tx.numq;
1974
1975 ret = velocity_init_rings(tmp_vptr, new_mtu);
1976 if (ret < 0)
1977 goto out_free_tmp_vptr_1;
1978
1938 spin_lock_irqsave(&vptr->lock, flags); 1979 spin_lock_irqsave(&vptr->lock, flags);
1939 1980
1940 netif_stop_queue(dev); 1981 netif_stop_queue(dev);
1941 velocity_shutdown(vptr); 1982 velocity_shutdown(vptr);
1942 1983
1943 velocity_free_td_ring(vptr); 1984 rx = vptr->rx;
1944 velocity_free_rd_ring(vptr); 1985 tx = vptr->tx;
1945 1986
1946 dev->mtu = new_mtu; 1987 vptr->rx = tmp_vptr->rx;
1988 vptr->tx = tmp_vptr->tx;
1947 1989
1948 velocity_set_rxbufsize(vptr, new_mtu); 1990 tmp_vptr->rx = rx;
1991 tmp_vptr->tx = tx;
1949 1992
1950 ret = velocity_init_rd_ring(vptr); 1993 dev->mtu = new_mtu;
1951 if (ret < 0)
1952 goto out_unlock;
1953 1994
1954 ret = velocity_init_td_ring(vptr); 1995 velocity_give_many_rx_descs(vptr);
1955 if (ret < 0)
1956 goto out_unlock;
1957 1996
1958 velocity_init_registers(vptr, VELOCITY_INIT_COLD); 1997 velocity_init_registers(vptr, VELOCITY_INIT_COLD);
1959 1998
1960 mac_enable_int(vptr->mac_regs); 1999 mac_enable_int(vptr->mac_regs);
1961 netif_start_queue(dev); 2000 netif_start_queue(dev);
1962out_unlock: 2001
1963 spin_unlock_irqrestore(&vptr->lock, flags); 2002 spin_unlock_irqrestore(&vptr->lock, flags);
1964 }
1965 2003
2004 velocity_free_rings(tmp_vptr);
2005
2006out_free_tmp_vptr_1:
2007 kfree(tmp_vptr);
2008 }
2009out_0:
1966 return ret; 2010 return ret;
1967} 2011}
1968 2012
@@ -2008,9 +2052,6 @@ static int velocity_close(struct net_device *dev)
2008 /* Power down the chip */ 2052 /* Power down the chip */
2009 pci_set_power_state(vptr->pdev, PCI_D3hot); 2053 pci_set_power_state(vptr->pdev, PCI_D3hot);
2010 2054
2011 /* Free the resources */
2012 velocity_free_td_ring(vptr);
2013 velocity_free_rd_ring(vptr);
2014 velocity_free_rings(vptr); 2055 velocity_free_rings(vptr);
2015 2056
2016 vptr->flags &= (~VELOCITY_FLAGS_OPENED); 2057 vptr->flags &= (~VELOCITY_FLAGS_OPENED);
@@ -2056,9 +2097,9 @@ static int velocity_xmit(struct sk_buff *skb, struct net_device *dev)
2056 2097
2057 spin_lock_irqsave(&vptr->lock, flags); 2098 spin_lock_irqsave(&vptr->lock, flags);
2058 2099
2059 index = vptr->td_curr[qnum]; 2100 index = vptr->tx.curr[qnum];
2060 td_ptr = &(vptr->td_rings[qnum][index]); 2101 td_ptr = &(vptr->tx.rings[qnum][index]);
2061 tdinfo = &(vptr->td_infos[qnum][index]); 2102 tdinfo = &(vptr->tx.infos[qnum][index]);
2062 2103
2063 td_ptr->tdesc1.TCR = TCR0_TIC; 2104 td_ptr->tdesc1.TCR = TCR0_TIC;
2064 td_ptr->td_buf[0].size &= ~TD_QUEUE; 2105 td_ptr->td_buf[0].size &= ~TD_QUEUE;
@@ -2071,9 +2112,9 @@ static int velocity_xmit(struct sk_buff *skb, struct net_device *dev)
2071 skb_copy_from_linear_data(skb, tdinfo->buf, skb->len); 2112 skb_copy_from_linear_data(skb, tdinfo->buf, skb->len);
2072 tdinfo->skb_dma[0] = tdinfo->buf_dma; 2113 tdinfo->skb_dma[0] = tdinfo->buf_dma;
2073 td_ptr->tdesc0.len = len; 2114 td_ptr->tdesc0.len = len;
2074 td_ptr->td_buf[0].pa_low = cpu_to_le32(tdinfo->skb_dma[0]); 2115 td_ptr->tx.buf[0].pa_low = cpu_to_le32(tdinfo->skb_dma[0]);
2075 td_ptr->td_buf[0].pa_high = 0; 2116 td_ptr->tx.buf[0].pa_high = 0;
2076 td_ptr->td_buf[0].size = len; /* queue is 0 anyway */ 2117 td_ptr->tx.buf[0].size = len; /* queue is 0 anyway */
2077 tdinfo->nskb_dma = 1; 2118 tdinfo->nskb_dma = 1;
2078 } else { 2119 } else {
2079 int i = 0; 2120 int i = 0;
@@ -2084,9 +2125,9 @@ static int velocity_xmit(struct sk_buff *skb, struct net_device *dev)
2084 td_ptr->tdesc0.len = len; 2125 td_ptr->tdesc0.len = len;
2085 2126
2086 /* FIXME: support 48bit DMA later */ 2127 /* FIXME: support 48bit DMA later */
2087 td_ptr->td_buf[i].pa_low = cpu_to_le32(tdinfo->skb_dma); 2128 td_ptr->tx.buf[i].pa_low = cpu_to_le32(tdinfo->skb_dma);
2088 td_ptr->td_buf[i].pa_high = 0; 2129 td_ptr->tx.buf[i].pa_high = 0;
2089 td_ptr->td_buf[i].size = cpu_to_le16(skb_headlen(skb)); 2130 td_ptr->tx.buf[i].size = cpu_to_le16(skb_headlen(skb));
2090 2131
2091 for (i = 0; i < nfrags; i++) { 2132 for (i = 0; i < nfrags; i++) {
2092 skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; 2133 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
@@ -2094,9 +2135,9 @@ static int velocity_xmit(struct sk_buff *skb, struct net_device *dev)
2094 2135
2095 tdinfo->skb_dma[i + 1] = pci_map_single(vptr->pdev, addr, frag->size, PCI_DMA_TODEVICE); 2136 tdinfo->skb_dma[i + 1] = pci_map_single(vptr->pdev, addr, frag->size, PCI_DMA_TODEVICE);
2096 2137
2097 td_ptr->td_buf[i + 1].pa_low = cpu_to_le32(tdinfo->skb_dma[i + 1]); 2138 td_ptr->tx.buf[i + 1].pa_low = cpu_to_le32(tdinfo->skb_dma[i + 1]);
2098 td_ptr->td_buf[i + 1].pa_high = 0; 2139 td_ptr->tx.buf[i + 1].pa_high = 0;
2099 td_ptr->td_buf[i + 1].size = cpu_to_le16(frag->size); 2140 td_ptr->tx.buf[i + 1].size = cpu_to_le16(frag->size);
2100 } 2141 }
2101 tdinfo->nskb_dma = i - 1; 2142 tdinfo->nskb_dma = i - 1;
2102 } 2143 }
@@ -2142,13 +2183,13 @@ static int velocity_xmit(struct sk_buff *skb, struct net_device *dev)
2142 if (prev < 0) 2183 if (prev < 0)
2143 prev = vptr->options.numtx - 1; 2184 prev = vptr->options.numtx - 1;
2144 td_ptr->tdesc0.len |= OWNED_BY_NIC; 2185 td_ptr->tdesc0.len |= OWNED_BY_NIC;
2145 vptr->td_used[qnum]++; 2186 vptr->tx.used[qnum]++;
2146 vptr->td_curr[qnum] = (index + 1) % vptr->options.numtx; 2187 vptr->tx.curr[qnum] = (index + 1) % vptr->options.numtx;
2147 2188
2148 if (AVAIL_TD(vptr, qnum) < 1) 2189 if (AVAIL_TD(vptr, qnum) < 1)
2149 netif_stop_queue(dev); 2190 netif_stop_queue(dev);
2150 2191
2151 td_ptr = &(vptr->td_rings[qnum][prev]); 2192 td_ptr = &(vptr->tx.rings[qnum][prev]);
2152 td_ptr->td_buf[0].size |= TD_QUEUE; 2193 td_ptr->td_buf[0].size |= TD_QUEUE;
2153 mac_tx_queue_wake(vptr->mac_regs, qnum); 2194 mac_tx_queue_wake(vptr->mac_regs, qnum);
2154 } 2195 }
@@ -3405,8 +3446,8 @@ static int velocity_resume(struct pci_dev *pdev)
3405 3446
3406 velocity_tx_srv(vptr, 0); 3447 velocity_tx_srv(vptr, 0);
3407 3448
3408 for (i = 0; i < vptr->num_txq; i++) { 3449 for (i = 0; i < vptr->tx.numq; i++) {
3409 if (vptr->td_used[i]) { 3450 if (vptr->tx.used[i]) {
3410 mac_tx_queue_wake(vptr->mac_regs, i); 3451 mac_tx_queue_wake(vptr->mac_regs, i);
3411 } 3452 }
3412 } 3453 }
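
The reworked velocity_change_mtu() above no longer rebuilds the live descriptor rings in place. It prepares a throw-away velocity_info, allocates rings sized for the new MTU outside the spinlock, swaps the rx/tx bookkeeping under the lock while the device is quiesced, and frees the old rings only after the queue has been restarted. A minimal sketch of that sequence, condensed from the hunk above (tmp stands in for the patch's tmp_vptr; error paths and the flags declaration are trimmed):

    /* Sketch only: the swap-based MTU change used above. */
    tmp = kzalloc(sizeof(*tmp), GFP_KERNEL);
    tmp->dev = dev;
    tmp->pdev = vptr->pdev;
    tmp->options = vptr->options;
    tmp->tx.numq = vptr->tx.numq;

    velocity_init_rings(tmp, new_mtu);        /* allocate before taking the lock */

    spin_lock_irqsave(&vptr->lock, flags);
    netif_stop_queue(dev);
    velocity_shutdown(vptr);                  /* quiesce the hardware */

    rx = vptr->rx;                            /* hand the new rings to vptr ... */
    tx = vptr->tx;
    vptr->rx = tmp->rx;
    vptr->tx = tmp->tx;
    tmp->rx = rx;                             /* ... and park the old ones in tmp */
    tmp->tx = tx;

    dev->mtu = new_mtu;
    velocity_give_many_rx_descs(vptr);
    velocity_init_registers(vptr, VELOCITY_INIT_COLD);
    mac_enable_int(vptr->mac_regs);
    netif_start_queue(dev);
    spin_unlock_irqrestore(&vptr->lock, flags);

    velocity_free_rings(tmp);                 /* the old descriptors are released here */
    kfree(tmp);
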
diff --git a/drivers/net/via-velocity.h b/drivers/net/via-velocity.h
index 86446147284c..1b95b04c9257 100644
--- a/drivers/net/via-velocity.h
+++ b/drivers/net/via-velocity.h
@@ -1494,6 +1494,10 @@ struct velocity_opt {
1494 u32 flags; 1494 u32 flags;
1495}; 1495};
1496 1496
1497#define AVAIL_TD(p,q) ((p)->options.numtx-((p)->tx.used[(q)]))
1498
1499#define GET_RD_BY_IDX(vptr, idx) (vptr->rd_ring[idx])
1500
1497struct velocity_info { 1501struct velocity_info {
1498 struct list_head list; 1502 struct list_head list;
1499 1503
@@ -1501,9 +1505,6 @@ struct velocity_info {
1501 struct net_device *dev; 1505 struct net_device *dev;
1502 struct net_device_stats stats; 1506 struct net_device_stats stats;
1503 1507
1504 dma_addr_t rd_pool_dma;
1505 dma_addr_t td_pool_dma[TX_QUEUE_NO];
1506
1507 struct vlan_group *vlgrp; 1508 struct vlan_group *vlgrp;
1508 u8 ip_addr[4]; 1509 u8 ip_addr[4];
1509 enum chip_type chip_id; 1510 enum chip_type chip_id;
@@ -1512,25 +1513,29 @@ struct velocity_info {
1512 unsigned long memaddr; 1513 unsigned long memaddr;
1513 unsigned long ioaddr; 1514 unsigned long ioaddr;
1514 1515
1515 u8 rev_id; 1516 struct tx_info {
1516 1517 int numq;
1517#define AVAIL_TD(p,q) ((p)->options.numtx-((p)->td_used[(q)])) 1518
1519 /* FIXME: the locality of the data seems rather poor. */
1520 int used[TX_QUEUE_NO];
1521 int curr[TX_QUEUE_NO];
1522 int tail[TX_QUEUE_NO];
1523 struct tx_desc *rings[TX_QUEUE_NO];
1524 struct velocity_td_info *infos[TX_QUEUE_NO];
1525 dma_addr_t pool_dma[TX_QUEUE_NO];
1526 } tx;
1527
1528 struct rx_info {
1529 int buf_sz;
1530
1531 int dirty;
1532 int curr;
1533 u32 filled;
1534 struct rx_desc *ring;
1535 struct velocity_rd_info *info; /* It's an array */
1536 dma_addr_t pool_dma;
1537 } rx;
1518 1538
1519 int num_txq;
1520
1521 volatile int td_used[TX_QUEUE_NO];
1522 int td_curr[TX_QUEUE_NO];
1523 int td_tail[TX_QUEUE_NO];
1524 struct tx_desc *td_rings[TX_QUEUE_NO];
1525 struct velocity_td_info *td_infos[TX_QUEUE_NO];
1526
1527 int rd_curr;
1528 int rd_dirty;
1529 u32 rd_filled;
1530 struct rx_desc *rd_ring;
1531 struct velocity_rd_info *rd_info; /* It's an array */
1532
1533#define GET_RD_BY_IDX(vptr, idx) (vptr->rd_ring[idx])
1534 u32 mib_counter[MAX_HW_MIB_COUNTER]; 1539 u32 mib_counter[MAX_HW_MIB_COUNTER];
1535 struct velocity_opt options; 1540 struct velocity_opt options;
1536 1541
@@ -1538,7 +1543,6 @@ struct velocity_info {
1538 1543
1539 u32 flags; 1544 u32 flags;
1540 1545
1541 int rx_buf_sz;
1542 u32 mii_status; 1546 u32 mii_status;
1543 u32 phy_id; 1547 u32 phy_id;
1544 int multicast_limit; 1548 int multicast_limit;
@@ -1554,8 +1558,8 @@ struct velocity_info {
1554 struct velocity_context context; 1558 struct velocity_context context;
1555 1559
1556 u32 ticks; 1560 u32 ticks;
1557 u32 rx_bytes;
1558 1561
1562 u8 rev_id;
1559}; 1563};
1560 1564
1561/** 1565/**
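
With the header regrouping above, all transmit state lives in vptr->tx and AVAIL_TD() is plain arithmetic: the number of free descriptors on queue q is options.numtx minus tx.used[q]. The transmit path shown earlier consumes one descriptor per packet and pauses the queue when none are left, roughly:

    /* Sketch of the descriptor accounting from velocity_xmit() above. */
    vptr->tx.used[qnum]++;                             /* one more owned by the NIC */
    vptr->tx.curr[qnum] = (index + 1) % vptr->options.numtx;

    if (AVAIL_TD(vptr, qnum) < 1)                      /* numtx - used[qnum] == 0 */
        netif_stop_queue(dev);                         /* ring full, stop the qdisc */
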
diff --git a/drivers/net/wan/Kconfig b/drivers/net/wan/Kconfig
index 846be60e7821..2ae2ec40015d 100644
--- a/drivers/net/wan/Kconfig
+++ b/drivers/net/wan/Kconfig
@@ -25,7 +25,7 @@ if WAN
25# There is no way to detect a comtrol sv11 - force it modular for now. 25# There is no way to detect a comtrol sv11 - force it modular for now.
26config HOSTESS_SV11 26config HOSTESS_SV11
27 tristate "Comtrol Hostess SV-11 support" 27 tristate "Comtrol Hostess SV-11 support"
28 depends on ISA && m && ISA_DMA_API && INET 28 depends on ISA && m && ISA_DMA_API && INET && HDLC
29 help 29 help
30 Driver for Comtrol Hostess SV-11 network card which 30 Driver for Comtrol Hostess SV-11 network card which
31 operates on low speed synchronous serial links at up to 31 operates on low speed synchronous serial links at up to
@@ -37,7 +37,7 @@ config HOSTESS_SV11
37# The COSA/SRP driver has not been tested as non-modular yet. 37# The COSA/SRP driver has not been tested as non-modular yet.
38config COSA 38config COSA
39 tristate "COSA/SRP sync serial boards support" 39 tristate "COSA/SRP sync serial boards support"
40 depends on ISA && m && ISA_DMA_API 40 depends on ISA && m && ISA_DMA_API && HDLC
41 ---help--- 41 ---help---
42 Driver for COSA and SRP synchronous serial boards. 42 Driver for COSA and SRP synchronous serial boards.
43 43
@@ -61,7 +61,7 @@ config COSA
61# 61#
62config LANMEDIA 62config LANMEDIA
63 tristate "LanMedia Corp. SSI/V.35, T1/E1, HSSI, T3 boards" 63 tristate "LanMedia Corp. SSI/V.35, T1/E1, HSSI, T3 boards"
64 depends on PCI && VIRT_TO_BUS 64 depends on PCI && VIRT_TO_BUS && HDLC
65 ---help--- 65 ---help---
66 Driver for the following Lan Media family of serial boards: 66 Driver for the following Lan Media family of serial boards:
67 67
@@ -78,9 +78,8 @@ config LANMEDIA
78 - LMC 5245 board connects directly to a T3 circuit saving the 78 - LMC 5245 board connects directly to a T3 circuit saving the
79 additional external hardware. 79 additional external hardware.
80 80
81 To change setting such as syncPPP vs Cisco HDLC or clock source you 81 To change setting such as clock source you will need lmcctl.
82 will need lmcctl. It is available at <ftp://ftp.lanmedia.com/> 82 It is available at <ftp://ftp.lanmedia.com/> (broken link).
83 (broken link).
84 83
85 To compile this driver as a module, choose M here: the 84 To compile this driver as a module, choose M here: the
86 module will be called lmc. 85 module will be called lmc.
@@ -88,7 +87,7 @@ config LANMEDIA
88# There is no way to detect a Sealevel board. Force it modular 87# There is no way to detect a Sealevel board. Force it modular
89config SEALEVEL_4021 88config SEALEVEL_4021
90 tristate "Sealevel Systems 4021 support" 89 tristate "Sealevel Systems 4021 support"
91 depends on ISA && m && ISA_DMA_API && INET 90 depends on ISA && m && ISA_DMA_API && INET && HDLC
92 help 91 help
93 This is a driver for the Sealevel Systems ACB 56 serial I/O adapter. 92 This is a driver for the Sealevel Systems ACB 56 serial I/O adapter.
94 93
@@ -154,8 +153,6 @@ config HDLC_PPP
154 help 153 help
155 Generic HDLC driver supporting PPP over WAN connections. 154 Generic HDLC driver supporting PPP over WAN connections.
156 155
157 It will be replaced by new PPP implementation in Linux 2.6.26.
158
159 If unsure, say N. 156 If unsure, say N.
160 157
161config HDLC_X25 158config HDLC_X25
diff --git a/drivers/net/wan/Makefile b/drivers/net/wan/Makefile
index d61fef36afc9..102549605d09 100644
--- a/drivers/net/wan/Makefile
+++ b/drivers/net/wan/Makefile
@@ -21,12 +21,11 @@ pc300-y := pc300_drv.o
21pc300-$(CONFIG_PC300_MLPPP) += pc300_tty.o 21pc300-$(CONFIG_PC300_MLPPP) += pc300_tty.o
22pc300-objs := $(pc300-y) 22pc300-objs := $(pc300-y)
23 23
24obj-$(CONFIG_HOSTESS_SV11) += z85230.o syncppp.o hostess_sv11.o 24obj-$(CONFIG_HOSTESS_SV11) += z85230.o hostess_sv11.o
25obj-$(CONFIG_SEALEVEL_4021) += z85230.o syncppp.o sealevel.o 25obj-$(CONFIG_SEALEVEL_4021) += z85230.o sealevel.o
26obj-$(CONFIG_COSA) += syncppp.o cosa.o 26obj-$(CONFIG_COSA) += cosa.o
27obj-$(CONFIG_FARSYNC) += syncppp.o farsync.o 27obj-$(CONFIG_FARSYNC) += farsync.o
28obj-$(CONFIG_DSCC4) += dscc4.o 28obj-$(CONFIG_DSCC4) += dscc4.o
29obj-$(CONFIG_LANMEDIA) += syncppp.o
30obj-$(CONFIG_X25_ASY) += x25_asy.o 29obj-$(CONFIG_X25_ASY) += x25_asy.o
31 30
32obj-$(CONFIG_LANMEDIA) += lmc/ 31obj-$(CONFIG_LANMEDIA) += lmc/
diff --git a/drivers/net/wan/cosa.c b/drivers/net/wan/cosa.c
index f7d3349dc3ec..f14051556c87 100644
--- a/drivers/net/wan/cosa.c
+++ b/drivers/net/wan/cosa.c
@@ -2,6 +2,7 @@
2 2
3/* 3/*
4 * Copyright (C) 1995-1997 Jan "Yenya" Kasprzak <kas@fi.muni.cz> 4 * Copyright (C) 1995-1997 Jan "Yenya" Kasprzak <kas@fi.muni.cz>
5 * Generic HDLC port Copyright (C) 2008 Krzysztof Halasa <khc@pm.waw.pl>
5 * 6 *
6 * This program is free software; you can redistribute it and/or modify 7 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by 8 * it under the terms of the GNU General Public License as published by
@@ -54,7 +55,7 @@
54 * 55 *
55 * The Linux driver (unlike the present *BSD drivers :-) can work even 56 * The Linux driver (unlike the present *BSD drivers :-) can work even
56 * for the COSA and SRP in one computer and allows each channel to work 57 * for the COSA and SRP in one computer and allows each channel to work
57 * in one of the three modes (character device, Cisco HDLC, Sync PPP). 58 * in one of the two modes (character or network device).
58 * 59 *
59 * AUTHOR 60 * AUTHOR
60 * 61 *
@@ -72,12 +73,6 @@
72 * The Comtrol Hostess SV11 driver by Alan Cox 73 * The Comtrol Hostess SV11 driver by Alan Cox
73 * The Sync PPP/Cisco HDLC layer (syncppp.c) ported to Linux by Alan Cox 74 * The Sync PPP/Cisco HDLC layer (syncppp.c) ported to Linux by Alan Cox
74 */ 75 */
75/*
76 * 5/25/1999 : Marcelo Tosatti <marcelo@conectiva.com.br>
77 * fixed a deadlock in cosa_sppp_open
78 */
79
80/* ---------- Headers, macros, data structures ---------- */
81 76
82#include <linux/module.h> 77#include <linux/module.h>
83#include <linux/kernel.h> 78#include <linux/kernel.h>
@@ -86,6 +81,7 @@
86#include <linux/fs.h> 81#include <linux/fs.h>
87#include <linux/interrupt.h> 82#include <linux/interrupt.h>
88#include <linux/delay.h> 83#include <linux/delay.h>
84#include <linux/hdlc.h>
89#include <linux/errno.h> 85#include <linux/errno.h>
90#include <linux/ioport.h> 86#include <linux/ioport.h>
91#include <linux/netdevice.h> 87#include <linux/netdevice.h>
@@ -93,14 +89,12 @@
93#include <linux/mutex.h> 89#include <linux/mutex.h>
94#include <linux/device.h> 90#include <linux/device.h>
95#include <linux/smp_lock.h> 91#include <linux/smp_lock.h>
96
97#undef COSA_SLOW_IO /* for testing purposes only */
98
99#include <asm/io.h> 92#include <asm/io.h>
100#include <asm/dma.h> 93#include <asm/dma.h>
101#include <asm/byteorder.h> 94#include <asm/byteorder.h>
102 95
103#include <net/syncppp.h> 96#undef COSA_SLOW_IO /* for testing purposes only */
97
104#include "cosa.h" 98#include "cosa.h"
105 99
106/* Maximum length of the identification string. */ 100/* Maximum length of the identification string. */
@@ -112,7 +106,6 @@
112/* Per-channel data structure */ 106/* Per-channel data structure */
113 107
114struct channel_data { 108struct channel_data {
115 void *if_ptr; /* General purpose pointer (used by SPPP) */
116 int usage; /* Usage count; >0 for chrdev, -1 for netdev */ 109 int usage; /* Usage count; >0 for chrdev, -1 for netdev */
117 int num; /* Number of the channel */ 110 int num; /* Number of the channel */
118 struct cosa_data *cosa; /* Pointer to the per-card structure */ 111 struct cosa_data *cosa; /* Pointer to the per-card structure */
@@ -136,10 +129,9 @@ struct channel_data {
136 wait_queue_head_t txwaitq, rxwaitq; 129 wait_queue_head_t txwaitq, rxwaitq;
137 int tx_status, rx_status; 130 int tx_status, rx_status;
138 131
139 /* SPPP/HDLC device parts */ 132 /* generic HDLC device parts */
140 struct ppp_device pppdev; 133 struct net_device *netdev;
141 struct sk_buff *rx_skb, *tx_skb; 134 struct sk_buff *rx_skb, *tx_skb;
142 struct net_device_stats stats;
143}; 135};
144 136
145/* cosa->firmware_status bits */ 137/* cosa->firmware_status bits */
@@ -281,21 +273,19 @@ static int cosa_start_tx(struct channel_data *channel, char *buf, int size);
281static void cosa_kick(struct cosa_data *cosa); 273static void cosa_kick(struct cosa_data *cosa);
282static int cosa_dma_able(struct channel_data *chan, char *buf, int data); 274static int cosa_dma_able(struct channel_data *chan, char *buf, int data);
283 275
284/* SPPP/HDLC stuff */ 276/* Network device stuff */
285static void sppp_channel_init(struct channel_data *chan); 277static int cosa_net_attach(struct net_device *dev, unsigned short encoding,
286static void sppp_channel_delete(struct channel_data *chan); 278 unsigned short parity);
287static int cosa_sppp_open(struct net_device *d); 279static int cosa_net_open(struct net_device *d);
288static int cosa_sppp_close(struct net_device *d); 280static int cosa_net_close(struct net_device *d);
289static void cosa_sppp_timeout(struct net_device *d); 281static void cosa_net_timeout(struct net_device *d);
290static int cosa_sppp_tx(struct sk_buff *skb, struct net_device *d); 282static int cosa_net_tx(struct sk_buff *skb, struct net_device *d);
291static char *sppp_setup_rx(struct channel_data *channel, int size); 283static char *cosa_net_setup_rx(struct channel_data *channel, int size);
292static int sppp_rx_done(struct channel_data *channel); 284static int cosa_net_rx_done(struct channel_data *channel);
293static int sppp_tx_done(struct channel_data *channel, int size); 285static int cosa_net_tx_done(struct channel_data *channel, int size);
294static int cosa_sppp_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd); 286static int cosa_net_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd);
295static struct net_device_stats *cosa_net_stats(struct net_device *dev);
296 287
297/* Character device */ 288/* Character device */
298static void chardev_channel_init(struct channel_data *chan);
299static char *chrdev_setup_rx(struct channel_data *channel, int size); 289static char *chrdev_setup_rx(struct channel_data *channel, int size);
300static int chrdev_rx_done(struct channel_data *channel); 290static int chrdev_rx_done(struct channel_data *channel);
301static int chrdev_tx_done(struct channel_data *channel, int size); 291static int chrdev_tx_done(struct channel_data *channel, int size);
@@ -357,17 +347,17 @@ static void debug_status_in(struct cosa_data *cosa, int status);
357static void debug_status_out(struct cosa_data *cosa, int status); 347static void debug_status_out(struct cosa_data *cosa, int status);
358#endif 348#endif
359 349
360 350static inline struct channel_data* dev_to_chan(struct net_device *dev)
351{
352 return (struct channel_data *)dev_to_hdlc(dev)->priv;
353}
354
361/* ---------- Initialization stuff ---------- */ 355/* ---------- Initialization stuff ---------- */
362 356
363static int __init cosa_init(void) 357static int __init cosa_init(void)
364{ 358{
365 int i, err = 0; 359 int i, err = 0;
366 360
367 printk(KERN_INFO "cosa v1.08 (c) 1997-2000 Jan Kasprzak <kas@fi.muni.cz>\n");
368#ifdef CONFIG_SMP
369 printk(KERN_INFO "cosa: SMP found. Please mail any success/failure reports to the author.\n");
370#endif
371 if (cosa_major > 0) { 361 if (cosa_major > 0) {
372 if (register_chrdev(cosa_major, "cosa", &cosa_fops)) { 362 if (register_chrdev(cosa_major, "cosa", &cosa_fops)) {
373 printk(KERN_WARNING "cosa: unable to get major %d\n", 363 printk(KERN_WARNING "cosa: unable to get major %d\n",
@@ -402,7 +392,7 @@ static int __init cosa_init(void)
402 NULL, "cosa%d", i); 392 NULL, "cosa%d", i);
403 err = 0; 393 err = 0;
404 goto out; 394 goto out;
405 395
406out_chrdev: 396out_chrdev:
407 unregister_chrdev(cosa_major, "cosa"); 397 unregister_chrdev(cosa_major, "cosa");
408out: 398out:
@@ -414,43 +404,29 @@ static void __exit cosa_exit(void)
414{ 404{
415 struct cosa_data *cosa; 405 struct cosa_data *cosa;
416 int i; 406 int i;
417 printk(KERN_INFO "Unloading the cosa module\n");
418 407
419 for (i=0; i<nr_cards; i++) 408 for (i = 0; i < nr_cards; i++)
420 device_destroy(cosa_class, MKDEV(cosa_major, i)); 409 device_destroy(cosa_class, MKDEV(cosa_major, i));
421 class_destroy(cosa_class); 410 class_destroy(cosa_class);
422 for (cosa=cosa_cards; nr_cards--; cosa++) { 411
412 for (cosa = cosa_cards; nr_cards--; cosa++) {
423 /* Clean up the per-channel data */ 413 /* Clean up the per-channel data */
424 for (i=0; i<cosa->nchannels; i++) { 414 for (i = 0; i < cosa->nchannels; i++) {
425 /* Chardev driver has no alloc'd per-channel data */ 415 /* Chardev driver has no alloc'd per-channel data */
426 sppp_channel_delete(cosa->chan+i); 416 unregister_hdlc_device(cosa->chan[i].netdev);
417 free_netdev(cosa->chan[i].netdev);
427 } 418 }
428 /* Clean up the per-card data */ 419 /* Clean up the per-card data */
429 kfree(cosa->chan); 420 kfree(cosa->chan);
430 kfree(cosa->bouncebuf); 421 kfree(cosa->bouncebuf);
431 free_irq(cosa->irq, cosa); 422 free_irq(cosa->irq, cosa);
432 free_dma(cosa->dma); 423 free_dma(cosa->dma);
433 release_region(cosa->datareg,is_8bit(cosa)?2:4); 424 release_region(cosa->datareg, is_8bit(cosa) ? 2 : 4);
434 } 425 }
435 unregister_chrdev(cosa_major, "cosa"); 426 unregister_chrdev(cosa_major, "cosa");
436} 427}
437module_exit(cosa_exit); 428module_exit(cosa_exit);
438 429
439/*
440 * This function should register all the net devices needed for the
441 * single channel.
442 */
443static __inline__ void channel_init(struct channel_data *chan)
444{
445 sprintf(chan->name, "cosa%dc%d", chan->cosa->num, chan->num);
446
447 /* Initialize the chardev data structures */
448 chardev_channel_init(chan);
449
450 /* Register the sppp interface */
451 sppp_channel_init(chan);
452}
453
454static int cosa_probe(int base, int irq, int dma) 430static int cosa_probe(int base, int irq, int dma)
455{ 431{
456 struct cosa_data *cosa = cosa_cards+nr_cards; 432 struct cosa_data *cosa = cosa_cards+nr_cards;
@@ -576,13 +552,43 @@ static int cosa_probe(int base, int irq, int dma)
576 /* Initialize the per-channel data */ 552 /* Initialize the per-channel data */
577 cosa->chan = kcalloc(cosa->nchannels, sizeof(struct channel_data), GFP_KERNEL); 553 cosa->chan = kcalloc(cosa->nchannels, sizeof(struct channel_data), GFP_KERNEL);
578 if (!cosa->chan) { 554 if (!cosa->chan) {
579 err = -ENOMEM; 555 err = -ENOMEM;
580 goto err_out3; 556 goto err_out3;
581 } 557 }
582 for (i=0; i<cosa->nchannels; i++) { 558
583 cosa->chan[i].cosa = cosa; 559 for (i = 0; i < cosa->nchannels; i++) {
584 cosa->chan[i].num = i; 560 struct channel_data *chan = &cosa->chan[i];
585 channel_init(cosa->chan+i); 561
562 chan->cosa = cosa;
563 chan->num = i;
564 sprintf(chan->name, "cosa%dc%d", chan->cosa->num, i);
565
566 /* Initialize the chardev data structures */
567 mutex_init(&chan->rlock);
568 init_MUTEX(&chan->wsem);
569
570 /* Register the network interface */
571 if (!(chan->netdev = alloc_hdlcdev(chan))) {
572 printk(KERN_WARNING "%s: alloc_hdlcdev failed.\n",
573 chan->name);
574 goto err_hdlcdev;
575 }
576 dev_to_hdlc(chan->netdev)->attach = cosa_net_attach;
577 dev_to_hdlc(chan->netdev)->xmit = cosa_net_tx;
578 chan->netdev->open = cosa_net_open;
579 chan->netdev->stop = cosa_net_close;
580 chan->netdev->do_ioctl = cosa_net_ioctl;
581 chan->netdev->tx_timeout = cosa_net_timeout;
582 chan->netdev->watchdog_timeo = TX_TIMEOUT;
583 chan->netdev->base_addr = chan->cosa->datareg;
584 chan->netdev->irq = chan->cosa->irq;
585 chan->netdev->dma = chan->cosa->dma;
586 if (register_hdlc_device(chan->netdev)) {
587 printk(KERN_WARNING "%s: register_hdlc_device()"
588 " failed.\n", chan->netdev->name);
589 free_netdev(chan->netdev);
590 goto err_hdlcdev;
591 }
586 } 592 }
587 593
588 printk (KERN_INFO "cosa%d: %s (%s at 0x%x irq %d dma %d), %d channels\n", 594 printk (KERN_INFO "cosa%d: %s (%s at 0x%x irq %d dma %d), %d channels\n",
@@ -590,13 +596,20 @@ static int cosa_probe(int base, int irq, int dma)
590 cosa->datareg, cosa->irq, cosa->dma, cosa->nchannels); 596 cosa->datareg, cosa->irq, cosa->dma, cosa->nchannels);
591 597
592 return nr_cards++; 598 return nr_cards++;
599
600err_hdlcdev:
601 while (i-- > 0) {
602 unregister_hdlc_device(cosa->chan[i].netdev);
603 free_netdev(cosa->chan[i].netdev);
604 }
605 kfree(cosa->chan);
593err_out3: 606err_out3:
594 kfree(cosa->bouncebuf); 607 kfree(cosa->bouncebuf);
595err_out2: 608err_out2:
596 free_dma(cosa->dma); 609 free_dma(cosa->dma);
597err_out1: 610err_out1:
598 free_irq(cosa->irq, cosa); 611 free_irq(cosa->irq, cosa);
599err_out: 612err_out:
600 release_region(cosa->datareg,is_8bit(cosa)?2:4); 613 release_region(cosa->datareg,is_8bit(cosa)?2:4);
601 printk(KERN_NOTICE "cosa%d: allocating resources failed\n", 614 printk(KERN_NOTICE "cosa%d: allocating resources failed\n",
602 cosa->num); 615 cosa->num);
@@ -604,54 +617,19 @@ err_out:
604} 617}
605 618
606 619
607/*---------- SPPP/HDLC netdevice ---------- */ 620/*---------- network device ---------- */
608 621
609static void cosa_setup(struct net_device *d) 622static int cosa_net_attach(struct net_device *dev, unsigned short encoding,
623 unsigned short parity)
610{ 624{
611 d->open = cosa_sppp_open; 625 if (encoding == ENCODING_NRZ && parity == PARITY_CRC16_PR1_CCITT)
612 d->stop = cosa_sppp_close; 626 return 0;
613 d->hard_start_xmit = cosa_sppp_tx; 627 return -EINVAL;
614 d->do_ioctl = cosa_sppp_ioctl;
615 d->get_stats = cosa_net_stats;
616 d->tx_timeout = cosa_sppp_timeout;
617 d->watchdog_timeo = TX_TIMEOUT;
618}
619
620static void sppp_channel_init(struct channel_data *chan)
621{
622 struct net_device *d;
623 chan->if_ptr = &chan->pppdev;
624 d = alloc_netdev(0, chan->name, cosa_setup);
625 if (!d) {
626 printk(KERN_WARNING "%s: alloc_netdev failed.\n", chan->name);
627 return;
628 }
629 chan->pppdev.dev = d;
630 d->base_addr = chan->cosa->datareg;
631 d->irq = chan->cosa->irq;
632 d->dma = chan->cosa->dma;
633 d->ml_priv = chan;
634 sppp_attach(&chan->pppdev);
635 if (register_netdev(d)) {
636 printk(KERN_WARNING "%s: register_netdev failed.\n", d->name);
637 sppp_detach(d);
638 free_netdev(d);
639 chan->pppdev.dev = NULL;
640 return;
641 }
642}
643
644static void sppp_channel_delete(struct channel_data *chan)
645{
646 unregister_netdev(chan->pppdev.dev);
647 sppp_detach(chan->pppdev.dev);
648 free_netdev(chan->pppdev.dev);
649 chan->pppdev.dev = NULL;
650} 628}
651 629
652static int cosa_sppp_open(struct net_device *d) 630static int cosa_net_open(struct net_device *dev)
653{ 631{
654 struct channel_data *chan = d->ml_priv; 632 struct channel_data *chan = dev_to_chan(dev);
655 int err; 633 int err;
656 unsigned long flags; 634 unsigned long flags;
657 635
@@ -662,36 +640,35 @@ static int cosa_sppp_open(struct net_device *d)
662 } 640 }
663 spin_lock_irqsave(&chan->cosa->lock, flags); 641 spin_lock_irqsave(&chan->cosa->lock, flags);
664 if (chan->usage != 0) { 642 if (chan->usage != 0) {
665 printk(KERN_WARNING "%s: sppp_open called with usage count %d\n", 643 printk(KERN_WARNING "%s: cosa_net_open called with usage count"
666 chan->name, chan->usage); 644 " %d\n", chan->name, chan->usage);
667 spin_unlock_irqrestore(&chan->cosa->lock, flags); 645 spin_unlock_irqrestore(&chan->cosa->lock, flags);
668 return -EBUSY; 646 return -EBUSY;
669 } 647 }
670 chan->setup_rx = sppp_setup_rx; 648 chan->setup_rx = cosa_net_setup_rx;
671 chan->tx_done = sppp_tx_done; 649 chan->tx_done = cosa_net_tx_done;
672 chan->rx_done = sppp_rx_done; 650 chan->rx_done = cosa_net_rx_done;
673 chan->usage=-1; 651 chan->usage = -1;
674 chan->cosa->usage++; 652 chan->cosa->usage++;
675 spin_unlock_irqrestore(&chan->cosa->lock, flags); 653 spin_unlock_irqrestore(&chan->cosa->lock, flags);
676 654
677 err = sppp_open(d); 655 err = hdlc_open(dev);
678 if (err) { 656 if (err) {
679 spin_lock_irqsave(&chan->cosa->lock, flags); 657 spin_lock_irqsave(&chan->cosa->lock, flags);
680 chan->usage=0; 658 chan->usage = 0;
681 chan->cosa->usage--; 659 chan->cosa->usage--;
682
683 spin_unlock_irqrestore(&chan->cosa->lock, flags); 660 spin_unlock_irqrestore(&chan->cosa->lock, flags);
684 return err; 661 return err;
685 } 662 }
686 663
687 netif_start_queue(d); 664 netif_start_queue(dev);
688 cosa_enable_rx(chan); 665 cosa_enable_rx(chan);
689 return 0; 666 return 0;
690} 667}
691 668
692static int cosa_sppp_tx(struct sk_buff *skb, struct net_device *dev) 669static int cosa_net_tx(struct sk_buff *skb, struct net_device *dev)
693{ 670{
694 struct channel_data *chan = dev->ml_priv; 671 struct channel_data *chan = dev_to_chan(dev);
695 672
696 netif_stop_queue(dev); 673 netif_stop_queue(dev);
697 674
@@ -700,16 +677,16 @@ static int cosa_sppp_tx(struct sk_buff *skb, struct net_device *dev)
700 return 0; 677 return 0;
701} 678}
702 679
703static void cosa_sppp_timeout(struct net_device *dev) 680static void cosa_net_timeout(struct net_device *dev)
704{ 681{
705 struct channel_data *chan = dev->ml_priv; 682 struct channel_data *chan = dev_to_chan(dev);
706 683
707 if (test_bit(RXBIT, &chan->cosa->rxtx)) { 684 if (test_bit(RXBIT, &chan->cosa->rxtx)) {
708 chan->stats.rx_errors++; 685 chan->netdev->stats.rx_errors++;
709 chan->stats.rx_missed_errors++; 686 chan->netdev->stats.rx_missed_errors++;
710 } else { 687 } else {
711 chan->stats.tx_errors++; 688 chan->netdev->stats.tx_errors++;
712 chan->stats.tx_aborted_errors++; 689 chan->netdev->stats.tx_aborted_errors++;
713 } 690 }
714 cosa_kick(chan->cosa); 691 cosa_kick(chan->cosa);
715 if (chan->tx_skb) { 692 if (chan->tx_skb) {
@@ -719,13 +696,13 @@ static void cosa_sppp_timeout(struct net_device *dev)
719 netif_wake_queue(dev); 696 netif_wake_queue(dev);
720} 697}
721 698
722static int cosa_sppp_close(struct net_device *d) 699static int cosa_net_close(struct net_device *dev)
723{ 700{
724 struct channel_data *chan = d->ml_priv; 701 struct channel_data *chan = dev_to_chan(dev);
725 unsigned long flags; 702 unsigned long flags;
726 703
727 netif_stop_queue(d); 704 netif_stop_queue(dev);
728 sppp_close(d); 705 hdlc_close(dev);
729 cosa_disable_rx(chan); 706 cosa_disable_rx(chan);
730 spin_lock_irqsave(&chan->cosa->lock, flags); 707 spin_lock_irqsave(&chan->cosa->lock, flags);
731 if (chan->rx_skb) { 708 if (chan->rx_skb) {
@@ -736,13 +713,13 @@ static int cosa_sppp_close(struct net_device *d)
736 kfree_skb(chan->tx_skb); 713 kfree_skb(chan->tx_skb);
737 chan->tx_skb = NULL; 714 chan->tx_skb = NULL;
738 } 715 }
739 chan->usage=0; 716 chan->usage = 0;
740 chan->cosa->usage--; 717 chan->cosa->usage--;
741 spin_unlock_irqrestore(&chan->cosa->lock, flags); 718 spin_unlock_irqrestore(&chan->cosa->lock, flags);
742 return 0; 719 return 0;
743} 720}
744 721
745static char *sppp_setup_rx(struct channel_data *chan, int size) 722static char *cosa_net_setup_rx(struct channel_data *chan, int size)
746{ 723{
747 /* 724 /*
748 * We can safely fall back to non-dma-able memory, because we have 725 * We can safely fall back to non-dma-able memory, because we have
@@ -754,66 +731,53 @@ static char *sppp_setup_rx(struct channel_data *chan, int size)
754 if (chan->rx_skb == NULL) { 731 if (chan->rx_skb == NULL) {
755 printk(KERN_NOTICE "%s: Memory squeeze, dropping packet\n", 732 printk(KERN_NOTICE "%s: Memory squeeze, dropping packet\n",
756 chan->name); 733 chan->name);
757 chan->stats.rx_dropped++; 734 chan->netdev->stats.rx_dropped++;
758 return NULL; 735 return NULL;
759 } 736 }
760 chan->pppdev.dev->trans_start = jiffies; 737 chan->netdev->trans_start = jiffies;
761 return skb_put(chan->rx_skb, size); 738 return skb_put(chan->rx_skb, size);
762} 739}
763 740
764static int sppp_rx_done(struct channel_data *chan) 741static int cosa_net_rx_done(struct channel_data *chan)
765{ 742{
766 if (!chan->rx_skb) { 743 if (!chan->rx_skb) {
767 printk(KERN_WARNING "%s: rx_done with empty skb!\n", 744 printk(KERN_WARNING "%s: rx_done with empty skb!\n",
768 chan->name); 745 chan->name);
769 chan->stats.rx_errors++; 746 chan->netdev->stats.rx_errors++;
770 chan->stats.rx_frame_errors++; 747 chan->netdev->stats.rx_frame_errors++;
771 return 0; 748 return 0;
772 } 749 }
773 chan->rx_skb->protocol = htons(ETH_P_WAN_PPP); 750 chan->rx_skb->protocol = hdlc_type_trans(chan->rx_skb, chan->netdev);
774 chan->rx_skb->dev = chan->pppdev.dev; 751 chan->rx_skb->dev = chan->netdev;
775 skb_reset_mac_header(chan->rx_skb); 752 skb_reset_mac_header(chan->rx_skb);
776 chan->stats.rx_packets++; 753 chan->netdev->stats.rx_packets++;
777 chan->stats.rx_bytes += chan->cosa->rxsize; 754 chan->netdev->stats.rx_bytes += chan->cosa->rxsize;
778 netif_rx(chan->rx_skb); 755 netif_rx(chan->rx_skb);
779 chan->rx_skb = NULL; 756 chan->rx_skb = NULL;
780 chan->pppdev.dev->last_rx = jiffies; 757 chan->netdev->last_rx = jiffies;
781 return 0; 758 return 0;
782} 759}
783 760
784/* ARGSUSED */ 761/* ARGSUSED */
785static int sppp_tx_done(struct channel_data *chan, int size) 762static int cosa_net_tx_done(struct channel_data *chan, int size)
786{ 763{
787 if (!chan->tx_skb) { 764 if (!chan->tx_skb) {
788 printk(KERN_WARNING "%s: tx_done with empty skb!\n", 765 printk(KERN_WARNING "%s: tx_done with empty skb!\n",
789 chan->name); 766 chan->name);
790 chan->stats.tx_errors++; 767 chan->netdev->stats.tx_errors++;
791 chan->stats.tx_aborted_errors++; 768 chan->netdev->stats.tx_aborted_errors++;
792 return 1; 769 return 1;
793 } 770 }
794 dev_kfree_skb_irq(chan->tx_skb); 771 dev_kfree_skb_irq(chan->tx_skb);
795 chan->tx_skb = NULL; 772 chan->tx_skb = NULL;
796 chan->stats.tx_packets++; 773 chan->netdev->stats.tx_packets++;
797 chan->stats.tx_bytes += size; 774 chan->netdev->stats.tx_bytes += size;
798 netif_wake_queue(chan->pppdev.dev); 775 netif_wake_queue(chan->netdev);
799 return 1; 776 return 1;
800} 777}
801 778
802static struct net_device_stats *cosa_net_stats(struct net_device *dev)
803{
804 struct channel_data *chan = dev->ml_priv;
805 return &chan->stats;
806}
807
808
809/*---------- Character device ---------- */ 779/*---------- Character device ---------- */
810 780
811static void chardev_channel_init(struct channel_data *chan)
812{
813 mutex_init(&chan->rlock);
814 init_MUTEX(&chan->wsem);
815}
816
817static ssize_t cosa_read(struct file *file, 781static ssize_t cosa_read(struct file *file,
818 char __user *buf, size_t count, loff_t *ppos) 782 char __user *buf, size_t count, loff_t *ppos)
819{ 783{
@@ -1223,16 +1187,15 @@ static int cosa_ioctl_common(struct cosa_data *cosa,
1223 return -ENOIOCTLCMD; 1187 return -ENOIOCTLCMD;
1224} 1188}
1225 1189
1226static int cosa_sppp_ioctl(struct net_device *dev, struct ifreq *ifr, 1190static int cosa_net_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
1227 int cmd)
1228{ 1191{
1229 int rv; 1192 int rv;
1230 struct channel_data *chan = dev->ml_priv; 1193 struct channel_data *chan = dev_to_chan(dev);
1231 rv = cosa_ioctl_common(chan->cosa, chan, cmd, (unsigned long)ifr->ifr_data); 1194 rv = cosa_ioctl_common(chan->cosa, chan, cmd,
1232 if (rv == -ENOIOCTLCMD) { 1195 (unsigned long)ifr->ifr_data);
1233 return sppp_do_ioctl(dev, ifr, cmd); 1196 if (rv != -ENOIOCTLCMD)
1234 } 1197 return rv;
1235 return rv; 1198 return hdlc_ioctl(dev, ifr, cmd);
1236} 1199}
1237 1200
1238static int cosa_chardev_ioctl(struct inode *inode, struct file *file, 1201static int cosa_chardev_ioctl(struct inode *inode, struct file *file,
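
The cosa conversion above follows the usual recipe for moving a driver from syncppp to generic HDLC: allocate the interface with alloc_hdlcdev(priv), wire the hdlc_device attach/xmit hooks and the net_device open/stop/do_ioctl methods to driver routines, and register it with register_hdlc_device(). The driver routines then bracket their hardware work with hdlc_open()/hdlc_close(), defer unhandled ioctls to hdlc_ioctl(), and recover their per-channel data through dev_to_hdlc(dev)->priv. A condensed sketch of the per-channel setup, using the names from the patch (error handling trimmed):

    /* In cosa_probe(), one HDLC net_device per channel: */
    chan->netdev = alloc_hdlcdev(chan);           /* chan becomes dev_to_hdlc(dev)->priv */
    dev_to_hdlc(chan->netdev)->attach = cosa_net_attach;
    dev_to_hdlc(chan->netdev)->xmit = cosa_net_tx;
    chan->netdev->open = cosa_net_open;
    chan->netdev->stop = cosa_net_close;
    chan->netdev->do_ioctl = cosa_net_ioctl;
    if (register_hdlc_device(chan->netdev))
        free_netdev(chan->netdev);                /* and unwind previously registered channels */

    /* cosa_net_ioctl(): try the card-private commands first, then the HDLC core. */
    rv = cosa_ioctl_common(chan->cosa, chan, cmd, (unsigned long)ifr->ifr_data);
    if (rv != -ENOIOCTLCMD)
        return rv;
    return hdlc_ioctl(dev, ifr, cmd);

The attach() hook only has to confirm the one line coding the card supports, NRZ with 16-bit CCITT CRC, which is exactly what cosa_net_attach() does above.
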
diff --git a/drivers/net/wan/dscc4.c b/drivers/net/wan/dscc4.c
index 50ef5b4efd6d..f5d55ad02267 100644
--- a/drivers/net/wan/dscc4.c
+++ b/drivers/net/wan/dscc4.c
@@ -103,7 +103,6 @@
103#include <linux/netdevice.h> 103#include <linux/netdevice.h>
104#include <linux/skbuff.h> 104#include <linux/skbuff.h>
105#include <linux/delay.h> 105#include <linux/delay.h>
106#include <net/syncppp.h>
107#include <linux/hdlc.h> 106#include <linux/hdlc.h>
108#include <linux/mutex.h> 107#include <linux/mutex.h>
109 108
diff --git a/drivers/net/wan/farsync.c b/drivers/net/wan/farsync.c
index 754f00809e3e..9557ad078ab8 100644
--- a/drivers/net/wan/farsync.c
+++ b/drivers/net/wan/farsync.c
@@ -47,10 +47,7 @@ MODULE_LICENSE("GPL");
47/* Default parameters for the link 47/* Default parameters for the link
48 */ 48 */
49#define FST_TX_QUEUE_LEN 100 /* At 8Mbps a longer queue length is 49#define FST_TX_QUEUE_LEN 100 /* At 8Mbps a longer queue length is
50 * useful, the syncppp module forces 50 * useful */
51 * this down assuming a slower line I
52 * guess.
53 */
54#define FST_TXQ_DEPTH 16 /* This one is for the buffering 51#define FST_TXQ_DEPTH 16 /* This one is for the buffering
55 * of frames on the way down to the card 52 * of frames on the way down to the card
56 * so that we can keep the card busy 53 * so that we can keep the card busy
diff --git a/drivers/net/wan/farsync.h b/drivers/net/wan/farsync.h
index d871dafa87a1..6b27e7c3d449 100644
--- a/drivers/net/wan/farsync.h
+++ b/drivers/net/wan/farsync.h
@@ -54,9 +54,6 @@
54 54
55 55
56/* Ioctl call command values 56/* Ioctl call command values
57 *
58 * The first three private ioctls are used by the sync-PPP module,
59 * allowing a little room for expansion we start our numbering at 10.
60 */ 57 */
61#define FSTWRITE (SIOCDEVPRIVATE+10) 58#define FSTWRITE (SIOCDEVPRIVATE+10)
62#define FSTCPURESET (SIOCDEVPRIVATE+11) 59#define FSTCPURESET (SIOCDEVPRIVATE+11)
@@ -202,9 +199,6 @@ struct fstioc_info {
202#define J1 7 199#define J1 7
203 200
204/* "proto" */ 201/* "proto" */
205#define FST_HDLC 1 /* Cisco compatible HDLC */
206#define FST_PPP 2 /* Sync PPP */
207#define FST_MONITOR 3 /* Monitor only (raw packet reception) */
208#define FST_RAW 4 /* Two way raw packets */ 202#define FST_RAW 4 /* Two way raw packets */
209#define FST_GEN_HDLC 5 /* Using "Generic HDLC" module */ 203#define FST_GEN_HDLC 5 /* Using "Generic HDLC" module */
210 204
diff --git a/drivers/net/wan/hdlc.c b/drivers/net/wan/hdlc.c
index e3a536477c7e..1f2a140c9f7c 100644
--- a/drivers/net/wan/hdlc.c
+++ b/drivers/net/wan/hdlc.c
@@ -22,20 +22,19 @@
22 * - proto->start() and stop() are called with spin_lock_irq held. 22 * - proto->start() and stop() are called with spin_lock_irq held.
23 */ 23 */
24 24
25#include <linux/module.h>
26#include <linux/kernel.h>
27#include <linux/slab.h>
28#include <linux/poll.h>
29#include <linux/errno.h> 25#include <linux/errno.h>
26#include <linux/hdlc.h>
30#include <linux/if_arp.h> 27#include <linux/if_arp.h>
28#include <linux/inetdevice.h>
31#include <linux/init.h> 29#include <linux/init.h>
32#include <linux/skbuff.h> 30#include <linux/kernel.h>
31#include <linux/module.h>
32#include <linux/notifier.h>
33#include <linux/pkt_sched.h> 33#include <linux/pkt_sched.h>
34#include <linux/inetdevice.h> 34#include <linux/poll.h>
35#include <linux/lapb.h>
36#include <linux/rtnetlink.h> 35#include <linux/rtnetlink.h>
37#include <linux/notifier.h> 36#include <linux/skbuff.h>
38#include <linux/hdlc.h> 37#include <linux/slab.h>
39#include <net/net_namespace.h> 38#include <net/net_namespace.h>
40 39
41 40
@@ -109,7 +108,7 @@ static int hdlc_device_event(struct notifier_block *this, unsigned long event,
109 108
110 if (dev->get_stats != hdlc_get_stats) 109 if (dev->get_stats != hdlc_get_stats)
111 return NOTIFY_DONE; /* not an HDLC device */ 110 return NOTIFY_DONE; /* not an HDLC device */
112 111
113 if (event != NETDEV_CHANGE) 112 if (event != NETDEV_CHANGE)
114 return NOTIFY_DONE; /* Only interrested in carrier changes */ 113 return NOTIFY_DONE; /* Only interrested in carrier changes */
115 114
@@ -357,7 +356,7 @@ static struct packet_type hdlc_packet_type = {
357 356
358 357
359static struct notifier_block hdlc_notifier = { 358static struct notifier_block hdlc_notifier = {
360 .notifier_call = hdlc_device_event, 359 .notifier_call = hdlc_device_event,
361}; 360};
362 361
363 362
@@ -367,8 +366,8 @@ static int __init hdlc_module_init(void)
367 366
368 printk(KERN_INFO "%s\n", version); 367 printk(KERN_INFO "%s\n", version);
369 if ((result = register_netdevice_notifier(&hdlc_notifier)) != 0) 368 if ((result = register_netdevice_notifier(&hdlc_notifier)) != 0)
370 return result; 369 return result;
371 dev_add_pack(&hdlc_packet_type); 370 dev_add_pack(&hdlc_packet_type);
372 return 0; 371 return 0;
373} 372}
374 373
diff --git a/drivers/net/wan/hdlc_cisco.c b/drivers/net/wan/hdlc_cisco.c
index 849819c2552d..44e64b15dbd1 100644
--- a/drivers/net/wan/hdlc_cisco.c
+++ b/drivers/net/wan/hdlc_cisco.c
@@ -9,19 +9,18 @@
9 * as published by the Free Software Foundation. 9 * as published by the Free Software Foundation.
10 */ 10 */
11 11
12#include <linux/module.h>
13#include <linux/kernel.h>
14#include <linux/slab.h>
15#include <linux/poll.h>
16#include <linux/errno.h> 12#include <linux/errno.h>
13#include <linux/hdlc.h>
17#include <linux/if_arp.h> 14#include <linux/if_arp.h>
15#include <linux/inetdevice.h>
18#include <linux/init.h> 16#include <linux/init.h>
19#include <linux/skbuff.h> 17#include <linux/kernel.h>
18#include <linux/module.h>
20#include <linux/pkt_sched.h> 19#include <linux/pkt_sched.h>
21#include <linux/inetdevice.h> 20#include <linux/poll.h>
22#include <linux/lapb.h>
23#include <linux/rtnetlink.h> 21#include <linux/rtnetlink.h>
24#include <linux/hdlc.h> 22#include <linux/skbuff.h>
23#include <linux/slab.h>
25 24
26#undef DEBUG_HARD_HEADER 25#undef DEBUG_HARD_HEADER
27 26
@@ -68,9 +67,9 @@ struct cisco_state {
68static int cisco_ioctl(struct net_device *dev, struct ifreq *ifr); 67static int cisco_ioctl(struct net_device *dev, struct ifreq *ifr);
69 68
70 69
71static inline struct cisco_state * state(hdlc_device *hdlc) 70static inline struct cisco_state* state(hdlc_device *hdlc)
72{ 71{
73 return(struct cisco_state *)(hdlc->state); 72 return (struct cisco_state *)hdlc->state;
74} 73}
75 74
76 75
@@ -172,7 +171,7 @@ static int cisco_rx(struct sk_buff *skb)
172 data->address != CISCO_UNICAST) 171 data->address != CISCO_UNICAST)
173 goto rx_error; 172 goto rx_error;
174 173
175 switch(ntohs(data->protocol)) { 174 switch (ntohs(data->protocol)) {
176 case CISCO_SYS_INFO: 175 case CISCO_SYS_INFO:
177 /* Packet is not needed, drop it. */ 176 /* Packet is not needed, drop it. */
178 dev_kfree_skb_any(skb); 177 dev_kfree_skb_any(skb);
@@ -336,7 +335,7 @@ static struct hdlc_proto proto = {
336static const struct header_ops cisco_header_ops = { 335static const struct header_ops cisco_header_ops = {
337 .create = cisco_hard_header, 336 .create = cisco_hard_header,
338}; 337};
339 338
340static int cisco_ioctl(struct net_device *dev, struct ifreq *ifr) 339static int cisco_ioctl(struct net_device *dev, struct ifreq *ifr)
341{ 340{
342 cisco_proto __user *cisco_s = ifr->ifr_settings.ifs_ifsu.cisco; 341 cisco_proto __user *cisco_s = ifr->ifr_settings.ifs_ifsu.cisco;
@@ -359,10 +358,10 @@ static int cisco_ioctl(struct net_device *dev, struct ifreq *ifr)
359 return 0; 358 return 0;
360 359
361 case IF_PROTO_CISCO: 360 case IF_PROTO_CISCO:
362 if(!capable(CAP_NET_ADMIN)) 361 if (!capable(CAP_NET_ADMIN))
363 return -EPERM; 362 return -EPERM;
364 363
365 if(dev->flags & IFF_UP) 364 if (dev->flags & IFF_UP)
366 return -EBUSY; 365 return -EBUSY;
367 366
368 if (copy_from_user(&new_settings, cisco_s, size)) 367 if (copy_from_user(&new_settings, cisco_s, size))
@@ -372,7 +371,7 @@ static int cisco_ioctl(struct net_device *dev, struct ifreq *ifr)
372 new_settings.timeout < 2) 371 new_settings.timeout < 2)
373 return -EINVAL; 372 return -EINVAL;
374 373
375 result=hdlc->attach(dev, ENCODING_NRZ,PARITY_CRC16_PR1_CCITT); 374 result = hdlc->attach(dev, ENCODING_NRZ,PARITY_CRC16_PR1_CCITT);
376 if (result) 375 if (result)
377 return result; 376 return result;
378 377
diff --git a/drivers/net/wan/hdlc_fr.c b/drivers/net/wan/hdlc_fr.c
index 62e93dac6b13..d3d5055741ad 100644
--- a/drivers/net/wan/hdlc_fr.c
+++ b/drivers/net/wan/hdlc_fr.c
@@ -33,20 +33,19 @@
33 33
34*/ 34*/
35 35
36#include <linux/module.h>
37#include <linux/kernel.h>
38#include <linux/slab.h>
39#include <linux/poll.h>
40#include <linux/errno.h> 36#include <linux/errno.h>
37#include <linux/etherdevice.h>
38#include <linux/hdlc.h>
41#include <linux/if_arp.h> 39#include <linux/if_arp.h>
40#include <linux/inetdevice.h>
42#include <linux/init.h> 41#include <linux/init.h>
43#include <linux/skbuff.h> 42#include <linux/kernel.h>
43#include <linux/module.h>
44#include <linux/pkt_sched.h> 44#include <linux/pkt_sched.h>
45#include <linux/inetdevice.h> 45#include <linux/poll.h>
46#include <linux/lapb.h>
47#include <linux/rtnetlink.h> 46#include <linux/rtnetlink.h>
48#include <linux/etherdevice.h> 47#include <linux/skbuff.h>
49#include <linux/hdlc.h> 48#include <linux/slab.h>
50 49
51#undef DEBUG_PKT 50#undef DEBUG_PKT
52#undef DEBUG_ECN 51#undef DEBUG_ECN
@@ -96,7 +95,7 @@ typedef struct {
96 unsigned ea1: 1; 95 unsigned ea1: 1;
97 unsigned cr: 1; 96 unsigned cr: 1;
98 unsigned dlcih: 6; 97 unsigned dlcih: 6;
99 98
100 unsigned ea2: 1; 99 unsigned ea2: 1;
101 unsigned de: 1; 100 unsigned de: 1;
102 unsigned becn: 1; 101 unsigned becn: 1;
diff --git a/drivers/net/wan/hdlc_ppp.c b/drivers/net/wan/hdlc_ppp.c
index 00308337928e..4efe9e6d32d5 100644
--- a/drivers/net/wan/hdlc_ppp.c
+++ b/drivers/net/wan/hdlc_ppp.c
@@ -9,19 +9,18 @@
9 * as published by the Free Software Foundation. 9 * as published by the Free Software Foundation.
10 */ 10 */
11 11
12#include <linux/module.h>
13#include <linux/kernel.h>
14#include <linux/slab.h>
15#include <linux/poll.h>
16#include <linux/errno.h> 12#include <linux/errno.h>
13#include <linux/hdlc.h>
17#include <linux/if_arp.h> 14#include <linux/if_arp.h>
15#include <linux/inetdevice.h>
18#include <linux/init.h> 16#include <linux/init.h>
19#include <linux/skbuff.h> 17#include <linux/kernel.h>
18#include <linux/module.h>
20#include <linux/pkt_sched.h> 19#include <linux/pkt_sched.h>
21#include <linux/inetdevice.h> 20#include <linux/poll.h>
22#include <linux/lapb.h>
23#include <linux/rtnetlink.h> 21#include <linux/rtnetlink.h>
24#include <linux/hdlc.h> 22#include <linux/skbuff.h>
23#include <linux/slab.h>
25#include <net/syncppp.h> 24#include <net/syncppp.h>
26 25
27struct ppp_state { 26struct ppp_state {
diff --git a/drivers/net/wan/hdlc_raw.c b/drivers/net/wan/hdlc_raw.c
index bbbb819d764c..8612311748f4 100644
--- a/drivers/net/wan/hdlc_raw.c
+++ b/drivers/net/wan/hdlc_raw.c
@@ -9,19 +9,18 @@
9 * as published by the Free Software Foundation. 9 * as published by the Free Software Foundation.
10 */ 10 */
11 11
12#include <linux/module.h>
13#include <linux/kernel.h>
14#include <linux/slab.h>
15#include <linux/poll.h>
16#include <linux/errno.h> 12#include <linux/errno.h>
13#include <linux/hdlc.h>
17#include <linux/if_arp.h> 14#include <linux/if_arp.h>
15#include <linux/inetdevice.h>
18#include <linux/init.h> 16#include <linux/init.h>
19#include <linux/skbuff.h> 17#include <linux/kernel.h>
18#include <linux/module.h>
20#include <linux/pkt_sched.h> 19#include <linux/pkt_sched.h>
21#include <linux/inetdevice.h> 20#include <linux/poll.h>
22#include <linux/lapb.h>
23#include <linux/rtnetlink.h> 21#include <linux/rtnetlink.h>
24#include <linux/hdlc.h> 22#include <linux/skbuff.h>
23#include <linux/slab.h>
25 24
26 25
27static int raw_ioctl(struct net_device *dev, struct ifreq *ifr); 26static int raw_ioctl(struct net_device *dev, struct ifreq *ifr);
diff --git a/drivers/net/wan/hdlc_raw_eth.c b/drivers/net/wan/hdlc_raw_eth.c
index 26dee600506f..a13fc3207520 100644
--- a/drivers/net/wan/hdlc_raw_eth.c
+++ b/drivers/net/wan/hdlc_raw_eth.c
@@ -9,20 +9,19 @@
9 * as published by the Free Software Foundation. 9 * as published by the Free Software Foundation.
10 */ 10 */
11 11
12#include <linux/module.h>
13#include <linux/kernel.h>
14#include <linux/slab.h>
15#include <linux/poll.h>
16#include <linux/errno.h> 12#include <linux/errno.h>
13#include <linux/etherdevice.h>
14#include <linux/hdlc.h>
17#include <linux/if_arp.h> 15#include <linux/if_arp.h>
16#include <linux/inetdevice.h>
18#include <linux/init.h> 17#include <linux/init.h>
19#include <linux/skbuff.h> 18#include <linux/kernel.h>
19#include <linux/module.h>
20#include <linux/pkt_sched.h> 20#include <linux/pkt_sched.h>
21#include <linux/inetdevice.h> 21#include <linux/poll.h>
22#include <linux/lapb.h>
23#include <linux/rtnetlink.h> 22#include <linux/rtnetlink.h>
24#include <linux/etherdevice.h> 23#include <linux/skbuff.h>
25#include <linux/hdlc.h> 24#include <linux/slab.h>
26 25
27static int raw_eth_ioctl(struct net_device *dev, struct ifreq *ifr); 26static int raw_eth_ioctl(struct net_device *dev, struct ifreq *ifr);
28 27
diff --git a/drivers/net/wan/hdlc_x25.c b/drivers/net/wan/hdlc_x25.c
index e808720030ef..8b7e5d2e2ac9 100644
--- a/drivers/net/wan/hdlc_x25.c
+++ b/drivers/net/wan/hdlc_x25.c
@@ -9,20 +9,19 @@
9 * as published by the Free Software Foundation. 9 * as published by the Free Software Foundation.
10 */ 10 */
11 11
12#include <linux/module.h>
13#include <linux/kernel.h>
14#include <linux/slab.h>
15#include <linux/poll.h>
16#include <linux/errno.h> 12#include <linux/errno.h>
13#include <linux/hdlc.h>
17#include <linux/if_arp.h> 14#include <linux/if_arp.h>
18#include <linux/init.h>
19#include <linux/skbuff.h>
20#include <linux/pkt_sched.h>
21#include <linux/inetdevice.h> 15#include <linux/inetdevice.h>
16#include <linux/init.h>
17#include <linux/kernel.h>
22#include <linux/lapb.h> 18#include <linux/lapb.h>
19#include <linux/module.h>
20#include <linux/pkt_sched.h>
21#include <linux/poll.h>
23#include <linux/rtnetlink.h> 22#include <linux/rtnetlink.h>
24#include <linux/hdlc.h> 23#include <linux/skbuff.h>
25 24#include <linux/slab.h>
26#include <net/x25device.h> 25#include <net/x25device.h>
27 26
28static int x25_ioctl(struct net_device *dev, struct ifreq *ifr); 27static int x25_ioctl(struct net_device *dev, struct ifreq *ifr);
diff --git a/drivers/net/wan/hostess_sv11.c b/drivers/net/wan/hostess_sv11.c
index f3065d3473fd..e299313f828a 100644
--- a/drivers/net/wan/hostess_sv11.c
+++ b/drivers/net/wan/hostess_sv11.c
@@ -16,6 +16,8 @@
16 * touching control registers. 16 * touching control registers.
17 * 17 *
18 * Port B isnt wired (why - beats me) 18 * Port B isnt wired (why - beats me)
19 *
20 * Generic HDLC port Copyright (C) 2008 Krzysztof Halasa <khc@pm.waw.pl>
19 */ 21 */
20 22
21#include <linux/module.h> 23#include <linux/module.h>
@@ -26,6 +28,7 @@
26#include <linux/netdevice.h> 28#include <linux/netdevice.h>
27#include <linux/if_arp.h> 29#include <linux/if_arp.h>
28#include <linux/delay.h> 30#include <linux/delay.h>
31#include <linux/hdlc.h>
29#include <linux/ioport.h> 32#include <linux/ioport.h>
30#include <net/arp.h> 33#include <net/arp.h>
31 34
@@ -33,34 +36,31 @@
33#include <asm/io.h> 36#include <asm/io.h>
34#include <asm/dma.h> 37#include <asm/dma.h>
35#include <asm/byteorder.h> 38#include <asm/byteorder.h>
36#include <net/syncppp.h>
37#include "z85230.h" 39#include "z85230.h"
38 40
39static int dma; 41static int dma;
40 42
41struct sv11_device
42{
43 void *if_ptr; /* General purpose pointer (used by SPPP) */
44 struct z8530_dev sync;
45 struct ppp_device netdev;
46};
47
48/* 43/*
49 * Network driver support routines 44 * Network driver support routines
50 */ 45 */
51 46
47static inline struct z8530_dev* dev_to_sv(struct net_device *dev)
48{
49 return (struct z8530_dev *)dev_to_hdlc(dev)->priv;
50}
51
52/* 52/*
53 * Frame receive. Simple for our card as we do sync ppp and there 53 * Frame receive. Simple for our card as we do HDLC and there
54 * is no funny garbage involved 54 * is no funny garbage involved
55 */ 55 */
56 56
57static void hostess_input(struct z8530_channel *c, struct sk_buff *skb) 57static void hostess_input(struct z8530_channel *c, struct sk_buff *skb)
58{ 58{
59 /* Drop the CRC - it's not a good idea to try and negotiate it ;) */ 59 /* Drop the CRC - it's not a good idea to try and negotiate it ;) */
60 skb_trim(skb, skb->len-2); 60 skb_trim(skb, skb->len - 2);
61 skb->protocol=__constant_htons(ETH_P_WAN_PPP); 61 skb->protocol = hdlc_type_trans(skb, c->netdevice);
62 skb_reset_mac_header(skb); 62 skb_reset_mac_header(skb);
63 skb->dev=c->netdevice; 63 skb->dev = c->netdevice;
64 /* 64 /*
65 * Send it to the PPP layer. We don't have time to process 65 * Send it to the PPP layer. We don't have time to process
66 * it right now. 66 * it right now.
@@ -68,56 +68,51 @@ static void hostess_input(struct z8530_channel *c, struct sk_buff *skb)
68 netif_rx(skb); 68 netif_rx(skb);
69 c->netdevice->last_rx = jiffies; 69 c->netdevice->last_rx = jiffies;
70} 70}
71 71
72/* 72/*
73 * We've been placed in the UP state 73 * We've been placed in the UP state
74 */ 74 */
75 75
76static int hostess_open(struct net_device *d) 76static int hostess_open(struct net_device *d)
77{ 77{
78 struct sv11_device *sv11=d->ml_priv; 78 struct z8530_dev *sv11 = dev_to_sv(d);
79 int err = -1; 79 int err = -1;
80 80
81 /* 81 /*
82 * Link layer up 82 * Link layer up
83 */ 83 */
84 switch(dma) 84 switch (dma) {
85 {
86 case 0: 85 case 0:
87 err=z8530_sync_open(d, &sv11->sync.chanA); 86 err = z8530_sync_open(d, &sv11->chanA);
88 break; 87 break;
89 case 1: 88 case 1:
90 err=z8530_sync_dma_open(d, &sv11->sync.chanA); 89 err = z8530_sync_dma_open(d, &sv11->chanA);
91 break; 90 break;
92 case 2: 91 case 2:
93 err=z8530_sync_txdma_open(d, &sv11->sync.chanA); 92 err = z8530_sync_txdma_open(d, &sv11->chanA);
94 break; 93 break;
95 } 94 }
96 95
97 if(err) 96 if (err)
98 return err; 97 return err;
99 /* 98
100 * Begin PPP 99 err = hdlc_open(d);
101 */ 100 if (err) {
102 err=sppp_open(d); 101 switch (dma) {
103 if(err)
104 {
105 switch(dma)
106 {
107 case 0: 102 case 0:
108 z8530_sync_close(d, &sv11->sync.chanA); 103 z8530_sync_close(d, &sv11->chanA);
109 break; 104 break;
110 case 1: 105 case 1:
111 z8530_sync_dma_close(d, &sv11->sync.chanA); 106 z8530_sync_dma_close(d, &sv11->chanA);
112 break; 107 break;
113 case 2: 108 case 2:
114 z8530_sync_txdma_close(d, &sv11->sync.chanA); 109 z8530_sync_txdma_close(d, &sv11->chanA);
115 break; 110 break;
116 } 111 }
117 return err; 112 return err;
118 } 113 }
119 sv11->sync.chanA.rx_function=hostess_input; 114 sv11->chanA.rx_function = hostess_input;
120 115
121 /* 116 /*
122 * Go go go 117 * Go go go
123 */ 118 */
@@ -128,30 +123,24 @@ static int hostess_open(struct net_device *d)
128 123
129static int hostess_close(struct net_device *d) 124static int hostess_close(struct net_device *d)
130{ 125{
131 struct sv11_device *sv11=d->ml_priv; 126 struct z8530_dev *sv11 = dev_to_sv(d);
132 /* 127 /*
133 * Discard new frames 128 * Discard new frames
134 */ 129 */
135 sv11->sync.chanA.rx_function=z8530_null_rx; 130 sv11->chanA.rx_function = z8530_null_rx;
136 /* 131
137 * PPP off 132 hdlc_close(d);
138 */
139 sppp_close(d);
140 /*
141 * Link layer down
142 */
143 netif_stop_queue(d); 133 netif_stop_queue(d);
144 134
145 switch(dma) 135 switch (dma) {
146 {
147 case 0: 136 case 0:
148 z8530_sync_close(d, &sv11->sync.chanA); 137 z8530_sync_close(d, &sv11->chanA);
149 break; 138 break;
150 case 1: 139 case 1:
151 z8530_sync_dma_close(d, &sv11->sync.chanA); 140 z8530_sync_dma_close(d, &sv11->chanA);
152 break; 141 break;
153 case 2: 142 case 2:
154 z8530_sync_txdma_close(d, &sv11->sync.chanA); 143 z8530_sync_txdma_close(d, &sv11->chanA);
155 break; 144 break;
156 } 145 }
157 return 0; 146 return 0;
@@ -159,232 +148,174 @@ static int hostess_close(struct net_device *d)
159 148
160static int hostess_ioctl(struct net_device *d, struct ifreq *ifr, int cmd) 149static int hostess_ioctl(struct net_device *d, struct ifreq *ifr, int cmd)
161{ 150{
162 /* struct sv11_device *sv11=d->ml_priv; 151 /* struct z8530_dev *sv11=dev_to_sv(d);
163 z8530_ioctl(d,&sv11->sync.chanA,ifr,cmd) */ 152 z8530_ioctl(d,&sv11->chanA,ifr,cmd) */
164 return sppp_do_ioctl(d, ifr,cmd); 153 return hdlc_ioctl(d, ifr, cmd);
165}
166
167static struct net_device_stats *hostess_get_stats(struct net_device *d)
168{
169 struct sv11_device *sv11=d->ml_priv;
170 if(sv11)
171 return z8530_get_stats(&sv11->sync.chanA);
172 else
173 return NULL;
174} 154}
175 155
176/* 156/*
177 * Passed PPP frames, fire them downwind. 157 * Passed network frames, fire them downwind.
178 */ 158 */
179 159
180static int hostess_queue_xmit(struct sk_buff *skb, struct net_device *d) 160static int hostess_queue_xmit(struct sk_buff *skb, struct net_device *d)
181{ 161{
182 struct sv11_device *sv11=d->ml_priv; 162 return z8530_queue_xmit(&dev_to_sv(d)->chanA, skb);
183 return z8530_queue_xmit(&sv11->sync.chanA, skb);
184} 163}
185 164
186static int hostess_neigh_setup(struct neighbour *n) 165static int hostess_attach(struct net_device *dev, unsigned short encoding,
166 unsigned short parity)
187{ 167{
188 if (n->nud_state == NUD_NONE) { 168 if (encoding == ENCODING_NRZ && parity == PARITY_CRC16_PR1_CCITT)
189 n->ops = &arp_broken_ops; 169 return 0;
190 n->output = n->ops->output; 170 return -EINVAL;
191 }
192 return 0;
193}
194
195static int hostess_neigh_setup_dev(struct net_device *dev, struct neigh_parms *p)
196{
197 if (p->tbl->family == AF_INET) {
198 p->neigh_setup = hostess_neigh_setup;
199 p->ucast_probes = 0;
200 p->mcast_probes = 0;
201 }
202 return 0;
203}
204
205static void sv11_setup(struct net_device *dev)
206{
207 dev->open = hostess_open;
208 dev->stop = hostess_close;
209 dev->hard_start_xmit = hostess_queue_xmit;
210 dev->get_stats = hostess_get_stats;
211 dev->do_ioctl = hostess_ioctl;
212 dev->neigh_setup = hostess_neigh_setup_dev;
213} 171}
214 172
215/* 173/*
216 * Description block for a Comtrol Hostess SV11 card 174 * Description block for a Comtrol Hostess SV11 card
217 */ 175 */
218 176
219static struct sv11_device *sv11_init(int iobase, int irq) 177static struct z8530_dev *sv11_init(int iobase, int irq)
220{ 178{
221 struct z8530_dev *dev; 179 struct z8530_dev *sv;
222 struct sv11_device *sv; 180 struct net_device *netdev;
223
224 /* 181 /*
225 * Get the needed I/O space 182 * Get the needed I/O space
226 */ 183 */
227 184
228 if(!request_region(iobase, 8, "Comtrol SV11")) 185 if (!request_region(iobase, 8, "Comtrol SV11")) {
229 { 186 printk(KERN_WARNING "hostess: I/O 0x%X already in use.\n",
230 printk(KERN_WARNING "hostess: I/O 0x%X already in use.\n", iobase); 187 iobase);
231 return NULL; 188 return NULL;
232 } 189 }
233 190
234 sv = kzalloc(sizeof(struct sv11_device), GFP_KERNEL); 191 sv = kzalloc(sizeof(struct z8530_dev), GFP_KERNEL);
235 if(!sv) 192 if (!sv)
236 goto fail3; 193 goto err_kzalloc;
237 194
238 sv->if_ptr=&sv->netdev;
239
240 sv->netdev.dev = alloc_netdev(0, "hdlc%d", sv11_setup);
241 if(!sv->netdev.dev)
242 goto fail2;
243
244 dev=&sv->sync;
245
246 /* 195 /*
247 * Stuff in the I/O addressing 196 * Stuff in the I/O addressing
248 */ 197 */
249 198
250 dev->active = 0; 199 sv->active = 0;
251 200
252 dev->chanA.ctrlio=iobase+1; 201 sv->chanA.ctrlio = iobase + 1;
253 dev->chanA.dataio=iobase+3; 202 sv->chanA.dataio = iobase + 3;
254 dev->chanB.ctrlio=-1; 203 sv->chanB.ctrlio = -1;
255 dev->chanB.dataio=-1; 204 sv->chanB.dataio = -1;
256 dev->chanA.irqs=&z8530_nop; 205 sv->chanA.irqs = &z8530_nop;
257 dev->chanB.irqs=&z8530_nop; 206 sv->chanB.irqs = &z8530_nop;
258 207
259 outb(0, iobase+4); /* DMA off */ 208 outb(0, iobase + 4); /* DMA off */
260 209
261 /* We want a fast IRQ for this device. Actually we'd like an even faster 210 /* We want a fast IRQ for this device. Actually we'd like an even faster
262 IRQ ;) - This is one driver RtLinux is made for */ 211 IRQ ;) - This is one driver RtLinux is made for */
263 212
264 if(request_irq(irq, &z8530_interrupt, IRQF_DISABLED, "Hostess SV11", dev)<0) 213 if (request_irq(irq, &z8530_interrupt, IRQF_DISABLED,
265 { 214 "Hostess SV11", sv) < 0) {
266 printk(KERN_WARNING "hostess: IRQ %d already in use.\n", irq); 215 printk(KERN_WARNING "hostess: IRQ %d already in use.\n", irq);
267 goto fail1; 216 goto err_irq;
268 } 217 }
269 218
270 dev->irq=irq; 219 sv->irq = irq;
271 dev->chanA.private=sv; 220 sv->chanA.private = sv;
272 dev->chanA.netdevice=sv->netdev.dev; 221 sv->chanA.dev = sv;
273 dev->chanA.dev=dev; 222 sv->chanB.dev = sv;
274 dev->chanB.dev=dev; 223
275 224 if (dma) {
276 if(dma)
277 {
278 /* 225 /*
279 * You can have DMA off or 1 and 3 thats the lot 226 * You can have DMA off or 1 and 3 thats the lot
280 * on the Comtrol. 227 * on the Comtrol.
281 */ 228 */
282 dev->chanA.txdma=3; 229 sv->chanA.txdma = 3;
283 dev->chanA.rxdma=1; 230 sv->chanA.rxdma = 1;
284 outb(0x03|0x08, iobase+4); /* DMA on */ 231 outb(0x03 | 0x08, iobase + 4); /* DMA on */
285 if(request_dma(dev->chanA.txdma, "Hostess SV/11 (TX)")!=0) 232 if (request_dma(sv->chanA.txdma, "Hostess SV/11 (TX)"))
286 goto fail; 233 goto err_txdma;
287 234
288 if(dma==1) 235 if (dma == 1)
289 { 236 if (request_dma(sv->chanA.rxdma, "Hostess SV/11 (RX)"))
290 if(request_dma(dev->chanA.rxdma, "Hostess SV/11 (RX)")!=0) 237 goto err_rxdma;
291 goto dmafail;
292 }
293 } 238 }
294 239
295 /* Kill our private IRQ line the hostess can end up chattering 240 /* Kill our private IRQ line the hostess can end up chattering
296 until the configuration is set */ 241 until the configuration is set */
297 disable_irq(irq); 242 disable_irq(irq);
298 243
299 /* 244 /*
300 * Begin normal initialise 245 * Begin normal initialise
301 */ 246 */
302 247
303 if(z8530_init(dev)!=0) 248 if (z8530_init(sv)) {
304 {
305 printk(KERN_ERR "Z8530 series device not found.\n"); 249 printk(KERN_ERR "Z8530 series device not found.\n");
306 enable_irq(irq); 250 enable_irq(irq);
307 goto dmafail2; 251 goto free_dma;
308 } 252 }
309 z8530_channel_load(&dev->chanB, z8530_dead_port); 253 z8530_channel_load(&sv->chanB, z8530_dead_port);
310 if(dev->type==Z85C30) 254 if (sv->type == Z85C30)
311 z8530_channel_load(&dev->chanA, z8530_hdlc_kilostream); 255 z8530_channel_load(&sv->chanA, z8530_hdlc_kilostream);
312 else 256 else
313 z8530_channel_load(&dev->chanA, z8530_hdlc_kilostream_85230); 257 z8530_channel_load(&sv->chanA, z8530_hdlc_kilostream_85230);
314 258
315 enable_irq(irq); 259 enable_irq(irq);
316
317 260
318 /* 261 /*
319 * Now we can take the IRQ 262 * Now we can take the IRQ
320 */ 263 */
321 if(dev_alloc_name(dev->chanA.netdevice,"hdlc%d")>=0)
322 {
323 struct net_device *d=dev->chanA.netdevice;
324 264
325 /* 265 sv->chanA.netdevice = netdev = alloc_hdlcdev(sv);
326 * Initialise the PPP components 266 if (!netdev)
327 */ 267 goto free_dma;
328 d->ml_priv = sv;
329 sppp_attach(&sv->netdev);
330
331 /*
332 * Local fields
333 */
334
335 d->base_addr = iobase;
336 d->irq = irq;
337
338 if(register_netdev(d))
339 {
340 printk(KERN_ERR "%s: unable to register device.\n",
341 d->name);
342 sppp_detach(d);
343 goto dmafail2;
344 }
345 268
346 z8530_describe(dev, "I/O", iobase); 269 dev_to_hdlc(netdev)->attach = hostess_attach;
347 dev->active=1; 270 dev_to_hdlc(netdev)->xmit = hostess_queue_xmit;
348 return sv; 271 netdev->open = hostess_open;
272 netdev->stop = hostess_close;
273 netdev->do_ioctl = hostess_ioctl;
274 netdev->base_addr = iobase;
275 netdev->irq = irq;
276
277 if (register_hdlc_device(netdev)) {
278 printk(KERN_ERR "hostess: unable to register HDLC device.\n");
279 free_netdev(netdev);
280 goto free_dma;
349 } 281 }
350dmafail2: 282
351 if(dma==1) 283 z8530_describe(sv, "I/O", iobase);
352 free_dma(dev->chanA.rxdma); 284 sv->active = 1;
353dmafail: 285 return sv;
354 if(dma) 286
355 free_dma(dev->chanA.txdma); 287free_dma:
356fail: 288 if (dma == 1)
357 free_irq(irq, dev); 289 free_dma(sv->chanA.rxdma);
358fail1: 290err_rxdma:
359 free_netdev(sv->netdev.dev); 291 if (dma)
360fail2: 292 free_dma(sv->chanA.txdma);
293err_txdma:
294 free_irq(irq, sv);
295err_irq:
361 kfree(sv); 296 kfree(sv);
362fail3: 297err_kzalloc:
363 release_region(iobase,8); 298 release_region(iobase, 8);
364 return NULL; 299 return NULL;
365} 300}
366 301
367static void sv11_shutdown(struct sv11_device *dev) 302static void sv11_shutdown(struct z8530_dev *dev)
368{ 303{
369 sppp_detach(dev->netdev.dev); 304 unregister_hdlc_device(dev->chanA.netdevice);
370 unregister_netdev(dev->netdev.dev); 305 z8530_shutdown(dev);
371 z8530_shutdown(&dev->sync); 306 free_irq(dev->irq, dev);
372 free_irq(dev->sync.irq, dev); 307 if (dma) {
373 if(dma) 308 if (dma == 1)
374 { 309 free_dma(dev->chanA.rxdma);
375 if(dma==1) 310 free_dma(dev->chanA.txdma);
376 free_dma(dev->sync.chanA.rxdma);
377 free_dma(dev->sync.chanA.txdma);
378 } 311 }
379 release_region(dev->sync.chanA.ctrlio-1, 8); 312 release_region(dev->chanA.ctrlio - 1, 8);
380 free_netdev(dev->netdev.dev); 313 free_netdev(dev->chanA.netdevice);
381 kfree(dev); 314 kfree(dev);
382} 315}
383 316
384#ifdef MODULE 317static int io = 0x200;
385 318static int irq = 9;
386static int io=0x200;
387static int irq=9;
388 319
389module_param(io, int, 0); 320module_param(io, int, 0);
390MODULE_PARM_DESC(io, "The I/O base of the Comtrol Hostess SV11 card"); 321MODULE_PARM_DESC(io, "The I/O base of the Comtrol Hostess SV11 card");
@@ -397,22 +328,17 @@ MODULE_AUTHOR("Alan Cox");
397MODULE_LICENSE("GPL"); 328MODULE_LICENSE("GPL");
398MODULE_DESCRIPTION("Modular driver for the Comtrol Hostess SV11"); 329MODULE_DESCRIPTION("Modular driver for the Comtrol Hostess SV11");
399 330
400static struct sv11_device *sv11_unit; 331static struct z8530_dev *sv11_unit;
401 332
402int init_module(void) 333int init_module(void)
403{ 334{
404 printk(KERN_INFO "SV-11 Z85230 Synchronous Driver v 0.03.\n"); 335 if ((sv11_unit = sv11_init(io, irq)) == NULL)
405 printk(KERN_INFO "(c) Copyright 2001, Red Hat Inc.\n");
406 if((sv11_unit=sv11_init(io,irq))==NULL)
407 return -ENODEV; 336 return -ENODEV;
408 return 0; 337 return 0;
409} 338}
410 339
411void cleanup_module(void) 340void cleanup_module(void)
412{ 341{
413 if(sv11_unit) 342 if (sv11_unit)
414 sv11_shutdown(sv11_unit); 343 sv11_shutdown(sv11_unit);
415} 344}
416
417#endif
418
diff --git a/drivers/net/wan/lmc/lmc.h b/drivers/net/wan/lmc/lmc.h
index 882e58c1bfd7..4ced7ac16c2c 100644
--- a/drivers/net/wan/lmc/lmc.h
+++ b/drivers/net/wan/lmc/lmc.h
@@ -11,12 +11,12 @@ unsigned lmc_mii_readreg(lmc_softc_t * const sc, unsigned
11 devaddr, unsigned regno); 11 devaddr, unsigned regno);
12void lmc_mii_writereg(lmc_softc_t * const sc, unsigned devaddr, 12void lmc_mii_writereg(lmc_softc_t * const sc, unsigned devaddr,
13 unsigned regno, unsigned data); 13 unsigned regno, unsigned data);
14void lmc_led_on(lmc_softc_t * const, u_int32_t); 14void lmc_led_on(lmc_softc_t * const, u32);
15void lmc_led_off(lmc_softc_t * const, u_int32_t); 15void lmc_led_off(lmc_softc_t * const, u32);
16unsigned lmc_mii_readreg(lmc_softc_t * const, unsigned, unsigned); 16unsigned lmc_mii_readreg(lmc_softc_t * const, unsigned, unsigned);
17void lmc_mii_writereg(lmc_softc_t * const, unsigned, unsigned, unsigned); 17void lmc_mii_writereg(lmc_softc_t * const, unsigned, unsigned, unsigned);
18void lmc_gpio_mkinput(lmc_softc_t * const sc, u_int32_t bits); 18void lmc_gpio_mkinput(lmc_softc_t * const sc, u32 bits);
19void lmc_gpio_mkoutput(lmc_softc_t * const sc, u_int32_t bits); 19void lmc_gpio_mkoutput(lmc_softc_t * const sc, u32 bits);
20 20
21int lmc_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd); 21int lmc_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd);
22 22
@@ -26,8 +26,7 @@ extern lmc_media_t lmc_t1_media;
26extern lmc_media_t lmc_hssi_media; 26extern lmc_media_t lmc_hssi_media;
27 27
28#ifdef _DBG_EVENTLOG 28#ifdef _DBG_EVENTLOG
29static void lmcEventLog( u_int32_t EventNum, u_int32_t arg2, u_int32_t arg3 ); 29static void lmcEventLog(u32 EventNum, u32 arg2, u32 arg3);
30#endif 30#endif
31 31
32#endif 32#endif
33
diff --git a/drivers/net/wan/lmc/lmc_debug.c b/drivers/net/wan/lmc/lmc_debug.c
index 3b94352b0d03..15049d711f47 100644
--- a/drivers/net/wan/lmc/lmc_debug.c
+++ b/drivers/net/wan/lmc/lmc_debug.c
@@ -1,4 +1,3 @@
1
2#include <linux/types.h> 1#include <linux/types.h>
3#include <linux/netdevice.h> 2#include <linux/netdevice.h>
4#include <linux/interrupt.h> 3#include <linux/interrupt.h>
@@ -48,10 +47,10 @@ void lmcConsoleLog(char *type, unsigned char *ucData, int iLen)
48#endif 47#endif
49 48
50#ifdef DEBUG 49#ifdef DEBUG
51u_int32_t lmcEventLogIndex = 0; 50u32 lmcEventLogIndex;
52u_int32_t lmcEventLogBuf[LMC_EVENTLOGSIZE * LMC_EVENTLOGARGS]; 51u32 lmcEventLogBuf[LMC_EVENTLOGSIZE * LMC_EVENTLOGARGS];
53 52
54void lmcEventLog (u_int32_t EventNum, u_int32_t arg2, u_int32_t arg3) 53void lmcEventLog(u32 EventNum, u32 arg2, u32 arg3)
55{ 54{
56 lmcEventLogBuf[lmcEventLogIndex++] = EventNum; 55 lmcEventLogBuf[lmcEventLogIndex++] = EventNum;
57 lmcEventLogBuf[lmcEventLogIndex++] = arg2; 56 lmcEventLogBuf[lmcEventLogIndex++] = arg2;
diff --git a/drivers/net/wan/lmc/lmc_debug.h b/drivers/net/wan/lmc/lmc_debug.h
index cf3563859bf3..2d46f121549f 100644
--- a/drivers/net/wan/lmc/lmc_debug.h
+++ b/drivers/net/wan/lmc/lmc_debug.h
@@ -38,15 +38,15 @@
38 38
39 39
40#ifdef DEBUG 40#ifdef DEBUG
41extern u_int32_t lmcEventLogIndex; 41extern u32 lmcEventLogIndex;
42extern u_int32_t lmcEventLogBuf[LMC_EVENTLOGSIZE * LMC_EVENTLOGARGS]; 42extern u32 lmcEventLogBuf[LMC_EVENTLOGSIZE * LMC_EVENTLOGARGS];
43#define LMC_EVENT_LOG(x, y, z) lmcEventLog((x), (y), (z)) 43#define LMC_EVENT_LOG(x, y, z) lmcEventLog((x), (y), (z))
44#else 44#else
45#define LMC_EVENT_LOG(x,y,z) 45#define LMC_EVENT_LOG(x,y,z)
46#endif /* end ifdef _DBG_EVENTLOG */ 46#endif /* end ifdef _DBG_EVENTLOG */
47 47
48void lmcConsoleLog(char *type, unsigned char *ucData, int iLen); 48void lmcConsoleLog(char *type, unsigned char *ucData, int iLen);
49void lmcEventLog (u_int32_t EventNum, u_int32_t arg2, u_int32_t arg3); 49void lmcEventLog(u32 EventNum, u32 arg2, u32 arg3);
50void lmc_trace(struct net_device *dev, char *msg); 50void lmc_trace(struct net_device *dev, char *msg);
51 51
52#endif 52#endif
diff --git a/drivers/net/wan/lmc/lmc_ioctl.h b/drivers/net/wan/lmc/lmc_ioctl.h
index 57dd861cd3db..72fb113a44ca 100644
--- a/drivers/net/wan/lmc/lmc_ioctl.h
+++ b/drivers/net/wan/lmc/lmc_ioctl.h
@@ -61,7 +61,7 @@
61/* 61/*
62 * IFTYPE defines 62 * IFTYPE defines
63 */ 63 */
64#define LMC_PPP 1 /* use sppp interface */ 64#define LMC_PPP 1 /* use generic HDLC interface */
65#define LMC_NET 2 /* use direct net interface */ 65#define LMC_NET 2 /* use direct net interface */
66#define LMC_RAW 3 /* use direct net interface */ 66#define LMC_RAW 3 /* use direct net interface */
67 67
diff --git a/drivers/net/wan/lmc/lmc_main.c b/drivers/net/wan/lmc/lmc_main.c
index 62133cee446a..f80640f5a744 100644
--- a/drivers/net/wan/lmc/lmc_main.c
+++ b/drivers/net/wan/lmc/lmc_main.c
@@ -1,6 +1,7 @@
1 /* 1 /*
2 * Copyright (c) 1997-2000 LAN Media Corporation (LMC) 2 * Copyright (c) 1997-2000 LAN Media Corporation (LMC)
3 * All rights reserved. www.lanmedia.com 3 * All rights reserved. www.lanmedia.com
4 * Generic HDLC port Copyright (C) 2008 Krzysztof Halasa <khc@pm.waw.pl>
4 * 5 *
5 * This code is written by: 6 * This code is written by:
6 * Andrew Stanley-Jones (asj@cban.com) 7 * Andrew Stanley-Jones (asj@cban.com)
@@ -36,8 +37,6 @@
36 * 37 *
37 */ 38 */
38 39
39/* $Id: lmc_main.c,v 1.36 2000/04/11 05:25:25 asj Exp $ */
40
41#include <linux/kernel.h> 40#include <linux/kernel.h>
42#include <linux/module.h> 41#include <linux/module.h>
43#include <linux/string.h> 42#include <linux/string.h>
@@ -49,6 +48,7 @@
49#include <linux/interrupt.h> 48#include <linux/interrupt.h>
50#include <linux/pci.h> 49#include <linux/pci.h>
51#include <linux/delay.h> 50#include <linux/delay.h>
51#include <linux/hdlc.h>
52#include <linux/init.h> 52#include <linux/init.h>
53#include <linux/in.h> 53#include <linux/in.h>
54#include <linux/if_arp.h> 54#include <linux/if_arp.h>
@@ -57,9 +57,6 @@
57#include <linux/skbuff.h> 57#include <linux/skbuff.h>
58#include <linux/inet.h> 58#include <linux/inet.h>
59#include <linux/bitops.h> 59#include <linux/bitops.h>
60
61#include <net/syncppp.h>
62
63#include <asm/processor.h> /* Processor type for cache alignment. */ 60#include <asm/processor.h> /* Processor type for cache alignment. */
64#include <asm/io.h> 61#include <asm/io.h>
65#include <asm/dma.h> 62#include <asm/dma.h>
@@ -78,8 +75,6 @@
78#include "lmc_debug.h" 75#include "lmc_debug.h"
79#include "lmc_proto.h" 76#include "lmc_proto.h"
80 77
81static int lmc_first_load = 0;
82
83static int LMC_PKT_BUF_SZ = 1542; 78static int LMC_PKT_BUF_SZ = 1542;
84 79
85static struct pci_device_id lmc_pci_tbl[] = { 80static struct pci_device_id lmc_pci_tbl[] = {
@@ -91,11 +86,10 @@ static struct pci_device_id lmc_pci_tbl[] = {
91}; 86};
92 87
93MODULE_DEVICE_TABLE(pci, lmc_pci_tbl); 88MODULE_DEVICE_TABLE(pci, lmc_pci_tbl);
94MODULE_LICENSE("GPL"); 89MODULE_LICENSE("GPL v2");
95 90
96 91
97static int lmc_start_xmit(struct sk_buff *skb, struct net_device *dev); 92static int lmc_start_xmit(struct sk_buff *skb, struct net_device *dev);
98static int lmc_start_xmit(struct sk_buff *skb, struct net_device *dev);
99static int lmc_rx (struct net_device *dev); 93static int lmc_rx (struct net_device *dev);
100static int lmc_open(struct net_device *dev); 94static int lmc_open(struct net_device *dev);
101static int lmc_close(struct net_device *dev); 95static int lmc_close(struct net_device *dev);
@@ -114,20 +108,14 @@ static void lmc_driver_timeout(struct net_device *dev);
114 * linux reserves 16 device specific IOCTLs. We call them 108 * linux reserves 16 device specific IOCTLs. We call them
115 * LMCIOC* to control various bits of our world. 109 * LMCIOC* to control various bits of our world.
116 */ 110 */
117int lmc_ioctl (struct net_device *dev, struct ifreq *ifr, int cmd) /*fold00*/ 111int lmc_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) /*fold00*/
118{ 112{
119 lmc_softc_t *sc; 113 lmc_softc_t *sc = dev_to_sc(dev);
120 lmc_ctl_t ctl; 114 lmc_ctl_t ctl;
121 int ret; 115 int ret = -EOPNOTSUPP;
122 u_int16_t regVal; 116 u16 regVal;
123 unsigned long flags; 117 unsigned long flags;
124 118
125 struct sppp *sp;
126
127 ret = -EOPNOTSUPP;
128
129 sc = dev->priv;
130
131 lmc_trace(dev, "lmc_ioctl in"); 119 lmc_trace(dev, "lmc_ioctl in");
132 120
133 /* 121 /*
@@ -149,7 +137,6 @@ int lmc_ioctl (struct net_device *dev, struct ifreq *ifr, int cmd) /*fold00*/
149 break; 137 break;
150 138
151 case LMCIOCSINFO: /*fold01*/ 139 case LMCIOCSINFO: /*fold01*/
152 sp = &((struct ppp_device *) dev)->sppp;
153 if (!capable(CAP_NET_ADMIN)) { 140 if (!capable(CAP_NET_ADMIN)) {
154 ret = -EPERM; 141 ret = -EPERM;
155 break; 142 break;
@@ -175,25 +162,20 @@ int lmc_ioctl (struct net_device *dev, struct ifreq *ifr, int cmd) /*fold00*/
175 sc->TxDescriptControlInit &= ~LMC_TDES_ADD_CRC_DISABLE; 162 sc->TxDescriptControlInit &= ~LMC_TDES_ADD_CRC_DISABLE;
176 } 163 }
177 164
178 if (ctl.keepalive_onoff == LMC_CTL_OFF)
179 sp->pp_flags &= ~PP_KEEPALIVE; /* Turn off */
180 else
181 sp->pp_flags |= PP_KEEPALIVE; /* Turn on */
182
183 ret = 0; 165 ret = 0;
184 break; 166 break;
185 167
186 case LMCIOCIFTYPE: /*fold01*/ 168 case LMCIOCIFTYPE: /*fold01*/
187 { 169 {
188 u_int16_t old_type = sc->if_type; 170 u16 old_type = sc->if_type;
189 u_int16_t new_type; 171 u16 new_type;
190 172
191 if (!capable(CAP_NET_ADMIN)) { 173 if (!capable(CAP_NET_ADMIN)) {
192 ret = -EPERM; 174 ret = -EPERM;
193 break; 175 break;
194 } 176 }
195 177
196 if (copy_from_user(&new_type, ifr->ifr_data, sizeof(u_int16_t))) { 178 if (copy_from_user(&new_type, ifr->ifr_data, sizeof(u16))) {
197 ret = -EFAULT; 179 ret = -EFAULT;
198 break; 180 break;
199 } 181 }
@@ -206,15 +188,11 @@ int lmc_ioctl (struct net_device *dev, struct ifreq *ifr, int cmd) /*fold00*/
206 } 188 }
207 189
208 lmc_proto_close(sc); 190 lmc_proto_close(sc);
209 lmc_proto_detach(sc);
210 191
211 sc->if_type = new_type; 192 sc->if_type = new_type;
212// lmc_proto_init(sc);
213 lmc_proto_attach(sc); 193 lmc_proto_attach(sc);
214 lmc_proto_open(sc); 194 ret = lmc_proto_open(sc);
215 195 break;
216 ret = 0 ;
217 break ;
218 } 196 }
219 197
220 case LMCIOCGETXINFO: /*fold01*/ 198 case LMCIOCGETXINFO: /*fold01*/
@@ -241,51 +219,53 @@ int lmc_ioctl (struct net_device *dev, struct ifreq *ifr, int cmd) /*fold00*/
241 219
242 break; 220 break;
243 221
244 case LMCIOCGETLMCSTATS: /*fold01*/ 222 case LMCIOCGETLMCSTATS:
245 if (sc->lmc_cardtype == LMC_CARDTYPE_T1){ 223 if (sc->lmc_cardtype == LMC_CARDTYPE_T1) {
246 lmc_mii_writereg (sc, 0, 17, T1FRAMER_FERR_LSB); 224 lmc_mii_writereg(sc, 0, 17, T1FRAMER_FERR_LSB);
247 sc->stats.framingBitErrorCount += 225 sc->extra_stats.framingBitErrorCount +=
248 lmc_mii_readreg (sc, 0, 18) & 0xff; 226 lmc_mii_readreg(sc, 0, 18) & 0xff;
249 lmc_mii_writereg (sc, 0, 17, T1FRAMER_FERR_MSB); 227 lmc_mii_writereg(sc, 0, 17, T1FRAMER_FERR_MSB);
250 sc->stats.framingBitErrorCount += 228 sc->extra_stats.framingBitErrorCount +=
251 (lmc_mii_readreg (sc, 0, 18) & 0xff) << 8; 229 (lmc_mii_readreg(sc, 0, 18) & 0xff) << 8;
252 lmc_mii_writereg (sc, 0, 17, T1FRAMER_LCV_LSB); 230 lmc_mii_writereg(sc, 0, 17, T1FRAMER_LCV_LSB);
253 sc->stats.lineCodeViolationCount += 231 sc->extra_stats.lineCodeViolationCount +=
254 lmc_mii_readreg (sc, 0, 18) & 0xff; 232 lmc_mii_readreg(sc, 0, 18) & 0xff;
255 lmc_mii_writereg (sc, 0, 17, T1FRAMER_LCV_MSB); 233 lmc_mii_writereg(sc, 0, 17, T1FRAMER_LCV_MSB);
256 sc->stats.lineCodeViolationCount += 234 sc->extra_stats.lineCodeViolationCount +=
257 (lmc_mii_readreg (sc, 0, 18) & 0xff) << 8; 235 (lmc_mii_readreg(sc, 0, 18) & 0xff) << 8;
258 lmc_mii_writereg (sc, 0, 17, T1FRAMER_AERR); 236 lmc_mii_writereg(sc, 0, 17, T1FRAMER_AERR);
259 regVal = lmc_mii_readreg (sc, 0, 18) & 0xff; 237 regVal = lmc_mii_readreg(sc, 0, 18) & 0xff;
260 238
261 sc->stats.lossOfFrameCount += 239 sc->extra_stats.lossOfFrameCount +=
262 (regVal & T1FRAMER_LOF_MASK) >> 4; 240 (regVal & T1FRAMER_LOF_MASK) >> 4;
263 sc->stats.changeOfFrameAlignmentCount += 241 sc->extra_stats.changeOfFrameAlignmentCount +=
264 (regVal & T1FRAMER_COFA_MASK) >> 2; 242 (regVal & T1FRAMER_COFA_MASK) >> 2;
265 sc->stats.severelyErroredFrameCount += 243 sc->extra_stats.severelyErroredFrameCount +=
266 regVal & T1FRAMER_SEF_MASK; 244 regVal & T1FRAMER_SEF_MASK;
267 } 245 }
268 246 if (copy_to_user(ifr->ifr_data, &sc->lmc_device->stats,
269 if (copy_to_user(ifr->ifr_data, &sc->stats, 247 sizeof(sc->lmc_device->stats)) ||
270 sizeof (struct lmc_statistics))) 248 copy_to_user(ifr->ifr_data + sizeof(sc->lmc_device->stats),
271 ret = -EFAULT; 249 &sc->extra_stats, sizeof(sc->extra_stats)))
272 else 250 ret = -EFAULT;
273 ret = 0; 251 else
274 break; 252 ret = 0;
253 break;
275 254
276 case LMCIOCCLEARLMCSTATS: /*fold01*/ 255 case LMCIOCCLEARLMCSTATS:
277 if (!capable(CAP_NET_ADMIN)){ 256 if (!capable(CAP_NET_ADMIN)) {
278 ret = -EPERM; 257 ret = -EPERM;
279 break; 258 break;
280 } 259 }
281 260
282 memset (&sc->stats, 0, sizeof (struct lmc_statistics)); 261 memset(&sc->lmc_device->stats, 0, sizeof(sc->lmc_device->stats));
283 sc->stats.check = STATCHECK; 262 memset(&sc->extra_stats, 0, sizeof(sc->extra_stats));
284 sc->stats.version_size = (DRIVER_VERSION << 16) + 263 sc->extra_stats.check = STATCHECK;
285 sizeof (struct lmc_statistics); 264 sc->extra_stats.version_size = (DRIVER_VERSION << 16) +
286 sc->stats.lmc_cardtype = sc->lmc_cardtype; 265 sizeof(sc->lmc_device->stats) + sizeof(sc->extra_stats);
287 ret = 0; 266 sc->extra_stats.lmc_cardtype = sc->lmc_cardtype;
288 break; 267 ret = 0;
268 break;
289 269
290 case LMCIOCSETCIRCUIT: /*fold01*/ 270 case LMCIOCSETCIRCUIT: /*fold01*/
291 if (!capable(CAP_NET_ADMIN)){ 271 if (!capable(CAP_NET_ADMIN)){
@@ -330,7 +310,8 @@ int lmc_ioctl (struct net_device *dev, struct ifreq *ifr, int cmd) /*fold00*/
330 ret = -EFAULT; 310 ret = -EFAULT;
331 break; 311 break;
332 } 312 }
333 if (copy_to_user(ifr->ifr_data + sizeof (u32), lmcEventLogBuf, sizeof (lmcEventLogBuf))) 313 if (copy_to_user(ifr->ifr_data + sizeof(u32), lmcEventLogBuf,
314 sizeof(lmcEventLogBuf)))
334 ret = -EFAULT; 315 ret = -EFAULT;
335 else 316 else
336 ret = 0; 317 ret = 0;
@@ -641,14 +622,12 @@ int lmc_ioctl (struct net_device *dev, struct ifreq *ifr, int cmd) /*fold00*/
641/* the watchdog process that cruises around */ 622/* the watchdog process that cruises around */
642static void lmc_watchdog (unsigned long data) /*fold00*/ 623static void lmc_watchdog (unsigned long data) /*fold00*/
643{ 624{
644 struct net_device *dev = (struct net_device *) data; 625 struct net_device *dev = (struct net_device *)data;
645 lmc_softc_t *sc; 626 lmc_softc_t *sc = dev_to_sc(dev);
646 int link_status; 627 int link_status;
647 u_int32_t ticks; 628 u32 ticks;
648 unsigned long flags; 629 unsigned long flags;
649 630
650 sc = dev->priv;
651
652 lmc_trace(dev, "lmc_watchdog in"); 631 lmc_trace(dev, "lmc_watchdog in");
653 632
654 spin_lock_irqsave(&sc->lmc_lock, flags); 633 spin_lock_irqsave(&sc->lmc_lock, flags);
@@ -677,22 +656,22 @@ static void lmc_watchdog (unsigned long data) /*fold00*/
677 * check for a transmit interrupt timeout 656 * check for a transmit interrupt timeout
678 * Has the packet xmt vs xmt serviced threshold been exceeded */ 657 * Has the packet xmt vs xmt serviced threshold been exceeded */
679 if (sc->lmc_taint_tx == sc->lastlmc_taint_tx && 658 if (sc->lmc_taint_tx == sc->lastlmc_taint_tx &&
680 sc->stats.tx_packets > sc->lasttx_packets && 659 sc->lmc_device->stats.tx_packets > sc->lasttx_packets &&
681 sc->tx_TimeoutInd == 0) 660 sc->tx_TimeoutInd == 0)
682 { 661 {
683 662
684 /* wait for the watchdog to come around again */ 663 /* wait for the watchdog to come around again */
685 sc->tx_TimeoutInd = 1; 664 sc->tx_TimeoutInd = 1;
686 } 665 }
687 else if (sc->lmc_taint_tx == sc->lastlmc_taint_tx && 666 else if (sc->lmc_taint_tx == sc->lastlmc_taint_tx &&
688 sc->stats.tx_packets > sc->lasttx_packets && 667 sc->lmc_device->stats.tx_packets > sc->lasttx_packets &&
689 sc->tx_TimeoutInd) 668 sc->tx_TimeoutInd)
690 { 669 {
691 670
692 LMC_EVENT_LOG(LMC_EVENT_XMTINTTMO, LMC_CSR_READ (sc, csr_status), 0); 671 LMC_EVENT_LOG(LMC_EVENT_XMTINTTMO, LMC_CSR_READ (sc, csr_status), 0);
693 672
694 sc->tx_TimeoutDisplay = 1; 673 sc->tx_TimeoutDisplay = 1;
695 sc->stats.tx_TimeoutCnt++; 674 sc->extra_stats.tx_TimeoutCnt++;
696 675
697 /* DEC chip is stuck, hit it with a RESET!!!! */ 676 /* DEC chip is stuck, hit it with a RESET!!!! */
698 lmc_running_reset (dev); 677 lmc_running_reset (dev);
@@ -712,13 +691,11 @@ static void lmc_watchdog (unsigned long data) /*fold00*/
712 /* reset the transmit timeout detection flag */ 691 /* reset the transmit timeout detection flag */
713 sc->tx_TimeoutInd = 0; 692 sc->tx_TimeoutInd = 0;
714 sc->lastlmc_taint_tx = sc->lmc_taint_tx; 693 sc->lastlmc_taint_tx = sc->lmc_taint_tx;
715 sc->lasttx_packets = sc->stats.tx_packets; 694 sc->lasttx_packets = sc->lmc_device->stats.tx_packets;
716 } 695 } else {
717 else
718 {
719 sc->tx_TimeoutInd = 0; 696 sc->tx_TimeoutInd = 0;
720 sc->lastlmc_taint_tx = sc->lmc_taint_tx; 697 sc->lastlmc_taint_tx = sc->lmc_taint_tx;
721 sc->lasttx_packets = sc->stats.tx_packets; 698 sc->lasttx_packets = sc->lmc_device->stats.tx_packets;
722 } 699 }
723 700
724 /* --- end time out check ----------------------------------- */ 701 /* --- end time out check ----------------------------------- */
@@ -748,19 +725,7 @@ static void lmc_watchdog (unsigned long data) /*fold00*/
748 sc->last_link_status = 1; 725 sc->last_link_status = 1;
749 /* lmc_reset (sc); Again why reset??? */ 726 /* lmc_reset (sc); Again why reset??? */
750 727
751 /* Inform the world that link protocol is back up. */
752 netif_carrier_on(dev); 728 netif_carrier_on(dev);
753
754 /* Now we have to tell the syncppp that we had an outage
755 * and that it should deal. Calling sppp_reopen here
756 * should do the trick, but we may have to call sppp_close
757 * when the link goes down, and call sppp_open here.
758 * Subject to more testing.
759 * --bbraun
760 */
761
762 lmc_proto_reopen(sc);
763
764 } 729 }
765 730
766 /* Call media specific watchdog functions */ 731 /* Call media specific watchdog functions */
@@ -816,114 +781,93 @@ kick_timer:
816 781
817} 782}
818 783
819static void lmc_setup(struct net_device * const dev) /*fold00*/ 784static int lmc_attach(struct net_device *dev, unsigned short encoding,
785 unsigned short parity)
820{ 786{
821 lmc_trace(dev, "lmc_setup in"); 787 if (encoding == ENCODING_NRZ && parity == PARITY_CRC16_PR1_CCITT)
822 788 return 0;
823 dev->type = ARPHRD_HDLC; 789 return -EINVAL;
824 dev->hard_start_xmit = lmc_start_xmit;
825 dev->open = lmc_open;
826 dev->stop = lmc_close;
827 dev->get_stats = lmc_get_stats;
828 dev->do_ioctl = lmc_ioctl;
829 dev->tx_timeout = lmc_driver_timeout;
830 dev->watchdog_timeo = (HZ); /* 1 second */
831
832 lmc_trace(dev, "lmc_setup out");
833} 790}
834 791
835
836static int __devinit lmc_init_one(struct pci_dev *pdev, 792static int __devinit lmc_init_one(struct pci_dev *pdev,
837 const struct pci_device_id *ent) 793 const struct pci_device_id *ent)
838{ 794{
839 struct net_device *dev; 795 lmc_softc_t *sc;
840 lmc_softc_t *sc; 796 struct net_device *dev;
841 u16 subdevice; 797 u16 subdevice;
842 u_int16_t AdapModelNum; 798 u16 AdapModelNum;
843 int err = -ENOMEM; 799 int err;
844 static int cards_found; 800 static int cards_found;
845#ifndef GCOM 801
846 /* We name by type not by vendor */ 802 /* lmc_trace(dev, "lmc_init_one in"); */
847 static const char lmcname[] = "hdlc%d"; 803
848#else 804 err = pci_enable_device(pdev);
849 /* 805 if (err) {
850 * GCOM uses LMC vendor name so that clients can know which card 806 printk(KERN_ERR "lmc: pci enable failed: %d\n", err);
851 * to attach to. 807 return err;
852 */ 808 }
853 static const char lmcname[] = "lmc%d";
854#endif
855
856
857 /*
858 * Allocate our own device structure
859 */
860 dev = alloc_netdev(sizeof(lmc_softc_t), lmcname, lmc_setup);
861 if (!dev) {
862 printk (KERN_ERR "lmc:alloc_netdev for device failed\n");
863 goto out1;
864 }
865
866 lmc_trace(dev, "lmc_init_one in");
867
868 err = pci_enable_device(pdev);
869 if (err) {
870 printk(KERN_ERR "lmc: pci enable failed:%d\n", err);
871 goto out2;
872 }
873
874 if (pci_request_regions(pdev, "lmc")) {
875 printk(KERN_ERR "lmc: pci_request_region failed\n");
876 err = -EIO;
877 goto out3;
878 }
879
880 pci_set_drvdata(pdev, dev);
881
882 if(lmc_first_load == 0){
883 printk(KERN_INFO "Lan Media Corporation WAN Driver Version %d.%d.%d\n",
884 DRIVER_MAJOR_VERSION, DRIVER_MINOR_VERSION,DRIVER_SUB_VERSION);
885 lmc_first_load = 1;
886 }
887
888 sc = dev->priv;
889 sc->lmc_device = dev;
890 sc->name = dev->name;
891
892 /* Initialize the sppp layer */
893 /* An ioctl can cause a subsequent detach for raw frame interface */
894 dev->ml_priv = sc;
895 sc->if_type = LMC_PPP;
896 sc->check = 0xBEAFCAFE;
897 dev->base_addr = pci_resource_start(pdev, 0);
898 dev->irq = pdev->irq;
899
900 SET_NETDEV_DEV(dev, &pdev->dev);
901
902 /*
903 * This will get the protocol layer ready and do any 1 time init's
904 * Must have a valid sc and dev structure
905 */
906 lmc_proto_init(sc);
907
908 lmc_proto_attach(sc);
909 809
910 /* 810 err = pci_request_regions(pdev, "lmc");
911 * Why were we changing this??? 811 if (err) {
912 dev->tx_queue_len = 100; 812 printk(KERN_ERR "lmc: pci_request_region failed\n");
913 */ 813 goto err_req_io;
814 }
914 815
915 /* Init the spin lock so can call it latter */ 816 /*
817 * Allocate our own device structure
818 */
819 sc = kzalloc(sizeof(lmc_softc_t), GFP_KERNEL);
820 if (!sc) {
821 err = -ENOMEM;
822 goto err_kzalloc;
823 }
916 824
917 spin_lock_init(&sc->lmc_lock); 825 dev = alloc_hdlcdev(sc);
918 pci_set_master(pdev); 826 if (!dev) {
827 printk(KERN_ERR "lmc:alloc_netdev for device failed\n");
828 goto err_hdlcdev;
829 }
919 830
920 printk ("%s: detected at %lx, irq %d\n", dev->name,
921 dev->base_addr, dev->irq);
922 831
923 if (register_netdev (dev) != 0) { 832 dev->type = ARPHRD_HDLC;
924 printk (KERN_ERR "%s: register_netdev failed.\n", dev->name); 833 dev_to_hdlc(dev)->xmit = lmc_start_xmit;
925 goto out4; 834 dev_to_hdlc(dev)->attach = lmc_attach;
926 } 835 dev->open = lmc_open;
836 dev->stop = lmc_close;
837 dev->get_stats = lmc_get_stats;
838 dev->do_ioctl = lmc_ioctl;
839 dev->tx_timeout = lmc_driver_timeout;
840 dev->watchdog_timeo = HZ; /* 1 second */
841 dev->tx_queue_len = 100;
842 sc->lmc_device = dev;
843 sc->name = dev->name;
844 sc->if_type = LMC_PPP;
845 sc->check = 0xBEAFCAFE;
846 dev->base_addr = pci_resource_start(pdev, 0);
847 dev->irq = pdev->irq;
848 pci_set_drvdata(pdev, dev);
849 SET_NETDEV_DEV(dev, &pdev->dev);
850
851 /*
852 * This will get the protocol layer ready and do any 1 time init's
853 * Must have a valid sc and dev structure
854 */
855 lmc_proto_attach(sc);
856
857 /* Init the spin lock so can call it latter */
858
859 spin_lock_init(&sc->lmc_lock);
860 pci_set_master(pdev);
861
862 printk(KERN_INFO "%s: detected at %lx, irq %d\n", dev->name,
863 dev->base_addr, dev->irq);
864
865 err = register_hdlc_device(dev);
866 if (err) {
867 printk(KERN_ERR "%s: register_netdev failed.\n", dev->name);
868 free_netdev(dev);
869 goto err_hdlcdev;
870 }
927 871
928 sc->lmc_cardtype = LMC_CARDTYPE_UNKNOWN; 872 sc->lmc_cardtype = LMC_CARDTYPE_UNKNOWN;
929 sc->lmc_timing = LMC_CTL_CLOCK_SOURCE_EXT; 873 sc->lmc_timing = LMC_CTL_CLOCK_SOURCE_EXT;
@@ -939,27 +883,27 @@ static int __devinit lmc_init_one(struct pci_dev *pdev,
939 883
940 switch (subdevice) { 884 switch (subdevice) {
941 case PCI_DEVICE_ID_LMC_HSSI: 885 case PCI_DEVICE_ID_LMC_HSSI:
942 printk ("%s: LMC HSSI\n", dev->name); 886 printk(KERN_INFO "%s: LMC HSSI\n", dev->name);
943 sc->lmc_cardtype = LMC_CARDTYPE_HSSI; 887 sc->lmc_cardtype = LMC_CARDTYPE_HSSI;
944 sc->lmc_media = &lmc_hssi_media; 888 sc->lmc_media = &lmc_hssi_media;
945 break; 889 break;
946 case PCI_DEVICE_ID_LMC_DS3: 890 case PCI_DEVICE_ID_LMC_DS3:
947 printk ("%s: LMC DS3\n", dev->name); 891 printk(KERN_INFO "%s: LMC DS3\n", dev->name);
948 sc->lmc_cardtype = LMC_CARDTYPE_DS3; 892 sc->lmc_cardtype = LMC_CARDTYPE_DS3;
949 sc->lmc_media = &lmc_ds3_media; 893 sc->lmc_media = &lmc_ds3_media;
950 break; 894 break;
951 case PCI_DEVICE_ID_LMC_SSI: 895 case PCI_DEVICE_ID_LMC_SSI:
952 printk ("%s: LMC SSI\n", dev->name); 896 printk(KERN_INFO "%s: LMC SSI\n", dev->name);
953 sc->lmc_cardtype = LMC_CARDTYPE_SSI; 897 sc->lmc_cardtype = LMC_CARDTYPE_SSI;
954 sc->lmc_media = &lmc_ssi_media; 898 sc->lmc_media = &lmc_ssi_media;
955 break; 899 break;
956 case PCI_DEVICE_ID_LMC_T1: 900 case PCI_DEVICE_ID_LMC_T1:
957 printk ("%s: LMC T1\n", dev->name); 901 printk(KERN_INFO "%s: LMC T1\n", dev->name);
958 sc->lmc_cardtype = LMC_CARDTYPE_T1; 902 sc->lmc_cardtype = LMC_CARDTYPE_T1;
959 sc->lmc_media = &lmc_t1_media; 903 sc->lmc_media = &lmc_t1_media;
960 break; 904 break;
961 default: 905 default:
962 printk (KERN_WARNING "%s: LMC UNKOWN CARD!\n", dev->name); 906 printk(KERN_WARNING "%s: LMC UNKOWN CARD!\n", dev->name);
963 break; 907 break;
964 } 908 }
965 909
@@ -977,32 +921,28 @@ static int __devinit lmc_init_one(struct pci_dev *pdev,
977 */ 921 */
978 AdapModelNum = (lmc_mii_readreg (sc, 0, 3) & 0x3f0) >> 4; 922 AdapModelNum = (lmc_mii_readreg (sc, 0, 3) & 0x3f0) >> 4;
979 923
980 if ((AdapModelNum == LMC_ADAP_T1 924 if ((AdapModelNum != LMC_ADAP_T1 || /* detect LMC1200 */
981 && subdevice == PCI_DEVICE_ID_LMC_T1) || /* detect LMC1200 */ 925 subdevice != PCI_DEVICE_ID_LMC_T1) &&
982 (AdapModelNum == LMC_ADAP_SSI 926 (AdapModelNum != LMC_ADAP_SSI || /* detect LMC1000 */
983 && subdevice == PCI_DEVICE_ID_LMC_SSI) || /* detect LMC1000 */ 927 subdevice != PCI_DEVICE_ID_LMC_SSI) &&
984 (AdapModelNum == LMC_ADAP_DS3 928 (AdapModelNum != LMC_ADAP_DS3 || /* detect LMC5245 */
985 && subdevice == PCI_DEVICE_ID_LMC_DS3) || /* detect LMC5245 */ 929 subdevice != PCI_DEVICE_ID_LMC_DS3) &&
986 (AdapModelNum == LMC_ADAP_HSSI 930 (AdapModelNum != LMC_ADAP_HSSI || /* detect LMC5200 */
987 && subdevice == PCI_DEVICE_ID_LMC_HSSI)) 931 subdevice != PCI_DEVICE_ID_LMC_HSSI))
988 { /* detect LMC5200 */ 932 printk(KERN_WARNING "%s: Model number (%d) miscompare for PCI"
933 " Subsystem ID = 0x%04x\n",
934 dev->name, AdapModelNum, subdevice);
989 935
990 }
991 else {
992 printk ("%s: Model number (%d) miscompare for PCI Subsystem ID = 0x%04x\n",
993 dev->name, AdapModelNum, subdevice);
994// return (NULL);
995 }
996 /* 936 /*
997 * reset clock 937 * reset clock
998 */ 938 */
999 LMC_CSR_WRITE (sc, csr_gp_timer, 0xFFFFFFFFUL); 939 LMC_CSR_WRITE (sc, csr_gp_timer, 0xFFFFFFFFUL);
1000 940
1001 sc->board_idx = cards_found++; 941 sc->board_idx = cards_found++;
1002 sc->stats.check = STATCHECK; 942 sc->extra_stats.check = STATCHECK;
1003 sc->stats.version_size = (DRIVER_VERSION << 16) + 943 sc->extra_stats.version_size = (DRIVER_VERSION << 16) +
1004 sizeof (struct lmc_statistics); 944 sizeof(sc->lmc_device->stats) + sizeof(sc->extra_stats);
1005 sc->stats.lmc_cardtype = sc->lmc_cardtype; 945 sc->extra_stats.lmc_cardtype = sc->lmc_cardtype;
1006 946
1007 sc->lmc_ok = 0; 947 sc->lmc_ok = 0;
1008 sc->last_link_status = 0; 948 sc->last_link_status = 0;
@@ -1010,58 +950,51 @@ static int __devinit lmc_init_one(struct pci_dev *pdev,
1010 lmc_trace(dev, "lmc_init_one out"); 950 lmc_trace(dev, "lmc_init_one out");
1011 return 0; 951 return 0;
1012 952
1013 out4: 953err_hdlcdev:
1014 lmc_proto_detach(sc); 954 pci_set_drvdata(pdev, NULL);
1015 out3: 955 kfree(sc);
1016 if (pdev) { 956err_kzalloc:
1017 pci_release_regions(pdev); 957 pci_release_regions(pdev);
1018 pci_set_drvdata(pdev, NULL); 958err_req_io:
1019 } 959 pci_disable_device(pdev);
1020 out2: 960 return err;
1021 free_netdev(dev);
1022 out1:
1023 return err;
1024} 961}
1025 962
1026/* 963/*
1027 * Called from pci when removing module. 964 * Called from pci when removing module.
1028 */ 965 */
1029static void __devexit lmc_remove_one (struct pci_dev *pdev) 966static void __devexit lmc_remove_one(struct pci_dev *pdev)
1030{ 967{
1031 struct net_device *dev = pci_get_drvdata(pdev); 968 struct net_device *dev = pci_get_drvdata(pdev);
1032 969
1033 if (dev) { 970 if (dev) {
1034 lmc_softc_t *sc = dev->priv; 971 printk(KERN_DEBUG "%s: removing...\n", dev->name);
1035 972 unregister_hdlc_device(dev);
1036 printk("%s: removing...\n", dev->name); 973 free_netdev(dev);
1037 lmc_proto_detach(sc); 974 pci_release_regions(pdev);
1038 unregister_netdev(dev); 975 pci_disable_device(pdev);
1039 free_netdev(dev); 976 pci_set_drvdata(pdev, NULL);
1040 pci_release_regions(pdev); 977 }
1041 pci_disable_device(pdev);
1042 pci_set_drvdata(pdev, NULL);
1043 }
1044} 978}
1045 979
1046/* After this is called, packets can be sent. 980/* After this is called, packets can be sent.
1047 * Does not initialize the addresses 981 * Does not initialize the addresses
1048 */ 982 */
1049static int lmc_open (struct net_device *dev) /*fold00*/ 983static int lmc_open(struct net_device *dev)
1050{ 984{
1051 lmc_softc_t *sc = dev->priv; 985 lmc_softc_t *sc = dev_to_sc(dev);
986 int err;
1052 987
1053 lmc_trace(dev, "lmc_open in"); 988 lmc_trace(dev, "lmc_open in");
1054 989
1055 lmc_led_on(sc, LMC_DS3_LED0); 990 lmc_led_on(sc, LMC_DS3_LED0);
1056 991
1057 lmc_dec_reset (sc); 992 lmc_dec_reset(sc);
1058 lmc_reset (sc); 993 lmc_reset(sc);
1059
1060 LMC_EVENT_LOG(LMC_EVENT_RESET1, LMC_CSR_READ (sc, csr_status), 0);
1061 LMC_EVENT_LOG(LMC_EVENT_RESET2,
1062 lmc_mii_readreg (sc, 0, 16),
1063 lmc_mii_readreg (sc, 0, 17));
1064 994
995 LMC_EVENT_LOG(LMC_EVENT_RESET1, LMC_CSR_READ(sc, csr_status), 0);
996 LMC_EVENT_LOG(LMC_EVENT_RESET2, lmc_mii_readreg(sc, 0, 16),
997 lmc_mii_readreg(sc, 0, 17));
1065 998
1066 if (sc->lmc_ok){ 999 if (sc->lmc_ok){
1067 lmc_trace(dev, "lmc_open lmc_ok out"); 1000 lmc_trace(dev, "lmc_open lmc_ok out");
@@ -1106,14 +1039,14 @@ static int lmc_open (struct net_device *dev) /*fold00*/
1106 1039
1107 /* dev->flags |= IFF_UP; */ 1040 /* dev->flags |= IFF_UP; */
1108 1041
1109 lmc_proto_open(sc); 1042 if ((err = lmc_proto_open(sc)) != 0)
1043 return err;
1110 1044
1111 dev->do_ioctl = lmc_ioctl; 1045 dev->do_ioctl = lmc_ioctl;
1112 1046
1113 1047
1114 netif_start_queue(dev); 1048 netif_start_queue(dev);
1115 1049 sc->extra_stats.tx_tbusy0++;
1116 sc->stats.tx_tbusy0++ ;
1117 1050
1118 /* 1051 /*
1119 * select what interrupts we want to get 1052 * select what interrupts we want to get
@@ -1165,8 +1098,7 @@ static int lmc_open (struct net_device *dev) /*fold00*/
1165 1098
1166static void lmc_running_reset (struct net_device *dev) /*fold00*/ 1099static void lmc_running_reset (struct net_device *dev) /*fold00*/
1167{ 1100{
1168 1101 lmc_softc_t *sc = dev_to_sc(dev);
1169 lmc_softc_t *sc = (lmc_softc_t *) dev->priv;
1170 1102
1171 lmc_trace(dev, "lmc_runnig_reset in"); 1103 lmc_trace(dev, "lmc_runnig_reset in");
1172 1104
@@ -1184,7 +1116,7 @@ static void lmc_running_reset (struct net_device *dev) /*fold00*/
1184 netif_wake_queue(dev); 1116 netif_wake_queue(dev);
1185 1117
1186 sc->lmc_txfull = 0; 1118 sc->lmc_txfull = 0;
1187 sc->stats.tx_tbusy0++ ; 1119 sc->extra_stats.tx_tbusy0++;
1188 1120
1189 sc->lmc_intrmask = TULIP_DEFAULT_INTR_MASK; 1121 sc->lmc_intrmask = TULIP_DEFAULT_INTR_MASK;
1190 LMC_CSR_WRITE (sc, csr_intr, sc->lmc_intrmask); 1122 LMC_CSR_WRITE (sc, csr_intr, sc->lmc_intrmask);
@@ -1200,14 +1132,13 @@ static void lmc_running_reset (struct net_device *dev) /*fold00*/
1200 * This disables the timer for the watchdog and keepalives, 1132 * This disables the timer for the watchdog and keepalives,
1201 * and disables the irq for dev. 1133 * and disables the irq for dev.
1202 */ 1134 */
1203static int lmc_close (struct net_device *dev) /*fold00*/ 1135static int lmc_close(struct net_device *dev)
1204{ 1136{
1205 /* not calling release_region() as we should */ 1137 /* not calling release_region() as we should */
1206 lmc_softc_t *sc; 1138 lmc_softc_t *sc = dev_to_sc(dev);
1207 1139
1208 lmc_trace(dev, "lmc_close in"); 1140 lmc_trace(dev, "lmc_close in");
1209 1141
1210 sc = dev->priv;
1211 sc->lmc_ok = 0; 1142 sc->lmc_ok = 0;
1212 sc->lmc_media->set_link_status (sc, 0); 1143 sc->lmc_media->set_link_status (sc, 0);
1213 del_timer (&sc->timer); 1144 del_timer (&sc->timer);
@@ -1215,7 +1146,7 @@ static int lmc_close (struct net_device *dev) /*fold00*/
1215 lmc_ifdown (dev); 1146 lmc_ifdown (dev);
1216 1147
1217 lmc_trace(dev, "lmc_close out"); 1148 lmc_trace(dev, "lmc_close out");
1218 1149
1219 return 0; 1150 return 0;
1220} 1151}
1221 1152
@@ -1223,16 +1154,16 @@ static int lmc_close (struct net_device *dev) /*fold00*/
1223/* When the interface goes down, this is called */ 1154/* When the interface goes down, this is called */
1224static int lmc_ifdown (struct net_device *dev) /*fold00*/ 1155static int lmc_ifdown (struct net_device *dev) /*fold00*/
1225{ 1156{
1226 lmc_softc_t *sc = dev->priv; 1157 lmc_softc_t *sc = dev_to_sc(dev);
1227 u32 csr6; 1158 u32 csr6;
1228 int i; 1159 int i;
1229 1160
1230 lmc_trace(dev, "lmc_ifdown in"); 1161 lmc_trace(dev, "lmc_ifdown in");
1231 1162
1232 /* Don't let anything else go on right now */ 1163 /* Don't let anything else go on right now */
1233 // dev->start = 0; 1164 // dev->start = 0;
1234 netif_stop_queue(dev); 1165 netif_stop_queue(dev);
1235 sc->stats.tx_tbusy1++ ; 1166 sc->extra_stats.tx_tbusy1++;
1236 1167
1237 /* stop interrupts */ 1168 /* stop interrupts */
1238 /* Clear the interrupt mask */ 1169 /* Clear the interrupt mask */
@@ -1244,8 +1175,8 @@ static int lmc_ifdown (struct net_device *dev) /*fold00*/
1244 csr6 &= ~LMC_DEC_SR; /* Turn off the Receive bit */ 1175 csr6 &= ~LMC_DEC_SR; /* Turn off the Receive bit */
1245 LMC_CSR_WRITE (sc, csr_command, csr6); 1176 LMC_CSR_WRITE (sc, csr_command, csr6);
1246 1177
1247 sc->stats.rx_missed_errors += 1178 sc->lmc_device->stats.rx_missed_errors +=
1248 LMC_CSR_READ (sc, csr_missed_frames) & 0xffff; 1179 LMC_CSR_READ(sc, csr_missed_frames) & 0xffff;
1249 1180
1250 /* release the interrupt */ 1181 /* release the interrupt */
1251 if(sc->got_irq == 1){ 1182 if(sc->got_irq == 1){
@@ -1276,7 +1207,7 @@ static int lmc_ifdown (struct net_device *dev) /*fold00*/
1276 lmc_led_off (sc, LMC_MII16_LED_ALL); 1207 lmc_led_off (sc, LMC_MII16_LED_ALL);
1277 1208
1278 netif_wake_queue(dev); 1209 netif_wake_queue(dev);
1279 sc->stats.tx_tbusy0++ ; 1210 sc->extra_stats.tx_tbusy0++;
1280 1211
1281 lmc_trace(dev, "lmc_ifdown out"); 1212 lmc_trace(dev, "lmc_ifdown out");
1282 1213
@@ -1289,7 +1220,7 @@ static int lmc_ifdown (struct net_device *dev) /*fold00*/
1289static irqreturn_t lmc_interrupt (int irq, void *dev_instance) /*fold00*/ 1220static irqreturn_t lmc_interrupt (int irq, void *dev_instance) /*fold00*/
1290{ 1221{
1291 struct net_device *dev = (struct net_device *) dev_instance; 1222 struct net_device *dev = (struct net_device *) dev_instance;
1292 lmc_softc_t *sc; 1223 lmc_softc_t *sc = dev_to_sc(dev);
1293 u32 csr; 1224 u32 csr;
1294 int i; 1225 int i;
1295 s32 stat; 1226 s32 stat;
@@ -1300,8 +1231,6 @@ static irqreturn_t lmc_interrupt (int irq, void *dev_instance) /*fold00*/
1300 1231
1301 lmc_trace(dev, "lmc_interrupt in"); 1232 lmc_trace(dev, "lmc_interrupt in");
1302 1233
1303 sc = dev->priv;
1304
1305 spin_lock(&sc->lmc_lock); 1234 spin_lock(&sc->lmc_lock);
1306 1235
1307 /* 1236 /*
@@ -1354,7 +1283,7 @@ static irqreturn_t lmc_interrupt (int irq, void *dev_instance) /*fold00*/
1354 1283
1355 int n_compl = 0 ; 1284 int n_compl = 0 ;
1356 /* reset the transmit timeout detection flag -baz */ 1285 /* reset the transmit timeout detection flag -baz */
1357 sc->stats.tx_NoCompleteCnt = 0; 1286 sc->extra_stats.tx_NoCompleteCnt = 0;
1358 1287
1359 badtx = sc->lmc_taint_tx; 1288 badtx = sc->lmc_taint_tx;
1360 i = badtx % LMC_TXDESCS; 1289 i = badtx % LMC_TXDESCS;
@@ -1378,27 +1307,25 @@ static irqreturn_t lmc_interrupt (int irq, void *dev_instance) /*fold00*/
1378 if (sc->lmc_txq[i] == NULL) 1307 if (sc->lmc_txq[i] == NULL)
1379 continue; 1308 continue;
1380 1309
1381 /* 1310 /*
1382 * Check the total error summary to look for any errors 1311 * Check the total error summary to look for any errors
1383 */ 1312 */
1384 if (stat & 0x8000) { 1313 if (stat & 0x8000) {
1385 sc->stats.tx_errors++; 1314 sc->lmc_device->stats.tx_errors++;
1386 if (stat & 0x4104) 1315 if (stat & 0x4104)
1387 sc->stats.tx_aborted_errors++; 1316 sc->lmc_device->stats.tx_aborted_errors++;
1388 if (stat & 0x0C00) 1317 if (stat & 0x0C00)
1389 sc->stats.tx_carrier_errors++; 1318 sc->lmc_device->stats.tx_carrier_errors++;
1390 if (stat & 0x0200) 1319 if (stat & 0x0200)
1391 sc->stats.tx_window_errors++; 1320 sc->lmc_device->stats.tx_window_errors++;
1392 if (stat & 0x0002) 1321 if (stat & 0x0002)
1393 sc->stats.tx_fifo_errors++; 1322 sc->lmc_device->stats.tx_fifo_errors++;
1394 } 1323 } else {
1395 else { 1324 sc->lmc_device->stats.tx_bytes += sc->lmc_txring[i].length & 0x7ff;
1396 1325
1397 sc->stats.tx_bytes += sc->lmc_txring[i].length & 0x7ff; 1326 sc->lmc_device->stats.tx_packets++;
1398
1399 sc->stats.tx_packets++;
1400 } 1327 }
1401 1328
1402 // dev_kfree_skb(sc->lmc_txq[i]); 1329 // dev_kfree_skb(sc->lmc_txq[i]);
1403 dev_kfree_skb_irq(sc->lmc_txq[i]); 1330 dev_kfree_skb_irq(sc->lmc_txq[i]);
1404 sc->lmc_txq[i] = NULL; 1331 sc->lmc_txq[i] = NULL;
@@ -1415,13 +1342,13 @@ static irqreturn_t lmc_interrupt (int irq, void *dev_instance) /*fold00*/
1415 LMC_EVENT_LOG(LMC_EVENT_TBUSY0, n_compl, 0); 1342 LMC_EVENT_LOG(LMC_EVENT_TBUSY0, n_compl, 0);
1416 sc->lmc_txfull = 0; 1343 sc->lmc_txfull = 0;
1417 netif_wake_queue(dev); 1344 netif_wake_queue(dev);
1418 sc->stats.tx_tbusy0++ ; 1345 sc->extra_stats.tx_tbusy0++;
1419 1346
1420 1347
1421#ifdef DEBUG 1348#ifdef DEBUG
1422 sc->stats.dirtyTx = badtx; 1349 sc->extra_stats.dirtyTx = badtx;
1423 sc->stats.lmc_next_tx = sc->lmc_next_tx; 1350 sc->extra_stats.lmc_next_tx = sc->lmc_next_tx;
1424 sc->stats.lmc_txfull = sc->lmc_txfull; 1351 sc->extra_stats.lmc_txfull = sc->lmc_txfull;
1425#endif 1352#endif
1426 sc->lmc_taint_tx = badtx; 1353 sc->lmc_taint_tx = badtx;
1427 1354
@@ -1476,9 +1403,9 @@ lmc_int_fail_out:
1476 return IRQ_RETVAL(handled); 1403 return IRQ_RETVAL(handled);
1477} 1404}
1478 1405
1479static int lmc_start_xmit (struct sk_buff *skb, struct net_device *dev) /*fold00*/ 1406static int lmc_start_xmit(struct sk_buff *skb, struct net_device *dev)
1480{ 1407{
1481 lmc_softc_t *sc; 1408 lmc_softc_t *sc = dev_to_sc(dev);
1482 u32 flag; 1409 u32 flag;
1483 int entry; 1410 int entry;
1484 int ret = 0; 1411 int ret = 0;
@@ -1486,8 +1413,6 @@ static int lmc_start_xmit (struct sk_buff *skb, struct net_device *dev) /*fold00
1486 1413
1487 lmc_trace(dev, "lmc_start_xmit in"); 1414 lmc_trace(dev, "lmc_start_xmit in");
1488 1415
1489 sc = dev->priv;
1490
1491 spin_lock_irqsave(&sc->lmc_lock, flags); 1416 spin_lock_irqsave(&sc->lmc_lock, flags);
1492 1417
1493 /* normal path, tbusy known to be zero */ 1418 /* normal path, tbusy known to be zero */
@@ -1532,8 +1457,8 @@ static int lmc_start_xmit (struct sk_buff *skb, struct net_device *dev) /*fold00
1532 if (sc->lmc_next_tx - sc->lmc_taint_tx >= LMC_TXDESCS - 1) 1457 if (sc->lmc_next_tx - sc->lmc_taint_tx >= LMC_TXDESCS - 1)
1533 { /* ring full, go busy */ 1458 { /* ring full, go busy */
1534 sc->lmc_txfull = 1; 1459 sc->lmc_txfull = 1;
1535 netif_stop_queue(dev); 1460 netif_stop_queue(dev);
1536 sc->stats.tx_tbusy1++ ; 1461 sc->extra_stats.tx_tbusy1++;
1537 LMC_EVENT_LOG(LMC_EVENT_TBUSY1, entry, 0); 1462 LMC_EVENT_LOG(LMC_EVENT_TBUSY1, entry, 0);
1538 } 1463 }
1539#endif 1464#endif
@@ -1550,7 +1475,7 @@ static int lmc_start_xmit (struct sk_buff *skb, struct net_device *dev) /*fold00
1550 * the watchdog timer handler. -baz 1475 * the watchdog timer handler. -baz
1551 */ 1476 */
1552 1477
1553 sc->stats.tx_NoCompleteCnt++; 1478 sc->extra_stats.tx_NoCompleteCnt++;
1554 sc->lmc_next_tx++; 1479 sc->lmc_next_tx++;
1555 1480
1556 /* give ownership to the chip */ 1481 /* give ownership to the chip */
@@ -1569,9 +1494,9 @@ static int lmc_start_xmit (struct sk_buff *skb, struct net_device *dev) /*fold00
1569} 1494}
1570 1495
1571 1496
1572static int lmc_rx (struct net_device *dev) /*fold00*/ 1497static int lmc_rx(struct net_device *dev)
1573{ 1498{
1574 lmc_softc_t *sc; 1499 lmc_softc_t *sc = dev_to_sc(dev);
1575 int i; 1500 int i;
1576 int rx_work_limit = LMC_RXDESCS; 1501 int rx_work_limit = LMC_RXDESCS;
1577 unsigned int next_rx; 1502 unsigned int next_rx;
@@ -1583,8 +1508,6 @@ static int lmc_rx (struct net_device *dev) /*fold00*/
1583 1508
1584 lmc_trace(dev, "lmc_rx in"); 1509 lmc_trace(dev, "lmc_rx in");
1585 1510
1586 sc = dev->priv;
1587
1588 lmc_led_on(sc, LMC_DS3_LED3); 1511 lmc_led_on(sc, LMC_DS3_LED3);
1589 1512
1590 rxIntLoopCnt = 0; /* debug -baz */ 1513 rxIntLoopCnt = 0; /* debug -baz */
@@ -1597,39 +1520,38 @@ static int lmc_rx (struct net_device *dev) /*fold00*/
1597 rxIntLoopCnt++; /* debug -baz */ 1520 rxIntLoopCnt++; /* debug -baz */
1598 len = ((stat & LMC_RDES_FRAME_LENGTH) >> RDES_FRAME_LENGTH_BIT_NUMBER); 1521 len = ((stat & LMC_RDES_FRAME_LENGTH) >> RDES_FRAME_LENGTH_BIT_NUMBER);
1599 if ((stat & 0x0300) != 0x0300) { /* Check first segment and last segment */ 1522 if ((stat & 0x0300) != 0x0300) { /* Check first segment and last segment */
1600 if ((stat & 0x0000ffff) != 0x7fff) { 1523 if ((stat & 0x0000ffff) != 0x7fff) {
1601 /* Oversized frame */ 1524 /* Oversized frame */
1602 sc->stats.rx_length_errors++; 1525 sc->lmc_device->stats.rx_length_errors++;
1603 goto skip_packet; 1526 goto skip_packet;
1604 } 1527 }
1605 } 1528 }
1606
1607 if(stat & 0x00000008){ /* Catch a dribbling bit error */
1608 sc->stats.rx_errors++;
1609 sc->stats.rx_frame_errors++;
1610 goto skip_packet;
1611 }
1612 1529
1530 if (stat & 0x00000008) { /* Catch a dribbling bit error */
1531 sc->lmc_device->stats.rx_errors++;
1532 sc->lmc_device->stats.rx_frame_errors++;
1533 goto skip_packet;
1534 }
1613 1535
1614 if(stat & 0x00000004){ /* Catch a CRC error by the Xilinx */
1615 sc->stats.rx_errors++;
1616 sc->stats.rx_crc_errors++;
1617 goto skip_packet;
1618 }
1619 1536
1537 if (stat & 0x00000004) { /* Catch a CRC error by the Xilinx */
1538 sc->lmc_device->stats.rx_errors++;
1539 sc->lmc_device->stats.rx_crc_errors++;
1540 goto skip_packet;
1541 }
1620 1542
1621 if (len > LMC_PKT_BUF_SZ){ 1543 if (len > LMC_PKT_BUF_SZ) {
1622 sc->stats.rx_length_errors++; 1544 sc->lmc_device->stats.rx_length_errors++;
1623 localLengthErrCnt++; 1545 localLengthErrCnt++;
1624 goto skip_packet; 1546 goto skip_packet;
1625 } 1547 }
1626 1548
1627 if (len < sc->lmc_crcSize + 2) { 1549 if (len < sc->lmc_crcSize + 2) {
1628 sc->stats.rx_length_errors++; 1550 sc->lmc_device->stats.rx_length_errors++;
1629 sc->stats.rx_SmallPktCnt++; 1551 sc->extra_stats.rx_SmallPktCnt++;
1630 localLengthErrCnt++; 1552 localLengthErrCnt++;
1631 goto skip_packet; 1553 goto skip_packet;
1632 } 1554 }
1633 1555
1634 if(stat & 0x00004000){ 1556 if(stat & 0x00004000){
1635 printk(KERN_WARNING "%s: Receiver descriptor error, receiver out of sync?\n", dev->name); 1557 printk(KERN_WARNING "%s: Receiver descriptor error, receiver out of sync?\n", dev->name);
@@ -1656,8 +1578,8 @@ static int lmc_rx (struct net_device *dev) /*fold00*/
1656 } 1578 }
1657 1579
1658 dev->last_rx = jiffies; 1580 dev->last_rx = jiffies;
1659 sc->stats.rx_packets++; 1581 sc->lmc_device->stats.rx_packets++;
1660 sc->stats.rx_bytes += len; 1582 sc->lmc_device->stats.rx_bytes += len;
1661 1583
1662 LMC_CONSOLE_LOG("recv", skb->data, len); 1584 LMC_CONSOLE_LOG("recv", skb->data, len);
1663 1585
@@ -1679,7 +1601,6 @@ static int lmc_rx (struct net_device *dev) /*fold00*/
1679 1601
1680 skb_put (skb, len); 1602 skb_put (skb, len);
1681 skb->protocol = lmc_proto_type(sc, skb); 1603 skb->protocol = lmc_proto_type(sc, skb);
1682 skb->protocol = htons(ETH_P_WAN_PPP);
1683 skb_reset_mac_header(skb); 1604 skb_reset_mac_header(skb);
1684 /* skb_reset_network_header(skb); */ 1605 /* skb_reset_network_header(skb); */
1685 skb->dev = dev; 1606 skb->dev = dev;
@@ -1704,7 +1625,7 @@ static int lmc_rx (struct net_device *dev) /*fold00*/
1704 * in which care we'll try to allocate the buffer 1625 * in which care we'll try to allocate the buffer
1705 * again. (once a second) 1626 * again. (once a second)
1706 */ 1627 */
1707 sc->stats.rx_BuffAllocErr++; 1628 sc->extra_stats.rx_BuffAllocErr++;
1708 LMC_EVENT_LOG(LMC_EVENT_RCVINT, stat, len); 1629 LMC_EVENT_LOG(LMC_EVENT_RCVINT, stat, len);
1709 sc->failed_recv_alloc = 1; 1630 sc->failed_recv_alloc = 1;
1710 goto skip_out_of_mem; 1631 goto skip_out_of_mem;
@@ -1739,16 +1660,14 @@ static int lmc_rx (struct net_device *dev) /*fold00*/
1739 * descriptors with bogus packets 1660 * descriptors with bogus packets
1740 * 1661 *
1741 if (localLengthErrCnt > LMC_RXDESCS - 3) { 1662 if (localLengthErrCnt > LMC_RXDESCS - 3) {
1742 sc->stats.rx_BadPktSurgeCnt++; 1663 sc->extra_stats.rx_BadPktSurgeCnt++;
1743 LMC_EVENT_LOG(LMC_EVENT_BADPKTSURGE, 1664 LMC_EVENT_LOG(LMC_EVENT_BADPKTSURGE, localLengthErrCnt,
1744 localLengthErrCnt, 1665 sc->extra_stats.rx_BadPktSurgeCnt);
1745 sc->stats.rx_BadPktSurgeCnt);
1746 } */ 1666 } */
1747 1667
1748 /* save max count of receive descriptors serviced */ 1668 /* save max count of receive descriptors serviced */
1749 if (rxIntLoopCnt > sc->stats.rxIntLoopCnt) { 1669 if (rxIntLoopCnt > sc->extra_stats.rxIntLoopCnt)
1750 sc->stats.rxIntLoopCnt = rxIntLoopCnt; /* debug -baz */ 1670 sc->extra_stats.rxIntLoopCnt = rxIntLoopCnt; /* debug -baz */
1751 }
1752 1671
1753#ifdef DEBUG 1672#ifdef DEBUG
1754 if (rxIntLoopCnt == 0) 1673 if (rxIntLoopCnt == 0)
@@ -1775,23 +1694,22 @@ skip_out_of_mem:
1775 return 0; 1694 return 0;
1776} 1695}
1777 1696
1778static struct net_device_stats *lmc_get_stats (struct net_device *dev) /*fold00*/ 1697static struct net_device_stats *lmc_get_stats(struct net_device *dev)
1779{ 1698{
1780 lmc_softc_t *sc = dev->priv; 1699 lmc_softc_t *sc = dev_to_sc(dev);
1781 unsigned long flags; 1700 unsigned long flags;
1782 1701
1783 lmc_trace(dev, "lmc_get_stats in"); 1702 lmc_trace(dev, "lmc_get_stats in");
1784 1703
1785
1786 spin_lock_irqsave(&sc->lmc_lock, flags); 1704 spin_lock_irqsave(&sc->lmc_lock, flags);
1787 1705
1788 sc->stats.rx_missed_errors += LMC_CSR_READ (sc, csr_missed_frames) & 0xffff; 1706 sc->lmc_device->stats.rx_missed_errors += LMC_CSR_READ(sc, csr_missed_frames) & 0xffff;
1789 1707
1790 spin_unlock_irqrestore(&sc->lmc_lock, flags); 1708 spin_unlock_irqrestore(&sc->lmc_lock, flags);
1791 1709
1792 lmc_trace(dev, "lmc_get_stats out"); 1710 lmc_trace(dev, "lmc_get_stats out");
1793 1711
1794 return (struct net_device_stats *) &sc->stats; 1712 return &sc->lmc_device->stats;
1795} 1713}
1796 1714
1797static struct pci_driver lmc_driver = { 1715static struct pci_driver lmc_driver = {
@@ -1970,7 +1888,7 @@ static void lmc_softreset (lmc_softc_t * const sc) /*fold00*/
1970 { 1888 {
1971 if (sc->lmc_txq[i] != NULL){ /* have buffer */ 1889 if (sc->lmc_txq[i] != NULL){ /* have buffer */
1972 dev_kfree_skb(sc->lmc_txq[i]); /* free it */ 1890 dev_kfree_skb(sc->lmc_txq[i]); /* free it */
1973 sc->stats.tx_dropped++; /* We just dropped a packet */ 1891 sc->lmc_device->stats.tx_dropped++; /* We just dropped a packet */
1974 } 1892 }
1975 sc->lmc_txq[i] = NULL; 1893 sc->lmc_txq[i] = NULL;
1976 sc->lmc_txring[i].status = 0x00000000; 1894 sc->lmc_txring[i].status = 0x00000000;
@@ -1982,7 +1900,7 @@ static void lmc_softreset (lmc_softc_t * const sc) /*fold00*/
1982 lmc_trace(sc->lmc_device, "lmc_softreset out"); 1900 lmc_trace(sc->lmc_device, "lmc_softreset out");
1983} 1901}
1984 1902
1985void lmc_gpio_mkinput(lmc_softc_t * const sc, u_int32_t bits) /*fold00*/ 1903void lmc_gpio_mkinput(lmc_softc_t * const sc, u32 bits) /*fold00*/
1986{ 1904{
1987 lmc_trace(sc->lmc_device, "lmc_gpio_mkinput in"); 1905 lmc_trace(sc->lmc_device, "lmc_gpio_mkinput in");
1988 sc->lmc_gpio_io &= ~bits; 1906 sc->lmc_gpio_io &= ~bits;
@@ -1990,7 +1908,7 @@ void lmc_gpio_mkinput(lmc_softc_t * const sc, u_int32_t bits) /*fold00*/
1990 lmc_trace(sc->lmc_device, "lmc_gpio_mkinput out"); 1908 lmc_trace(sc->lmc_device, "lmc_gpio_mkinput out");
1991} 1909}
1992 1910
1993void lmc_gpio_mkoutput(lmc_softc_t * const sc, u_int32_t bits) /*fold00*/ 1911void lmc_gpio_mkoutput(lmc_softc_t * const sc, u32 bits) /*fold00*/
1994{ 1912{
1995 lmc_trace(sc->lmc_device, "lmc_gpio_mkoutput in"); 1913 lmc_trace(sc->lmc_device, "lmc_gpio_mkoutput in");
1996 sc->lmc_gpio_io |= bits; 1914 sc->lmc_gpio_io |= bits;
@@ -1998,7 +1916,7 @@ void lmc_gpio_mkoutput(lmc_softc_t * const sc, u_int32_t bits) /*fold00*/
1998 lmc_trace(sc->lmc_device, "lmc_gpio_mkoutput out"); 1916 lmc_trace(sc->lmc_device, "lmc_gpio_mkoutput out");
1999} 1917}
2000 1918
2001void lmc_led_on(lmc_softc_t * const sc, u_int32_t led) /*fold00*/ 1919void lmc_led_on(lmc_softc_t * const sc, u32 led) /*fold00*/
2002{ 1920{
2003 lmc_trace(sc->lmc_device, "lmc_led_on in"); 1921 lmc_trace(sc->lmc_device, "lmc_led_on in");
2004 if((~sc->lmc_miireg16) & led){ /* Already on! */ 1922 if((~sc->lmc_miireg16) & led){ /* Already on! */
@@ -2011,7 +1929,7 @@ void lmc_led_on(lmc_softc_t * const sc, u_int32_t led) /*fold00*/
2011 lmc_trace(sc->lmc_device, "lmc_led_on out"); 1929 lmc_trace(sc->lmc_device, "lmc_led_on out");
2012} 1930}
2013 1931
2014void lmc_led_off(lmc_softc_t * const sc, u_int32_t led) /*fold00*/ 1932void lmc_led_off(lmc_softc_t * const sc, u32 led) /*fold00*/
2015{ 1933{
2016 lmc_trace(sc->lmc_device, "lmc_led_off in"); 1934 lmc_trace(sc->lmc_device, "lmc_led_off in");
2017 if(sc->lmc_miireg16 & led){ /* Already set don't do anything */ 1935 if(sc->lmc_miireg16 & led){ /* Already set don't do anything */
@@ -2061,13 +1979,13 @@ static void lmc_reset(lmc_softc_t * const sc) /*fold00*/
2061 */ 1979 */
2062 sc->lmc_media->init(sc); 1980 sc->lmc_media->init(sc);
2063 1981
2064 sc->stats.resetCount++; 1982 sc->extra_stats.resetCount++;
2065 lmc_trace(sc->lmc_device, "lmc_reset out"); 1983 lmc_trace(sc->lmc_device, "lmc_reset out");
2066} 1984}
2067 1985
2068static void lmc_dec_reset(lmc_softc_t * const sc) /*fold00*/ 1986static void lmc_dec_reset(lmc_softc_t * const sc) /*fold00*/
2069{ 1987{
2070 u_int32_t val; 1988 u32 val;
2071 lmc_trace(sc->lmc_device, "lmc_dec_reset in"); 1989 lmc_trace(sc->lmc_device, "lmc_dec_reset in");
2072 1990
2073 /* 1991 /*
@@ -2151,23 +2069,21 @@ static void lmc_initcsrs(lmc_softc_t * const sc, lmc_csrptr_t csr_base, /*fold00
2151 lmc_trace(sc->lmc_device, "lmc_initcsrs out"); 2069 lmc_trace(sc->lmc_device, "lmc_initcsrs out");
2152} 2070}
2153 2071
2154static void lmc_driver_timeout(struct net_device *dev) { /*fold00*/ 2072static void lmc_driver_timeout(struct net_device *dev)
2155 lmc_softc_t *sc; 2073{
2074 lmc_softc_t *sc = dev_to_sc(dev);
2156 u32 csr6; 2075 u32 csr6;
2157 unsigned long flags; 2076 unsigned long flags;
2158 2077
2159 lmc_trace(dev, "lmc_driver_timeout in"); 2078 lmc_trace(dev, "lmc_driver_timeout in");
2160 2079
2161 sc = dev->priv;
2162
2163 spin_lock_irqsave(&sc->lmc_lock, flags); 2080 spin_lock_irqsave(&sc->lmc_lock, flags);
2164 2081
2165 printk("%s: Xmitter busy|\n", dev->name); 2082 printk("%s: Xmitter busy|\n", dev->name);
2166 2083
2167 sc->stats.tx_tbusy_calls++ ; 2084 sc->extra_stats.tx_tbusy_calls++;
2168 if (jiffies - dev->trans_start < TX_TIMEOUT) { 2085 if (jiffies - dev->trans_start < TX_TIMEOUT)
2169 goto bug_out; 2086 goto bug_out;
2170 }
2171 2087
2172 /* 2088 /*
2173 * Chip seems to have locked up 2089 * Chip seems to have locked up
@@ -2178,7 +2094,7 @@ static void lmc_driver_timeout(struct net_device *dev) { /*fold00*/
2178 2094
2179 LMC_EVENT_LOG(LMC_EVENT_XMTPRCTMO, 2095 LMC_EVENT_LOG(LMC_EVENT_XMTPRCTMO,
2180 LMC_CSR_READ (sc, csr_status), 2096 LMC_CSR_READ (sc, csr_status),
2181 sc->stats.tx_ProcTimeout); 2097 sc->extra_stats.tx_ProcTimeout);
2182 2098
2183 lmc_running_reset (dev); 2099 lmc_running_reset (dev);
2184 2100
@@ -2195,8 +2111,8 @@ static void lmc_driver_timeout(struct net_device *dev) { /*fold00*/
2195 /* immediate transmit */ 2111 /* immediate transmit */
2196 LMC_CSR_WRITE (sc, csr_txpoll, 0); 2112 LMC_CSR_WRITE (sc, csr_txpoll, 0);
2197 2113
2198 sc->stats.tx_errors++; 2114 sc->lmc_device->stats.tx_errors++;
2199 sc->stats.tx_ProcTimeout++; /* -baz */ 2115 sc->extra_stats.tx_ProcTimeout++; /* -baz */
2200 2116
2201 dev->trans_start = jiffies; 2117 dev->trans_start = jiffies;
2202 2118
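
The lmc_main.c hunks above all follow the same conversion: the softc is reached through dev_to_sc() instead of dev->priv, standard counters move into the generic net_device statistics, and driver-only debug counters move into extra_stats. A minimal sketch of the resulting split, using only fields that appear in the hunks (the function name is illustrative, not part of the driver):

static void lmc_timeout_counters_sketch(struct net_device *dev)
{
        lmc_softc_t *sc = dev_to_sc(dev);        /* was: sc = dev->priv */
        unsigned long flags;

        spin_lock_irqsave(&sc->lmc_lock, flags);
        sc->extra_stats.tx_tbusy_calls++;        /* driver-private counter */
        sc->lmc_device->stats.tx_errors++;       /* standard netdev counter */
        spin_unlock_irqrestore(&sc->lmc_lock, flags);
}
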
diff --git a/drivers/net/wan/lmc/lmc_media.c b/drivers/net/wan/lmc/lmc_media.c
index 8aa461c941ce..f327674fc93a 100644
--- a/drivers/net/wan/lmc/lmc_media.c
+++ b/drivers/net/wan/lmc/lmc_media.c
@@ -16,8 +16,6 @@
16#include <linux/inet.h> 16#include <linux/inet.h>
17#include <linux/bitops.h> 17#include <linux/bitops.h>
18 18
19#include <net/syncppp.h>
20
21#include <asm/processor.h> /* Processor type for cache alignment. */ 19#include <asm/processor.h> /* Processor type for cache alignment. */
22#include <asm/io.h> 20#include <asm/io.h>
23#include <asm/dma.h> 21#include <asm/dma.h>
@@ -95,8 +93,7 @@ static void lmc_dummy_set_1 (lmc_softc_t * const, int);
95static void lmc_dummy_set2_1 (lmc_softc_t * const, lmc_ctl_t *); 93static void lmc_dummy_set2_1 (lmc_softc_t * const, lmc_ctl_t *);
96 94
97static inline void write_av9110_bit (lmc_softc_t *, int); 95static inline void write_av9110_bit (lmc_softc_t *, int);
98static void write_av9110 (lmc_softc_t *, u_int32_t, u_int32_t, u_int32_t, 96static void write_av9110(lmc_softc_t *, u32, u32, u32, u32, u32);
99 u_int32_t, u_int32_t);
100 97
101lmc_media_t lmc_ds3_media = { 98lmc_media_t lmc_ds3_media = {
102 lmc_ds3_init, /* special media init stuff */ 99 lmc_ds3_init, /* special media init stuff */
@@ -427,7 +424,7 @@ lmc_ds3_set_scram (lmc_softc_t * const sc, int ie)
427static int 424static int
428lmc_ds3_get_link_status (lmc_softc_t * const sc) 425lmc_ds3_get_link_status (lmc_softc_t * const sc)
429{ 426{
430 u_int16_t link_status, link_status_11; 427 u16 link_status, link_status_11;
431 int ret = 1; 428 int ret = 1;
432 429
433 lmc_mii_writereg (sc, 0, 17, 7); 430 lmc_mii_writereg (sc, 0, 17, 7);
@@ -449,7 +446,7 @@ lmc_ds3_get_link_status (lmc_softc_t * const sc)
449 (link_status & LMC_FRAMER_REG0_OOFS)){ 446 (link_status & LMC_FRAMER_REG0_OOFS)){
450 ret = 0; 447 ret = 0;
451 if(sc->last_led_err[3] != 1){ 448 if(sc->last_led_err[3] != 1){
452 u16 r1; 449 u16 r1;
453 lmc_mii_writereg (sc, 0, 17, 01); /* Turn on Xbit error as our cisco does */ 450 lmc_mii_writereg (sc, 0, 17, 01); /* Turn on Xbit error as our cisco does */
454 r1 = lmc_mii_readreg (sc, 0, 18); 451 r1 = lmc_mii_readreg (sc, 0, 18);
455 r1 &= 0xfe; 452 r1 &= 0xfe;
@@ -462,7 +459,7 @@ lmc_ds3_get_link_status (lmc_softc_t * const sc)
462 else { 459 else {
463 lmc_led_off(sc, LMC_DS3_LED3); /* turn on red LED */ 460 lmc_led_off(sc, LMC_DS3_LED3); /* turn on red LED */
464 if(sc->last_led_err[3] == 1){ 461 if(sc->last_led_err[3] == 1){
465 u16 r1; 462 u16 r1;
466 lmc_mii_writereg (sc, 0, 17, 01); /* Turn off Xbit error */ 463 lmc_mii_writereg (sc, 0, 17, 01); /* Turn off Xbit error */
467 r1 = lmc_mii_readreg (sc, 0, 18); 464 r1 = lmc_mii_readreg (sc, 0, 18);
468 r1 |= 0x01; 465 r1 |= 0x01;
@@ -540,20 +537,19 @@ lmc_ds3_watchdog (lmc_softc_t * const sc)
540 * SSI methods 537 * SSI methods
541 */ 538 */
542 539
543static void 540static void lmc_ssi_init(lmc_softc_t * const sc)
544lmc_ssi_init (lmc_softc_t * const sc)
545{ 541{
546 u_int16_t mii17; 542 u16 mii17;
547 int cable; 543 int cable;
548 544
549 sc->ictl.cardtype = LMC_CTL_CARDTYPE_LMC1000; 545 sc->ictl.cardtype = LMC_CTL_CARDTYPE_LMC1000;
550 546
551 mii17 = lmc_mii_readreg (sc, 0, 17); 547 mii17 = lmc_mii_readreg(sc, 0, 17);
552 548
553 cable = (mii17 & LMC_MII17_SSI_CABLE_MASK) >> LMC_MII17_SSI_CABLE_SHIFT; 549 cable = (mii17 & LMC_MII17_SSI_CABLE_MASK) >> LMC_MII17_SSI_CABLE_SHIFT;
554 sc->ictl.cable_type = cable; 550 sc->ictl.cable_type = cable;
555 551
556 lmc_gpio_mkoutput (sc, LMC_GEP_SSI_TXCLOCK); 552 lmc_gpio_mkoutput(sc, LMC_GEP_SSI_TXCLOCK);
557} 553}
558 554
559static void 555static void
@@ -681,11 +677,11 @@ lmc_ssi_set_speed (lmc_softc_t * const sc, lmc_ctl_t * ctl)
681static int 677static int
682lmc_ssi_get_link_status (lmc_softc_t * const sc) 678lmc_ssi_get_link_status (lmc_softc_t * const sc)
683{ 679{
684 u_int16_t link_status; 680 u16 link_status;
685 u_int32_t ticks; 681 u32 ticks;
686 int ret = 1; 682 int ret = 1;
687 int hw_hdsk = 1; 683 int hw_hdsk = 1;
688 684
689 /* 685 /*
690 * missing CTS? Hmm. If we require CTS on, we may never get the 686 * missing CTS? Hmm. If we require CTS on, we may never get the
691 * link to come up, so omit it in this test. 687 * link to come up, so omit it in this test.
@@ -720,9 +716,9 @@ lmc_ssi_get_link_status (lmc_softc_t * const sc)
720 } 716 }
721 else if (ticks == 0 ) { /* no clock found ? */ 717 else if (ticks == 0 ) { /* no clock found ? */
722 ret = 0; 718 ret = 0;
723 if(sc->last_led_err[3] != 1){ 719 if (sc->last_led_err[3] != 1) {
724 sc->stats.tx_lossOfClockCnt++; 720 sc->extra_stats.tx_lossOfClockCnt++;
725 printk(KERN_WARNING "%s: Lost Clock, Link Down\n", sc->name); 721 printk(KERN_WARNING "%s: Lost Clock, Link Down\n", sc->name);
726 } 722 }
727 sc->last_led_err[3] = 1; 723 sc->last_led_err[3] = 1;
728 lmc_led_on (sc, LMC_MII16_LED3); /* turn ON red LED */ 724 lmc_led_on (sc, LMC_MII16_LED3); /* turn ON red LED */
@@ -838,9 +834,7 @@ write_av9110_bit (lmc_softc_t * sc, int c)
838 LMC_CSR_WRITE (sc, csr_gp, sc->lmc_gpio); 834 LMC_CSR_WRITE (sc, csr_gp, sc->lmc_gpio);
839} 835}
840 836
841static void 837static void write_av9110(lmc_softc_t *sc, u32 n, u32 m, u32 v, u32 x, u32 r)
842write_av9110 (lmc_softc_t * sc, u_int32_t n, u_int32_t m, u_int32_t v,
843 u_int32_t x, u_int32_t r)
844{ 838{
845 int i; 839 int i;
846 840
@@ -887,19 +881,13 @@ write_av9110 (lmc_softc_t * sc, u_int32_t n, u_int32_t m, u_int32_t v,
887 | LMC_GEP_SSI_GENERATOR)); 881 | LMC_GEP_SSI_GENERATOR));
888} 882}
889 883
890static void 884static void lmc_ssi_watchdog(lmc_softc_t * const sc)
891lmc_ssi_watchdog (lmc_softc_t * const sc)
892{ 885{
893 u_int16_t mii17 = lmc_mii_readreg (sc, 0, 17); 886 u16 mii17 = lmc_mii_readreg(sc, 0, 17);
894 if (((mii17 >> 3) & 7) == 7) 887 if (((mii17 >> 3) & 7) == 7)
895 { 888 lmc_led_off(sc, LMC_MII16_LED2);
896 lmc_led_off (sc, LMC_MII16_LED2); 889 else
897 } 890 lmc_led_on(sc, LMC_MII16_LED2);
898 else
899 {
900 lmc_led_on (sc, LMC_MII16_LED2);
901 }
902
903} 891}
904 892
905/* 893/*
@@ -929,7 +917,7 @@ lmc_t1_read (lmc_softc_t * const sc, int a)
929static void 917static void
930lmc_t1_init (lmc_softc_t * const sc) 918lmc_t1_init (lmc_softc_t * const sc)
931{ 919{
932 u_int16_t mii16; 920 u16 mii16;
933 int i; 921 int i;
934 922
935 sc->ictl.cardtype = LMC_CTL_CARDTYPE_LMC1200; 923 sc->ictl.cardtype = LMC_CTL_CARDTYPE_LMC1200;
@@ -1028,7 +1016,7 @@ lmc_t1_set_status (lmc_softc_t * const sc, lmc_ctl_t * ctl)
1028 */ static int 1016 */ static int
1029lmc_t1_get_link_status (lmc_softc_t * const sc) 1017lmc_t1_get_link_status (lmc_softc_t * const sc)
1030{ 1018{
1031 u_int16_t link_status; 1019 u16 link_status;
1032 int ret = 1; 1020 int ret = 1;
1033 1021
1034 /* LMC5245 (DS3) & LMC1200 (DS1) LED definitions 1022 /* LMC5245 (DS3) & LMC1200 (DS1) LED definitions
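
Beyond the mechanical u_int16_t/u_int32_t to u16/u32 renames, the lmc_media.c hunks show how link state is derived from the framer: a register number is written to MII register 17 and the value is read back through register 18. A condensed sketch of that probe, keeping only the out-of-frame check visible above (the helper name and the simplified return value are illustrative; the full driver also tests further status bits):

static int lmc_ds3_link_up_sketch(lmc_softc_t * const sc)
{
        u16 link_status;

        lmc_mii_writereg(sc, 0, 17, 7);            /* select framer status register 7 */
        link_status = lmc_mii_readreg(sc, 0, 18);  /* read its contents via register 18 */

        return !(link_status & LMC_FRAMER_REG0_OOFS);  /* 0 when out of frame */
}
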
diff --git a/drivers/net/wan/lmc/lmc_proto.c b/drivers/net/wan/lmc/lmc_proto.c
index 85315758198d..be9877ff551e 100644
--- a/drivers/net/wan/lmc/lmc_proto.c
+++ b/drivers/net/wan/lmc/lmc_proto.c
@@ -36,9 +36,6 @@
36#include <linux/workqueue.h> 36#include <linux/workqueue.h>
37#include <linux/proc_fs.h> 37#include <linux/proc_fs.h>
38#include <linux/bitops.h> 38#include <linux/bitops.h>
39
40#include <net/syncppp.h>
41
42#include <asm/processor.h> /* Processor type for cache alignment. */ 39#include <asm/processor.h> /* Processor type for cache alignment. */
43#include <asm/io.h> 40#include <asm/io.h>
44#include <asm/dma.h> 41#include <asm/dma.h>
@@ -50,48 +47,6 @@
50#include "lmc_ioctl.h" 47#include "lmc_ioctl.h"
51#include "lmc_proto.h" 48#include "lmc_proto.h"
52 49
53/*
 54 * The compile-time variable SPPPSTUB causes the module to be
55 * compiled without referencing any of the sync ppp routines.
56 */
57#ifdef SPPPSTUB
58#define SPPP_detach(d) (void)0
59#define SPPP_open(d) 0
60#define SPPP_reopen(d) (void)0
61#define SPPP_close(d) (void)0
62#define SPPP_attach(d) (void)0
63#define SPPP_do_ioctl(d,i,c) -EOPNOTSUPP
64#else
65#define SPPP_attach(x) sppp_attach((x)->pd)
66#define SPPP_detach(x) sppp_detach((x)->pd->dev)
67#define SPPP_open(x) sppp_open((x)->pd->dev)
68#define SPPP_reopen(x) sppp_reopen((x)->pd->dev)
69#define SPPP_close(x) sppp_close((x)->pd->dev)
70#define SPPP_do_ioctl(x, y, z) sppp_do_ioctl((x)->pd->dev, (y), (z))
71#endif
72
73// init
74void lmc_proto_init(lmc_softc_t *sc) /*FOLD00*/
75{
76 lmc_trace(sc->lmc_device, "lmc_proto_init in");
77 switch(sc->if_type){
78 case LMC_PPP:
79 sc->pd = kmalloc(sizeof(struct ppp_device), GFP_KERNEL);
80 if (!sc->pd) {
81 printk("lmc_proto_init(): kmalloc failure!\n");
82 return;
83 }
84 sc->pd->dev = sc->lmc_device;
85 sc->if_ptr = sc->pd;
86 break;
87 case LMC_RAW:
88 break;
89 default:
90 break;
91 }
92 lmc_trace(sc->lmc_device, "lmc_proto_init out");
93}
94
95// attach 50// attach
96void lmc_proto_attach(lmc_softc_t *sc) /*FOLD00*/ 51void lmc_proto_attach(lmc_softc_t *sc) /*FOLD00*/
97{ 52{
@@ -100,7 +55,6 @@ void lmc_proto_attach(lmc_softc_t *sc) /*FOLD00*/
100 case LMC_PPP: 55 case LMC_PPP:
101 { 56 {
102 struct net_device *dev = sc->lmc_device; 57 struct net_device *dev = sc->lmc_device;
103 SPPP_attach(sc);
104 dev->do_ioctl = lmc_ioctl; 58 dev->do_ioctl = lmc_ioctl;
105 } 59 }
106 break; 60 break;
@@ -108,7 +62,7 @@ void lmc_proto_attach(lmc_softc_t *sc) /*FOLD00*/
108 { 62 {
109 struct net_device *dev = sc->lmc_device; 63 struct net_device *dev = sc->lmc_device;
110 /* 64 /*
111 * They set a few basics because they don't use sync_ppp 65 * They set a few basics because they don't use HDLC
112 */ 66 */
113 dev->flags |= IFF_POINTOPOINT; 67 dev->flags |= IFF_POINTOPOINT;
114 68
@@ -124,88 +78,39 @@ void lmc_proto_attach(lmc_softc_t *sc) /*FOLD00*/
124 lmc_trace(sc->lmc_device, "lmc_proto_attach out"); 78 lmc_trace(sc->lmc_device, "lmc_proto_attach out");
125} 79}
126 80
127// detach 81int lmc_proto_ioctl(lmc_softc_t *sc, struct ifreq *ifr, int cmd)
128void lmc_proto_detach(lmc_softc_t *sc) /*FOLD00*/
129{ 82{
130 switch(sc->if_type){ 83 lmc_trace(sc->lmc_device, "lmc_proto_ioctl");
131 case LMC_PPP: 84 if (sc->if_type == LMC_PPP)
132 SPPP_detach(sc); 85 return hdlc_ioctl(sc->lmc_device, ifr, cmd);
133 break; 86 return -EOPNOTSUPP;
134 case LMC_RAW: /* Tell someone we're detaching? */
135 break;
136 default:
137 break;
138 }
139
140} 87}
141 88
142// reopen 89int lmc_proto_open(lmc_softc_t *sc)
143void lmc_proto_reopen(lmc_softc_t *sc) /*FOLD00*/
144{ 90{
145 lmc_trace(sc->lmc_device, "lmc_proto_reopen in"); 91 int ret = 0;
146 switch(sc->if_type){
147 case LMC_PPP:
148 SPPP_reopen(sc);
149 break;
150 case LMC_RAW: /* Reset the interface after being down, prepare to receive packets again */
151 break;
152 default:
153 break;
154 }
155 lmc_trace(sc->lmc_device, "lmc_proto_reopen out");
156}
157 92
93 lmc_trace(sc->lmc_device, "lmc_proto_open in");
158 94
159// ioctl 95 if (sc->if_type == LMC_PPP) {
160int lmc_proto_ioctl(lmc_softc_t *sc, struct ifreq *ifr, int cmd) /*FOLD00*/ 96 ret = hdlc_open(sc->lmc_device);
161{ 97 if (ret < 0)
162 lmc_trace(sc->lmc_device, "lmc_proto_ioctl out"); 98 printk(KERN_WARNING "%s: HDLC open failed: %d\n",
163 switch(sc->if_type){ 99 sc->name, ret);
164 case LMC_PPP: 100 }
165 return SPPP_do_ioctl (sc, ifr, cmd); 101
166 break; 102 lmc_trace(sc->lmc_device, "lmc_proto_open out");
167 default: 103 return ret;
168 return -EOPNOTSUPP;
169 break;
170 }
171 lmc_trace(sc->lmc_device, "lmc_proto_ioctl out");
172} 104}
173 105
174// open 106void lmc_proto_close(lmc_softc_t *sc)
175void lmc_proto_open(lmc_softc_t *sc) /*FOLD00*/
176{ 107{
177 int ret; 108 lmc_trace(sc->lmc_device, "lmc_proto_close in");
178 109
179 lmc_trace(sc->lmc_device, "lmc_proto_open in"); 110 if (sc->if_type == LMC_PPP)
180 switch(sc->if_type){ 111 hdlc_close(sc->lmc_device);
181 case LMC_PPP:
182 ret = SPPP_open(sc);
183 if(ret < 0)
184 printk("%s: syncPPP open failed: %d\n", sc->name, ret);
185 break;
186 case LMC_RAW: /* We're about to start getting packets! */
187 break;
188 default:
189 break;
190 }
191 lmc_trace(sc->lmc_device, "lmc_proto_open out");
192}
193
194// close
195 112
196void lmc_proto_close(lmc_softc_t *sc) /*FOLD00*/ 113 lmc_trace(sc->lmc_device, "lmc_proto_close out");
197{
198 lmc_trace(sc->lmc_device, "lmc_proto_close in");
199 switch(sc->if_type){
200 case LMC_PPP:
201 SPPP_close(sc);
202 break;
203 case LMC_RAW: /* Interface going down */
204 break;
205 default:
206 break;
207 }
208 lmc_trace(sc->lmc_device, "lmc_proto_close out");
209} 114}
210 115
211__be16 lmc_proto_type(lmc_softc_t *sc, struct sk_buff *skb) /*FOLD00*/ 116__be16 lmc_proto_type(lmc_softc_t *sc, struct sk_buff *skb) /*FOLD00*/
@@ -213,8 +118,8 @@ __be16 lmc_proto_type(lmc_softc_t *sc, struct sk_buff *skb) /*FOLD00*/
213 lmc_trace(sc->lmc_device, "lmc_proto_type in"); 118 lmc_trace(sc->lmc_device, "lmc_proto_type in");
214 switch(sc->if_type){ 119 switch(sc->if_type){
215 case LMC_PPP: 120 case LMC_PPP:
216 return htons(ETH_P_WAN_PPP); 121 return hdlc_type_trans(skb, sc->lmc_device);
217 break; 122 break;
218 case LMC_NET: 123 case LMC_NET:
219 return htons(ETH_P_802_2); 124 return htons(ETH_P_802_2);
220 break; 125 break;
@@ -245,4 +150,3 @@ void lmc_proto_netif(lmc_softc_t *sc, struct sk_buff *skb) /*FOLD00*/
245 } 150 }
246 lmc_trace(sc->lmc_device, "lmc_proto_netif out"); 151 lmc_trace(sc->lmc_device, "lmc_proto_netif out");
247} 152}
248
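
With the syncppp stubs gone, lmc_proto_open()/close()/ioctl() become thin wrappers around hdlc_open(), hdlc_close() and hdlc_ioctl(), and lmc_proto_open() now reports failure instead of returning void. A sketch of how the device open path is expected to consume the new return value (the call site lives in lmc_main.c, outside these hunks, so treat this as an assumed usage pattern):

static int lmc_open_sketch(struct net_device *dev)
{
        lmc_softc_t *sc = dev_to_sc(dev);
        int err;

        /* ... hardware bring-up elided ... */

        err = lmc_proto_open(sc);     /* hdlc_open() when if_type == LMC_PPP */
        if (err)
                return err;

        netif_start_queue(dev);
        return 0;
}
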
diff --git a/drivers/net/wan/lmc/lmc_proto.h b/drivers/net/wan/lmc/lmc_proto.h
index ccaa69e8b3c7..662148c54644 100644
--- a/drivers/net/wan/lmc/lmc_proto.h
+++ b/drivers/net/wan/lmc/lmc_proto.h
@@ -1,16 +1,18 @@
1#ifndef _LMC_PROTO_H_ 1#ifndef _LMC_PROTO_H_
2#define _LMC_PROTO_H_ 2#define _LMC_PROTO_H_
3 3
4void lmc_proto_init(lmc_softc_t *sc); 4#include <linux/hdlc.h>
5
5void lmc_proto_attach(lmc_softc_t *sc); 6void lmc_proto_attach(lmc_softc_t *sc);
6void lmc_proto_detach(lmc_softc_t *sc);
7void lmc_proto_reopen(lmc_softc_t *sc);
8int lmc_proto_ioctl(lmc_softc_t *sc, struct ifreq *ifr, int cmd); 7int lmc_proto_ioctl(lmc_softc_t *sc, struct ifreq *ifr, int cmd);
9void lmc_proto_open(lmc_softc_t *sc); 8int lmc_proto_open(lmc_softc_t *sc);
10void lmc_proto_close(lmc_softc_t *sc); 9void lmc_proto_close(lmc_softc_t *sc);
11__be16 lmc_proto_type(lmc_softc_t *sc, struct sk_buff *skb); 10__be16 lmc_proto_type(lmc_softc_t *sc, struct sk_buff *skb);
12void lmc_proto_netif(lmc_softc_t *sc, struct sk_buff *skb); 11void lmc_proto_netif(lmc_softc_t *sc, struct sk_buff *skb);
13int lmc_skb_rawpackets(char *buf, char **start, off_t offset, int len, int unused);
14 12
15#endif 13static inline lmc_softc_t* dev_to_sc(struct net_device *dev)
14{
15 return (lmc_softc_t *)dev_to_hdlc(dev)->priv;
16}
16 17
18#endif
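
The new dev_to_sc() inline is what lets lmc_main.c drop its dev->priv lookups: with the generic HDLC framework, the netdev is expected to be allocated with alloc_hdlcdev(sc), which stores the softc in dev_to_hdlc(dev)->priv. A sketch of that allocation side, which is assumed here rather than shown in these hunks:

#include <linux/hdlc.h>

static struct net_device *lmc_alloc_netdev_sketch(lmc_softc_t *sc)
{
        struct net_device *dev = alloc_hdlcdev(sc);  /* dev_to_hdlc(dev)->priv = sc */

        if (dev)
                sc->lmc_device = dev;                /* so dev_to_sc(dev) round-trips */
        return dev;
}
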
diff --git a/drivers/net/wan/lmc/lmc_var.h b/drivers/net/wan/lmc/lmc_var.h
index 6d003a39bfad..65d01978e784 100644
--- a/drivers/net/wan/lmc/lmc_var.h
+++ b/drivers/net/wan/lmc/lmc_var.h
@@ -1,8 +1,6 @@
1#ifndef _LMC_VAR_H_ 1#ifndef _LMC_VAR_H_
2#define _LMC_VAR_H_ 2#define _LMC_VAR_H_
3 3
4/* $Id: lmc_var.h,v 1.17 2000/04/06 12:16:47 asj Exp $ */
5
6 /* 4 /*
7 * Copyright (c) 1997-2000 LAN Media Corporation (LMC) 5 * Copyright (c) 1997-2000 LAN Media Corporation (LMC)
8 * All rights reserved. www.lanmedia.com 6 * All rights reserved. www.lanmedia.com
@@ -19,23 +17,6 @@
19 17
20#include <linux/timer.h> 18#include <linux/timer.h>
21 19
22#ifndef __KERNEL__
23typedef signed char s8;
24typedef unsigned char u8;
25
26typedef signed short s16;
27typedef unsigned short u16;
28
29typedef signed int s32;
30typedef unsigned int u32;
31
32typedef signed long long s64;
33typedef unsigned long long u64;
34
35#define BITS_PER_LONG 32
36
37#endif
38
39/* 20/*
40 * basic definitions used in lmc include files 21 * basic definitions used in lmc include files
41 */ 22 */
@@ -45,9 +26,6 @@ typedef struct lmc___media lmc_media_t;
45typedef struct lmc___ctl lmc_ctl_t; 26typedef struct lmc___ctl lmc_ctl_t;
46 27
47#define lmc_csrptr_t unsigned long 28#define lmc_csrptr_t unsigned long
48#define u_int16_t u16
49#define u_int8_t u8
50#define tulip_uint32_t u32
51 29
52#define LMC_REG_RANGE 0x80 30#define LMC_REG_RANGE 0x80
53 31
@@ -122,45 +100,45 @@ struct lmc_regfile_t {
122 * used to define bits in the second tulip_desc_t field (length) 100 * used to define bits in the second tulip_desc_t field (length)
123 * for the transmit descriptor -baz */ 101 * for the transmit descriptor -baz */
124 102
125#define LMC_TDES_FIRST_BUFFER_SIZE ((u_int32_t)(0x000007FF)) 103#define LMC_TDES_FIRST_BUFFER_SIZE ((u32)(0x000007FF))
126#define LMC_TDES_SECOND_BUFFER_SIZE ((u_int32_t)(0x003FF800)) 104#define LMC_TDES_SECOND_BUFFER_SIZE ((u32)(0x003FF800))
127#define LMC_TDES_HASH_FILTERING ((u_int32_t)(0x00400000)) 105#define LMC_TDES_HASH_FILTERING ((u32)(0x00400000))
128#define LMC_TDES_DISABLE_PADDING ((u_int32_t)(0x00800000)) 106#define LMC_TDES_DISABLE_PADDING ((u32)(0x00800000))
129#define LMC_TDES_SECOND_ADDR_CHAINED ((u_int32_t)(0x01000000)) 107#define LMC_TDES_SECOND_ADDR_CHAINED ((u32)(0x01000000))
130#define LMC_TDES_END_OF_RING ((u_int32_t)(0x02000000)) 108#define LMC_TDES_END_OF_RING ((u32)(0x02000000))
131#define LMC_TDES_ADD_CRC_DISABLE ((u_int32_t)(0x04000000)) 109#define LMC_TDES_ADD_CRC_DISABLE ((u32)(0x04000000))
132#define LMC_TDES_SETUP_PACKET ((u_int32_t)(0x08000000)) 110#define LMC_TDES_SETUP_PACKET ((u32)(0x08000000))
133#define LMC_TDES_INVERSE_FILTERING ((u_int32_t)(0x10000000)) 111#define LMC_TDES_INVERSE_FILTERING ((u32)(0x10000000))
134#define LMC_TDES_FIRST_SEGMENT ((u_int32_t)(0x20000000)) 112#define LMC_TDES_FIRST_SEGMENT ((u32)(0x20000000))
135#define LMC_TDES_LAST_SEGMENT ((u_int32_t)(0x40000000)) 113#define LMC_TDES_LAST_SEGMENT ((u32)(0x40000000))
136#define LMC_TDES_INTERRUPT_ON_COMPLETION ((u_int32_t)(0x80000000)) 114#define LMC_TDES_INTERRUPT_ON_COMPLETION ((u32)(0x80000000))
137 115
138#define TDES_SECOND_BUFFER_SIZE_BIT_NUMBER 11 116#define TDES_SECOND_BUFFER_SIZE_BIT_NUMBER 11
139#define TDES_COLLISION_COUNT_BIT_NUMBER 3 117#define TDES_COLLISION_COUNT_BIT_NUMBER 3
140 118
141/* Constants for the RCV descriptor RDES */ 119/* Constants for the RCV descriptor RDES */
142 120
143#define LMC_RDES_OVERFLOW ((u_int32_t)(0x00000001)) 121#define LMC_RDES_OVERFLOW ((u32)(0x00000001))
144#define LMC_RDES_CRC_ERROR ((u_int32_t)(0x00000002)) 122#define LMC_RDES_CRC_ERROR ((u32)(0x00000002))
145#define LMC_RDES_DRIBBLING_BIT ((u_int32_t)(0x00000004)) 123#define LMC_RDES_DRIBBLING_BIT ((u32)(0x00000004))
146#define LMC_RDES_REPORT_ON_MII_ERR ((u_int32_t)(0x00000008)) 124#define LMC_RDES_REPORT_ON_MII_ERR ((u32)(0x00000008))
147#define LMC_RDES_RCV_WATCHDOG_TIMEOUT ((u_int32_t)(0x00000010)) 125#define LMC_RDES_RCV_WATCHDOG_TIMEOUT ((u32)(0x00000010))
148#define LMC_RDES_FRAME_TYPE ((u_int32_t)(0x00000020)) 126#define LMC_RDES_FRAME_TYPE ((u32)(0x00000020))
149#define LMC_RDES_COLLISION_SEEN ((u_int32_t)(0x00000040)) 127#define LMC_RDES_COLLISION_SEEN ((u32)(0x00000040))
150#define LMC_RDES_FRAME_TOO_LONG ((u_int32_t)(0x00000080)) 128#define LMC_RDES_FRAME_TOO_LONG ((u32)(0x00000080))
151#define LMC_RDES_LAST_DESCRIPTOR ((u_int32_t)(0x00000100)) 129#define LMC_RDES_LAST_DESCRIPTOR ((u32)(0x00000100))
152#define LMC_RDES_FIRST_DESCRIPTOR ((u_int32_t)(0x00000200)) 130#define LMC_RDES_FIRST_DESCRIPTOR ((u32)(0x00000200))
153#define LMC_RDES_MULTICAST_FRAME ((u_int32_t)(0x00000400)) 131#define LMC_RDES_MULTICAST_FRAME ((u32)(0x00000400))
154#define LMC_RDES_RUNT_FRAME ((u_int32_t)(0x00000800)) 132#define LMC_RDES_RUNT_FRAME ((u32)(0x00000800))
155#define LMC_RDES_DATA_TYPE ((u_int32_t)(0x00003000)) 133#define LMC_RDES_DATA_TYPE ((u32)(0x00003000))
156#define LMC_RDES_LENGTH_ERROR ((u_int32_t)(0x00004000)) 134#define LMC_RDES_LENGTH_ERROR ((u32)(0x00004000))
157#define LMC_RDES_ERROR_SUMMARY ((u_int32_t)(0x00008000)) 135#define LMC_RDES_ERROR_SUMMARY ((u32)(0x00008000))
158#define LMC_RDES_FRAME_LENGTH ((u_int32_t)(0x3FFF0000)) 136#define LMC_RDES_FRAME_LENGTH ((u32)(0x3FFF0000))
159#define LMC_RDES_OWN_BIT ((u_int32_t)(0x80000000)) 137#define LMC_RDES_OWN_BIT ((u32)(0x80000000))
160 138
161#define RDES_FRAME_LENGTH_BIT_NUMBER 16 139#define RDES_FRAME_LENGTH_BIT_NUMBER 16
162 140
163#define LMC_RDES_ERROR_MASK ( (u_int32_t)( \ 141#define LMC_RDES_ERROR_MASK ( (u32)( \
164 LMC_RDES_OVERFLOW \ 142 LMC_RDES_OVERFLOW \
165 | LMC_RDES_DRIBBLING_BIT \ 143 | LMC_RDES_DRIBBLING_BIT \
166 | LMC_RDES_REPORT_ON_MII_ERR \ 144 | LMC_RDES_REPORT_ON_MII_ERR \
@@ -172,32 +150,32 @@ struct lmc_regfile_t {
172 */ 150 */
173 151
174typedef struct { 152typedef struct {
175 u_int32_t n; 153 u32 n;
176 u_int32_t m; 154 u32 m;
177 u_int32_t v; 155 u32 v;
178 u_int32_t x; 156 u32 x;
179 u_int32_t r; 157 u32 r;
180 u_int32_t f; 158 u32 f;
181 u_int32_t exact; 159 u32 exact;
182} lmc_av9110_t; 160} lmc_av9110_t;
183 161
184/* 162/*
185 * Common structure passed to the ioctl code. 163 * Common structure passed to the ioctl code.
186 */ 164 */
187struct lmc___ctl { 165struct lmc___ctl {
188 u_int32_t cardtype; 166 u32 cardtype;
189 u_int32_t clock_source; /* HSSI, T1 */ 167 u32 clock_source; /* HSSI, T1 */
190 u_int32_t clock_rate; /* T1 */ 168 u32 clock_rate; /* T1 */
191 u_int32_t crc_length; 169 u32 crc_length;
192 u_int32_t cable_length; /* DS3 */ 170 u32 cable_length; /* DS3 */
193 u_int32_t scrambler_onoff; /* DS3 */ 171 u32 scrambler_onoff; /* DS3 */
194 u_int32_t cable_type; /* T1 */ 172 u32 cable_type; /* T1 */
195 u_int32_t keepalive_onoff; /* protocol */ 173 u32 keepalive_onoff; /* protocol */
196 u_int32_t ticks; /* ticks/sec */ 174 u32 ticks; /* ticks/sec */
197 union { 175 union {
198 lmc_av9110_t ssi; 176 lmc_av9110_t ssi;
199 } cardspec; 177 } cardspec;
200 u_int32_t circuit_type; /* T1 or E1 */ 178 u32 circuit_type; /* T1 or E1 */
201}; 179};
202 180
203 181
@@ -244,108 +222,69 @@ struct lmc___media {
244 222
245#define STATCHECK 0xBEEFCAFE 223#define STATCHECK 0xBEEFCAFE
246 224
247/* Included in this structure are first 225struct lmc_extra_statistics
248 * - standard net_device_stats
249 * - some other counters used for debug and driver performance
250 * evaluation -baz
251 */
252struct lmc_statistics
253{ 226{
254 unsigned long rx_packets; /* total packets received */ 227 u32 version_size;
255 unsigned long tx_packets; /* total packets transmitted */ 228 u32 lmc_cardtype;
256 unsigned long rx_bytes; 229
257 unsigned long tx_bytes; 230 u32 tx_ProcTimeout;
258 231 u32 tx_IntTimeout;
259 unsigned long rx_errors; /* bad packets received */ 232 u32 tx_NoCompleteCnt;
260 unsigned long tx_errors; /* packet transmit problems */ 233 u32 tx_MaxXmtsB4Int;
261 unsigned long rx_dropped; /* no space in linux buffers */ 234 u32 tx_TimeoutCnt;
262 unsigned long tx_dropped; /* no space available in linux */ 235 u32 tx_OutOfSyncPtr;
263 unsigned long multicast; /* multicast packets received */ 236 u32 tx_tbusy0;
264 unsigned long collisions; 237 u32 tx_tbusy1;
265 238 u32 tx_tbusy_calls;
266 /* detailed rx_errors: */ 239 u32 resetCount;
267 unsigned long rx_length_errors; 240 u32 lmc_txfull;
268 unsigned long rx_over_errors; /* receiver ring buff overflow */ 241 u32 tbusy;
269 unsigned long rx_crc_errors; /* recved pkt with crc error */ 242 u32 dirtyTx;
270 unsigned long rx_frame_errors; /* recv'd frame alignment error */ 243 u32 lmc_next_tx;
271 unsigned long rx_fifo_errors; /* recv'r fifo overrun */ 244 u32 otherTypeCnt;
272 unsigned long rx_missed_errors; /* receiver missed packet */ 245 u32 lastType;
273 246 u32 lastTypeOK;
274 /* detailed tx_errors */ 247 u32 txLoopCnt;
275 unsigned long tx_aborted_errors; 248 u32 usedXmtDescripCnt;
276 unsigned long tx_carrier_errors; 249 u32 txIndexCnt;
277 unsigned long tx_fifo_errors; 250 u32 rxIntLoopCnt;
278 unsigned long tx_heartbeat_errors; 251
279 unsigned long tx_window_errors; 252 u32 rx_SmallPktCnt;
280 253 u32 rx_BadPktSurgeCnt;
281 /* for cslip etc */ 254 u32 rx_BuffAllocErr;
282 unsigned long rx_compressed; 255 u32 tx_lossOfClockCnt;
283 unsigned long tx_compressed; 256
284 257 /* T1 error counters */
285 /* ------------------------------------- 258 u32 framingBitErrorCount;
286 * Custom stats & counters follow -baz */ 259 u32 lineCodeViolationCount;
287 u_int32_t version_size; 260
288 u_int32_t lmc_cardtype; 261 u32 lossOfFrameCount;
289 262 u32 changeOfFrameAlignmentCount;
290 u_int32_t tx_ProcTimeout; 263 u32 severelyErroredFrameCount;
291 u_int32_t tx_IntTimeout; 264
292 u_int32_t tx_NoCompleteCnt; 265 u32 check;
293 u_int32_t tx_MaxXmtsB4Int;
294 u_int32_t tx_TimeoutCnt;
295 u_int32_t tx_OutOfSyncPtr;
296 u_int32_t tx_tbusy0;
297 u_int32_t tx_tbusy1;
298 u_int32_t tx_tbusy_calls;
299 u_int32_t resetCount;
300 u_int32_t lmc_txfull;
301 u_int32_t tbusy;
302 u_int32_t dirtyTx;
303 u_int32_t lmc_next_tx;
304 u_int32_t otherTypeCnt;
305 u_int32_t lastType;
306 u_int32_t lastTypeOK;
307 u_int32_t txLoopCnt;
308 u_int32_t usedXmtDescripCnt;
309 u_int32_t txIndexCnt;
310 u_int32_t rxIntLoopCnt;
311
312 u_int32_t rx_SmallPktCnt;
313 u_int32_t rx_BadPktSurgeCnt;
314 u_int32_t rx_BuffAllocErr;
315 u_int32_t tx_lossOfClockCnt;
316
317 /* T1 error counters */
318 u_int32_t framingBitErrorCount;
319 u_int32_t lineCodeViolationCount;
320
321 u_int32_t lossOfFrameCount;
322 u_int32_t changeOfFrameAlignmentCount;
323 u_int32_t severelyErroredFrameCount;
324
325 u_int32_t check;
326}; 266};
327 267
328
329typedef struct lmc_xinfo { 268typedef struct lmc_xinfo {
330 u_int32_t Magic0; /* BEEFCAFE */ 269 u32 Magic0; /* BEEFCAFE */
331 270
332 u_int32_t PciCardType; 271 u32 PciCardType;
333 u_int32_t PciSlotNumber; /* PCI slot number */ 272 u32 PciSlotNumber; /* PCI slot number */
334 273
335 u_int16_t DriverMajorVersion; 274 u16 DriverMajorVersion;
336 u_int16_t DriverMinorVersion; 275 u16 DriverMinorVersion;
337 u_int16_t DriverSubVersion; 276 u16 DriverSubVersion;
338 277
339 u_int16_t XilinxRevisionNumber; 278 u16 XilinxRevisionNumber;
340 u_int16_t MaxFrameSize; 279 u16 MaxFrameSize;
341 280
342 u_int16_t t1_alarm1_status; 281 u16 t1_alarm1_status;
343 u_int16_t t1_alarm2_status; 282 u16 t1_alarm2_status;
344 283
345 int link_status; 284 int link_status;
346 u_int32_t mii_reg16; 285 u32 mii_reg16;
347 286
348 u_int32_t Magic1; /* DEADBEEF */ 287 u32 Magic1; /* DEADBEEF */
349} LMC_XINFO; 288} LMC_XINFO;
350 289
351 290
@@ -353,23 +292,22 @@ typedef struct lmc_xinfo {
353 * forward decl 292 * forward decl
354 */ 293 */
355struct lmc___softc { 294struct lmc___softc {
356 void *if_ptr; /* General purpose pointer (used by SPPP) */
357 char *name; 295 char *name;
358 u8 board_idx; 296 u8 board_idx;
359 struct lmc_statistics stats; 297 struct lmc_extra_statistics extra_stats;
360 struct net_device *lmc_device; 298 struct net_device *lmc_device;
361 299
362 int hang, rxdesc, bad_packet, some_counter; 300 int hang, rxdesc, bad_packet, some_counter;
363 u_int32_t txgo; 301 u32 txgo;
364 struct lmc_regfile_t lmc_csrs; 302 struct lmc_regfile_t lmc_csrs;
365 volatile u_int32_t lmc_txtick; 303 volatile u32 lmc_txtick;
366 volatile u_int32_t lmc_rxtick; 304 volatile u32 lmc_rxtick;
367 u_int32_t lmc_flags; 305 u32 lmc_flags;
368 u_int32_t lmc_intrmask; /* our copy of csr_intr */ 306 u32 lmc_intrmask; /* our copy of csr_intr */
369 u_int32_t lmc_cmdmode; /* our copy of csr_cmdmode */ 307 u32 lmc_cmdmode; /* our copy of csr_cmdmode */
370 u_int32_t lmc_busmode; /* our copy of csr_busmode */ 308 u32 lmc_busmode; /* our copy of csr_busmode */
371 u_int32_t lmc_gpio_io; /* state of in/out settings */ 309 u32 lmc_gpio_io; /* state of in/out settings */
372 u_int32_t lmc_gpio; /* state of outputs */ 310 u32 lmc_gpio; /* state of outputs */
373 struct sk_buff* lmc_txq[LMC_TXDESCS]; 311 struct sk_buff* lmc_txq[LMC_TXDESCS];
374 struct sk_buff* lmc_rxq[LMC_RXDESCS]; 312 struct sk_buff* lmc_rxq[LMC_RXDESCS];
375 volatile 313 volatile
@@ -381,42 +319,41 @@ struct lmc___softc {
381 unsigned int lmc_taint_tx, lmc_taint_rx; 319 unsigned int lmc_taint_tx, lmc_taint_rx;
382 int lmc_tx_start, lmc_txfull; 320 int lmc_tx_start, lmc_txfull;
383 int lmc_txbusy; 321 int lmc_txbusy;
384 u_int16_t lmc_miireg16; 322 u16 lmc_miireg16;
385 int lmc_ok; 323 int lmc_ok;
386 int last_link_status; 324 int last_link_status;
387 int lmc_cardtype; 325 int lmc_cardtype;
388 u_int32_t last_frameerr; 326 u32 last_frameerr;
389 lmc_media_t *lmc_media; 327 lmc_media_t *lmc_media;
390 struct timer_list timer; 328 struct timer_list timer;
391 lmc_ctl_t ictl; 329 lmc_ctl_t ictl;
392 u_int32_t TxDescriptControlInit; 330 u32 TxDescriptControlInit;
393 331
394 int tx_TimeoutInd; /* additional driver state */ 332 int tx_TimeoutInd; /* additional driver state */
395 int tx_TimeoutDisplay; 333 int tx_TimeoutDisplay;
396 unsigned int lastlmc_taint_tx; 334 unsigned int lastlmc_taint_tx;
397 int lasttx_packets; 335 int lasttx_packets;
398 u_int32_t tx_clockState; 336 u32 tx_clockState;
399 u_int32_t lmc_crcSize; 337 u32 lmc_crcSize;
400 LMC_XINFO lmc_xinfo; 338 LMC_XINFO lmc_xinfo;
401 char lmc_yel, lmc_blue, lmc_red; /* for T1 and DS3 */ 339 char lmc_yel, lmc_blue, lmc_red; /* for T1 and DS3 */
402 char lmc_timing; /* for HSSI and SSI */ 340 char lmc_timing; /* for HSSI and SSI */
403 int got_irq; 341 int got_irq;
404 342
405 char last_led_err[4]; 343 char last_led_err[4];
406 344
407 u32 last_int; 345 u32 last_int;
408 u32 num_int; 346 u32 num_int;
409 347
410 spinlock_t lmc_lock; 348 spinlock_t lmc_lock;
411 u_int16_t if_type; /* PPP or NET */ 349 u16 if_type; /* HDLC/PPP or NET */
412 struct ppp_device *pd;
413 350
414 /* Failure cases */ 351 /* Failure cases */
415 u8 failed_ring; 352 u8 failed_ring;
416 u8 failed_recv_alloc; 353 u8 failed_recv_alloc;
417 354
418 /* Structure check */ 355 /* Structure check */
419 u32 check; 356 u32 check;
420}; 357};
421 358
422#define LMC_PCI_TIME 1 359#define LMC_PCI_TIME 1
@@ -512,8 +449,8 @@ struct lmc___softc {
512 | TULIP_STS_TXUNDERFLOW\ 449 | TULIP_STS_TXUNDERFLOW\
513 | TULIP_STS_RXSTOPPED ) 450 | TULIP_STS_RXSTOPPED )
514 451
515#define DESC_OWNED_BY_SYSTEM ((u_int32_t)(0x00000000)) 452#define DESC_OWNED_BY_SYSTEM ((u32)(0x00000000))
516#define DESC_OWNED_BY_DC21X4 ((u_int32_t)(0x80000000)) 453#define DESC_OWNED_BY_DC21X4 ((u32)(0x80000000))
517 454
518#ifndef TULIP_CMD_RECEIVEALL 455#ifndef TULIP_CMD_RECEIVEALL
519#define TULIP_CMD_RECEIVEALL 0x40000000L 456#define TULIP_CMD_RECEIVEALL 0x40000000L
@@ -525,46 +462,9 @@ struct lmc___softc {
525#define LMC_ADAP_SSI 4 462#define LMC_ADAP_SSI 4
526#define LMC_ADAP_T1 5 463#define LMC_ADAP_T1 5
527 464
528#define HDLC_HDR_LEN 4
529#define HDLC_ADDR_LEN 1
530#define HDLC_SLARP 0x8035
531#define LMC_MTU 1500 465#define LMC_MTU 1500
532#define SLARP_LINECHECK 2
533 466
534#define LMC_CRC_LEN_16 2 /* 16-bit CRC */ 467#define LMC_CRC_LEN_16 2 /* 16-bit CRC */
535#define LMC_CRC_LEN_32 4 468#define LMC_CRC_LEN_32 4
536 469
537#ifdef LMC_HDLC
538/* definition of an hdlc header. */
539struct hdlc_hdr
540{
541 u8 address;
542 u8 control;
543 u16 type;
544};
545
546/* definition of a slarp header. */
547struct slarp
548{
549 long code;
550 union sl
551 {
552 struct
553 {
554 ulong address;
555 ulong mask;
556 ushort unused;
557 } add;
558 struct
559 {
560 ulong mysequence;
561 ulong yoursequence;
562 ushort reliability;
563 ulong time;
564 } chk;
565 } t;
566};
567#endif /* LMC_HDLC */
568
569
570#endif /* _LMC_VAR_H_ */ 470#endif /* _LMC_VAR_H_ */
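
The lmc_var.h changes are of two kinds: struct lmc_statistics, which duplicated every net_device_stats field, shrinks to struct lmc_extra_statistics holding only the driver-specific counters, and every u_int16_t/u_int32_t becomes u16/u32. The renames are width-preserving, so the LMC_XINFO and lmc_ctl_t ioctl layouts are unchanged; a defensive compile-time check along these lines (not part of this patch) would confirm it:

#include <linux/kernel.h>

static inline void lmc_abi_width_check(void)
{
        BUILD_BUG_ON(sizeof(u16) != 2);   /* replaces u_int16_t */
        BUILD_BUG_ON(sizeof(u32) != 4);   /* replaces u_int32_t */
}
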
diff --git a/drivers/net/wan/pc300.h b/drivers/net/wan/pc300.h
index 63e9fcf31fb8..2e4f84f6cad4 100644
--- a/drivers/net/wan/pc300.h
+++ b/drivers/net/wan/pc300.h
@@ -100,31 +100,14 @@
100#define _PC300_H 100#define _PC300_H
101 101
102#include <linux/hdlc.h> 102#include <linux/hdlc.h>
103#include <net/syncppp.h>
104#include "hd64572.h" 103#include "hd64572.h"
105#include "pc300-falc-lh.h" 104#include "pc300-falc-lh.h"
106 105
107#ifndef CY_TYPES 106#define PC300_PROTO_MLPPP 1
108#define CY_TYPES
109typedef __u64 ucdouble; /* 64 bits, unsigned */
110typedef __u32 uclong; /* 32 bits, unsigned */
111typedef __u16 ucshort; /* 16 bits, unsigned */
112typedef __u8 ucchar; /* 8 bits, unsigned */
113#endif /* CY_TYPES */
114 107
115#define PC300_PROTO_MLPPP 1
116
117#define PC300_KERNEL "2.4.x" /* Kernel supported by this driver */
118
119#define PC300_DEVNAME "hdlc" /* Dev. name base (for hdlc0, hdlc1, etc.) */
120#define PC300_MAXINDEX 100 /* Max dev. name index (the '0' in hdlc0) */
121
122#define PC300_MAXCARDS 4 /* Max number of cards per system */
123#define PC300_MAXCHAN 2 /* Number of channels per card */ 108#define PC300_MAXCHAN 2 /* Number of channels per card */
124 109
125#define PC300_PLX_WIN 0x80 /* PLX control window size (128b) */
126#define PC300_RAMSIZE 0x40000 /* RAM window size (256Kb) */ 110#define PC300_RAMSIZE 0x40000 /* RAM window size (256Kb) */
127#define PC300_SCASIZE 0x400 /* SCA window size (1Kb) */
128#define PC300_FALCSIZE 0x400 /* FALC window size (1Kb) */ 111#define PC300_FALCSIZE 0x400 /* FALC window size (1Kb) */
129 112
130#define PC300_OSC_CLOCK 24576000 113#define PC300_OSC_CLOCK 24576000
@@ -160,26 +143,14 @@ typedef __u8 ucchar; /* 8 bits, unsigned */
160 * Memory access functions/macros * 143 * Memory access functions/macros *
161 * (required to support Alpha systems) * 144 * (required to support Alpha systems) *
162 ***************************************/ 145 ***************************************/
163#ifdef __KERNEL__ 146#define cpc_writeb(port,val) {writeb((u8)(val),(port)); mb();}
164#define cpc_writeb(port,val) {writeb((ucchar)(val),(port)); mb();}
165#define cpc_writew(port,val) {writew((ushort)(val),(port)); mb();} 147#define cpc_writew(port,val) {writew((ushort)(val),(port)); mb();}
166#define cpc_writel(port,val) {writel((uclong)(val),(port)); mb();} 148#define cpc_writel(port,val) {writel((u32)(val),(port)); mb();}
167 149
168#define cpc_readb(port) readb(port) 150#define cpc_readb(port) readb(port)
169#define cpc_readw(port) readw(port) 151#define cpc_readw(port) readw(port)
170#define cpc_readl(port) readl(port) 152#define cpc_readl(port) readl(port)
171 153
172#else /* __KERNEL__ */
173#define cpc_writeb(port,val) (*(volatile ucchar *)(port) = (ucchar)(val))
174#define cpc_writew(port,val) (*(volatile ucshort *)(port) = (ucshort)(val))
175#define cpc_writel(port,val) (*(volatile uclong *)(port) = (uclong)(val))
176
177#define cpc_readb(port) (*(volatile ucchar *)(port))
178#define cpc_readw(port) (*(volatile ucshort *)(port))
179#define cpc_readl(port) (*(volatile uclong *)(port))
180
181#endif /* __KERNEL__ */
182
183/****** Data Structures *****************************************************/ 154/****** Data Structures *****************************************************/
184 155
185/* 156/*
@@ -188,15 +159,15 @@ typedef __u8 ucchar; /* 8 bits, unsigned */
188 * (memory mapped). 159 * (memory mapped).
189 */ 160 */
190struct RUNTIME_9050 { 161struct RUNTIME_9050 {
191 uclong loc_addr_range[4]; /* 00-0Ch : Local Address Ranges */ 162 u32 loc_addr_range[4]; /* 00-0Ch : Local Address Ranges */
192 uclong loc_rom_range; /* 10h : Local ROM Range */ 163 u32 loc_rom_range; /* 10h : Local ROM Range */
193 uclong loc_addr_base[4]; /* 14-20h : Local Address Base Addrs */ 164 u32 loc_addr_base[4]; /* 14-20h : Local Address Base Addrs */
194 uclong loc_rom_base; /* 24h : Local ROM Base */ 165 u32 loc_rom_base; /* 24h : Local ROM Base */
195 uclong loc_bus_descr[4]; /* 28-34h : Local Bus Descriptors */ 166 u32 loc_bus_descr[4]; /* 28-34h : Local Bus Descriptors */
196 uclong rom_bus_descr; /* 38h : ROM Bus Descriptor */ 167 u32 rom_bus_descr; /* 38h : ROM Bus Descriptor */
197 uclong cs_base[4]; /* 3C-48h : Chip Select Base Addrs */ 168 u32 cs_base[4]; /* 3C-48h : Chip Select Base Addrs */
198 uclong intr_ctrl_stat; /* 4Ch : Interrupt Control/Status */ 169 u32 intr_ctrl_stat; /* 4Ch : Interrupt Control/Status */
199 uclong init_ctrl; /* 50h : EEPROM ctrl, Init Ctrl, etc */ 170 u32 init_ctrl; /* 50h : EEPROM ctrl, Init Ctrl, etc */
200}; 171};
201 172
202#define PLX_9050_LINT1_ENABLE 0x01 173#define PLX_9050_LINT1_ENABLE 0x01
@@ -240,66 +211,66 @@ struct RUNTIME_9050 {
240#define PC300_FALC_MAXLOOP 0x0000ffff /* for falc_issue_cmd() */ 211#define PC300_FALC_MAXLOOP 0x0000ffff /* for falc_issue_cmd() */
241 212
242typedef struct falc { 213typedef struct falc {
243 ucchar sync; /* If true FALC is synchronized */ 214 u8 sync; /* If true FALC is synchronized */
244 ucchar active; /* if TRUE then already active */ 215 u8 active; /* if TRUE then already active */
245 ucchar loop_active; /* if TRUE a line loopback UP was received */ 216 u8 loop_active; /* if TRUE a line loopback UP was received */
246 ucchar loop_gen; /* if TRUE a line loopback UP was issued */ 217 u8 loop_gen; /* if TRUE a line loopback UP was issued */
247 218
248 ucchar num_channels; 219 u8 num_channels;
249 ucchar offset; /* 1 for T1, 0 for E1 */ 220 u8 offset; /* 1 for T1, 0 for E1 */
250 ucchar full_bandwidth; 221 u8 full_bandwidth;
251 222
252 ucchar xmb_cause; 223 u8 xmb_cause;
253 ucchar multiframe_mode; 224 u8 multiframe_mode;
254 225
255 /* Statistics */ 226 /* Statistics */
256 ucshort pden; /* Pulse Density violation count */ 227 u16 pden; /* Pulse Density violation count */
257 ucshort los; /* Loss of Signal count */ 228 u16 los; /* Loss of Signal count */
258 ucshort losr; /* Loss of Signal recovery count */ 229 u16 losr; /* Loss of Signal recovery count */
259 ucshort lfa; /* Loss of frame alignment count */ 230 u16 lfa; /* Loss of frame alignment count */
260 ucshort farec; /* Frame Alignment Recovery count */ 231 u16 farec; /* Frame Alignment Recovery count */
261 ucshort lmfa; /* Loss of multiframe alignment count */ 232 u16 lmfa; /* Loss of multiframe alignment count */
262 ucshort ais; /* Remote Alarm indication Signal count */ 233 u16 ais; /* Remote Alarm indication Signal count */
263 ucshort sec; /* One-second timer */ 234 u16 sec; /* One-second timer */
264 ucshort es; /* Errored second */ 235 u16 es; /* Errored second */
265 ucshort rai; /* remote alarm received */ 236 u16 rai; /* remote alarm received */
266 ucshort bec; 237 u16 bec;
267 ucshort fec; 238 u16 fec;
268 ucshort cvc; 239 u16 cvc;
269 ucshort cec; 240 u16 cec;
270 ucshort ebc; 241 u16 ebc;
271 242
272 /* Status */ 243 /* Status */
273 ucchar red_alarm; 244 u8 red_alarm;
274 ucchar blue_alarm; 245 u8 blue_alarm;
275 ucchar loss_fa; 246 u8 loss_fa;
276 ucchar yellow_alarm; 247 u8 yellow_alarm;
277 ucchar loss_mfa; 248 u8 loss_mfa;
278 ucchar prbs; 249 u8 prbs;
279} falc_t; 250} falc_t;
280 251
281typedef struct falc_status { 252typedef struct falc_status {
282 ucchar sync; /* If true FALC is synchronized */ 253 u8 sync; /* If true FALC is synchronized */
283 ucchar red_alarm; 254 u8 red_alarm;
284 ucchar blue_alarm; 255 u8 blue_alarm;
285 ucchar loss_fa; 256 u8 loss_fa;
286 ucchar yellow_alarm; 257 u8 yellow_alarm;
287 ucchar loss_mfa; 258 u8 loss_mfa;
288 ucchar prbs; 259 u8 prbs;
289} falc_status_t; 260} falc_status_t;
290 261
291typedef struct rsv_x21_status { 262typedef struct rsv_x21_status {
292 ucchar dcd; 263 u8 dcd;
293 ucchar dsr; 264 u8 dsr;
294 ucchar cts; 265 u8 cts;
295 ucchar rts; 266 u8 rts;
296 ucchar dtr; 267 u8 dtr;
297} rsv_x21_status_t; 268} rsv_x21_status_t;
298 269
299typedef struct pc300stats { 270typedef struct pc300stats {
300 int hw_type; 271 int hw_type;
301 uclong line_on; 272 u32 line_on;
302 uclong line_off; 273 u32 line_off;
303 struct net_device_stats gen_stats; 274 struct net_device_stats gen_stats;
304 falc_t te_stats; 275 falc_t te_stats;
305} pc300stats_t; 276} pc300stats_t;
@@ -317,28 +288,19 @@ typedef struct pc300loopback {
317 288
318typedef struct pc300patterntst { 289typedef struct pc300patterntst {
319 char patrntst_on; /* 0 - off; 1 - on; 2 - read num_errors */ 290 char patrntst_on; /* 0 - off; 1 - on; 2 - read num_errors */
320 ucshort num_errors; 291 u16 num_errors;
321} pc300patterntst_t; 292} pc300patterntst_t;
322 293
323typedef struct pc300dev { 294typedef struct pc300dev {
324 void *if_ptr; /* General purpose pointer */
325 struct pc300ch *chan; 295 struct pc300ch *chan;
326 ucchar trace_on; 296 u8 trace_on;
327 uclong line_on; /* DCD(X.21, RSV) / sync(TE) change counters */ 297 u32 line_on; /* DCD(X.21, RSV) / sync(TE) change counters */
328 uclong line_off; 298 u32 line_off;
329#ifdef __KERNEL__
330 char name[16]; 299 char name[16];
331 struct net_device *dev; 300 struct net_device *dev;
332
333 void *private;
334 struct sk_buff *tx_skb;
335 union { /* This union has all the protocol-specific structures */
336 struct ppp_device pppdev;
337 }ifu;
338#ifdef CONFIG_PC300_MLPPP 301#ifdef CONFIG_PC300_MLPPP
339 void *cpc_tty; /* information to PC300 TTY driver */ 302 void *cpc_tty; /* information to PC300 TTY driver */
340#endif 303#endif
341#endif /* __KERNEL__ */
342}pc300dev_t; 304}pc300dev_t;
343 305
344typedef struct pc300hw { 306typedef struct pc300hw {
@@ -346,43 +308,42 @@ typedef struct pc300hw {
346 int bus; /* Bus (PCI, PMC, etc.) */ 308 int bus; /* Bus (PCI, PMC, etc.) */
347 int nchan; /* number of channels */ 309 int nchan; /* number of channels */
348 int irq; /* interrupt request level */ 310 int irq; /* interrupt request level */
349 uclong clock; /* Board clock */ 311 u32 clock; /* Board clock */
350 ucchar cpld_id; /* CPLD ID (TE only) */ 312 u8 cpld_id; /* CPLD ID (TE only) */
351 ucshort cpld_reg1; /* CPLD reg 1 (TE only) */ 313 u16 cpld_reg1; /* CPLD reg 1 (TE only) */
352 ucshort cpld_reg2; /* CPLD reg 2 (TE only) */ 314 u16 cpld_reg2; /* CPLD reg 2 (TE only) */
353 ucshort gpioc_reg; /* PLX GPIOC reg */ 315 u16 gpioc_reg; /* PLX GPIOC reg */
354 ucshort intctl_reg; /* PLX Int Ctrl/Status reg */ 316 u16 intctl_reg; /* PLX Int Ctrl/Status reg */
355 uclong iophys; /* PLX registers I/O base */ 317 u32 iophys; /* PLX registers I/O base */
356 uclong iosize; /* PLX registers I/O size */ 318 u32 iosize; /* PLX registers I/O size */
357 uclong plxphys; /* PLX registers MMIO base (physical) */ 319 u32 plxphys; /* PLX registers MMIO base (physical) */
358 void __iomem * plxbase; /* PLX registers MMIO base (virtual) */ 320 void __iomem * plxbase; /* PLX registers MMIO base (virtual) */
359 uclong plxsize; /* PLX registers MMIO size */ 321 u32 plxsize; /* PLX registers MMIO size */
360 uclong scaphys; /* SCA registers MMIO base (physical) */ 322 u32 scaphys; /* SCA registers MMIO base (physical) */
361 void __iomem * scabase; /* SCA registers MMIO base (virtual) */ 323 void __iomem * scabase; /* SCA registers MMIO base (virtual) */
362 uclong scasize; /* SCA registers MMIO size */ 324 u32 scasize; /* SCA registers MMIO size */
363 uclong ramphys; /* On-board RAM MMIO base (physical) */ 325 u32 ramphys; /* On-board RAM MMIO base (physical) */
364 void __iomem * rambase; /* On-board RAM MMIO base (virtual) */ 326 void __iomem * rambase; /* On-board RAM MMIO base (virtual) */
365 uclong alloc_ramsize; /* RAM MMIO size allocated by the PCI bridge */ 327 u32 alloc_ramsize; /* RAM MMIO size allocated by the PCI bridge */
366 uclong ramsize; /* On-board RAM MMIO size */ 328 u32 ramsize; /* On-board RAM MMIO size */
367 uclong falcphys; /* FALC registers MMIO base (physical) */ 329 u32 falcphys; /* FALC registers MMIO base (physical) */
368 void __iomem * falcbase;/* FALC registers MMIO base (virtual) */ 330 void __iomem * falcbase;/* FALC registers MMIO base (virtual) */
369 uclong falcsize; /* FALC registers MMIO size */ 331 u32 falcsize; /* FALC registers MMIO size */
370} pc300hw_t; 332} pc300hw_t;
371 333
372typedef struct pc300chconf { 334typedef struct pc300chconf {
373 sync_serial_settings phys_settings; /* Clock type/rate (in bps), 335 sync_serial_settings phys_settings; /* Clock type/rate (in bps),
374 loopback mode */ 336 loopback mode */
375 raw_hdlc_proto proto_settings; /* Encoding, parity (CRC) */ 337 raw_hdlc_proto proto_settings; /* Encoding, parity (CRC) */
376 uclong media; /* HW media (RS232, V.35, etc.) */ 338 u32 media; /* HW media (RS232, V.35, etc.) */
377 uclong proto; /* Protocol (PPP, X.25, etc.) */ 339 u32 proto; /* Protocol (PPP, X.25, etc.) */
378 ucchar monitor; /* Monitor mode (0 = off, !0 = on) */
379 340
380 /* TE-specific parameters */ 341 /* TE-specific parameters */
381 ucchar lcode; /* Line Code (AMI, B8ZS, etc.) */ 342 u8 lcode; /* Line Code (AMI, B8ZS, etc.) */
382 ucchar fr_mode; /* Frame Mode (ESF, D4, etc.) */ 343 u8 fr_mode; /* Frame Mode (ESF, D4, etc.) */
383 ucchar lbo; /* Line Build Out */ 344 u8 lbo; /* Line Build Out */
384 ucchar rx_sens; /* Rx Sensitivity (long- or short-haul) */ 345 u8 rx_sens; /* Rx Sensitivity (long- or short-haul) */
385 uclong tslot_bitmap; /* bit[i]=1 => timeslot _i_ is active */ 346 u32 tslot_bitmap; /* bit[i]=1 => timeslot _i_ is active */
386} pc300chconf_t; 347} pc300chconf_t;
387 348
388typedef struct pc300ch { 349typedef struct pc300ch {
@@ -390,20 +351,18 @@ typedef struct pc300ch {
390 int channel; 351 int channel;
391 pc300dev_t d; 352 pc300dev_t d;
392 pc300chconf_t conf; 353 pc300chconf_t conf;
393 ucchar tx_first_bd; /* First TX DMA block descr. w/ data */ 354 u8 tx_first_bd; /* First TX DMA block descr. w/ data */
394 ucchar tx_next_bd; /* Next free TX DMA block descriptor */ 355 u8 tx_next_bd; /* Next free TX DMA block descriptor */
395 ucchar rx_first_bd; /* First free RX DMA block descriptor */ 356 u8 rx_first_bd; /* First free RX DMA block descriptor */
396 ucchar rx_last_bd; /* Last free RX DMA block descriptor */ 357 u8 rx_last_bd; /* Last free RX DMA block descriptor */
397 ucchar nfree_tx_bd; /* Number of free TX DMA block descriptors */ 358 u8 nfree_tx_bd; /* Number of free TX DMA block descriptors */
398 falc_t falc; /* FALC structure (TE only) */ 359 falc_t falc; /* FALC structure (TE only) */
399} pc300ch_t; 360} pc300ch_t;
400 361
401typedef struct pc300 { 362typedef struct pc300 {
402 pc300hw_t hw; /* hardware config. */ 363 pc300hw_t hw; /* hardware config. */
403 pc300ch_t chan[PC300_MAXCHAN]; 364 pc300ch_t chan[PC300_MAXCHAN];
404#ifdef __KERNEL__
405 spinlock_t card_lock; 365 spinlock_t card_lock;
406#endif /* __KERNEL__ */
407} pc300_t; 366} pc300_t;
408 367
409typedef struct pc300conf { 368typedef struct pc300conf {
@@ -471,12 +430,7 @@ enum pc300_loopback_cmds {
471#define PC300_TX_QUEUE_LEN 100 430#define PC300_TX_QUEUE_LEN 100
472#define PC300_DEF_MTU 1600 431#define PC300_DEF_MTU 1600
473 432
474#ifdef __KERNEL__
475/* Function Prototypes */ 433/* Function Prototypes */
476void tx_dma_start(pc300_t *, int);
477int cpc_open(struct net_device *dev); 434int cpc_open(struct net_device *dev);
478int cpc_set_media(hdlc_device *, int);
479#endif /* __KERNEL__ */
480 435
481#endif /* _PC300_H */ 436#endif /* _PC300_H */
482
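
pc300.h gets the same treatment: the private CY_TYPES typedefs (ucchar/ucshort/uclong/ucdouble) and the userspace #else branches disappear, leaving kernel-only cpc_read*/cpc_write*() accessors that keep their write-then-barrier shape. A usage sketch against the FALC register window (the offset argument is a placeholder, not taken from the patch):

static void falc_write_sketch(void __iomem *falcbase, unsigned int offset, u8 val)
{
        cpc_writeb(falcbase + offset, val);  /* writeb((u8)val, port) followed by mb() */
}
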
diff --git a/drivers/net/wan/pc300_drv.c b/drivers/net/wan/pc300_drv.c
index 334170527755..d0a8d1e352ac 100644
--- a/drivers/net/wan/pc300_drv.c
+++ b/drivers/net/wan/pc300_drv.c
@@ -227,8 +227,6 @@ static char rcsid[] =
227#include <linux/netdevice.h> 227#include <linux/netdevice.h>
228#include <linux/spinlock.h> 228#include <linux/spinlock.h>
229#include <linux/if.h> 229#include <linux/if.h>
230
231#include <net/syncppp.h>
232#include <net/arp.h> 230#include <net/arp.h>
233 231
234#include <asm/io.h> 232#include <asm/io.h>
@@ -285,8 +283,8 @@ static void rx_dma_buf_init(pc300_t *, int);
285static void tx_dma_buf_check(pc300_t *, int); 283static void tx_dma_buf_check(pc300_t *, int);
286static void rx_dma_buf_check(pc300_t *, int); 284static void rx_dma_buf_check(pc300_t *, int);
287static irqreturn_t cpc_intr(int, void *); 285static irqreturn_t cpc_intr(int, void *);
288static int clock_rate_calc(uclong, uclong, int *); 286static int clock_rate_calc(u32, u32, int *);
289static uclong detect_ram(pc300_t *); 287static u32 detect_ram(pc300_t *);
290static void plx_init(pc300_t *); 288static void plx_init(pc300_t *);
291static void cpc_trace(struct net_device *, struct sk_buff *, char); 289static void cpc_trace(struct net_device *, struct sk_buff *, char);
292static int cpc_attach(struct net_device *, unsigned short, unsigned short); 290static int cpc_attach(struct net_device *, unsigned short, unsigned short);
@@ -311,10 +309,10 @@ static void tx_dma_buf_pt_init(pc300_t * card, int ch)
311 + DMA_TX_BD_BASE + ch_factor * sizeof(pcsca_bd_t)); 309 + DMA_TX_BD_BASE + ch_factor * sizeof(pcsca_bd_t));
312 310
313 for (i = 0; i < N_DMA_TX_BUF; i++, ptdescr++) { 311 for (i = 0; i < N_DMA_TX_BUF; i++, ptdescr++) {
314 cpc_writel(&ptdescr->next, (uclong) (DMA_TX_BD_BASE + 312 cpc_writel(&ptdescr->next, (u32)(DMA_TX_BD_BASE +
315 (ch_factor + ((i + 1) & (N_DMA_TX_BUF - 1))) * sizeof(pcsca_bd_t))); 313 (ch_factor + ((i + 1) & (N_DMA_TX_BUF - 1))) * sizeof(pcsca_bd_t)));
316 cpc_writel(&ptdescr->ptbuf, 314 cpc_writel(&ptdescr->ptbuf,
317 (uclong) (DMA_TX_BASE + (ch_factor + i) * BD_DEF_LEN)); 315 (u32)(DMA_TX_BASE + (ch_factor + i) * BD_DEF_LEN));
318 } 316 }
319} 317}
320 318
@@ -341,10 +339,10 @@ static void rx_dma_buf_pt_init(pc300_t * card, int ch)
341 + DMA_RX_BD_BASE + ch_factor * sizeof(pcsca_bd_t)); 339 + DMA_RX_BD_BASE + ch_factor * sizeof(pcsca_bd_t));
342 340
343 for (i = 0; i < N_DMA_RX_BUF; i++, ptdescr++) { 341 for (i = 0; i < N_DMA_RX_BUF; i++, ptdescr++) {
344 cpc_writel(&ptdescr->next, (uclong) (DMA_RX_BD_BASE + 342 cpc_writel(&ptdescr->next, (u32)(DMA_RX_BD_BASE +
345 (ch_factor + ((i + 1) & (N_DMA_RX_BUF - 1))) * sizeof(pcsca_bd_t))); 343 (ch_factor + ((i + 1) & (N_DMA_RX_BUF - 1))) * sizeof(pcsca_bd_t)));
346 cpc_writel(&ptdescr->ptbuf, 344 cpc_writel(&ptdescr->ptbuf,
347 (uclong) (DMA_RX_BASE + (ch_factor + i) * BD_DEF_LEN)); 345 (u32)(DMA_RX_BASE + (ch_factor + i) * BD_DEF_LEN));
348 } 346 }
349} 347}
350 348
@@ -367,8 +365,8 @@ static void tx_dma_buf_check(pc300_t * card, int ch)
367{ 365{
368 volatile pcsca_bd_t __iomem *ptdescr; 366 volatile pcsca_bd_t __iomem *ptdescr;
369 int i; 367 int i;
370 ucshort first_bd = card->chan[ch].tx_first_bd; 368 u16 first_bd = card->chan[ch].tx_first_bd;
371 ucshort next_bd = card->chan[ch].tx_next_bd; 369 u16 next_bd = card->chan[ch].tx_next_bd;
372 370
373 printk("#CH%d: f_bd = %d(0x%08zx), n_bd = %d(0x%08zx)\n", ch, 371 printk("#CH%d: f_bd = %d(0x%08zx), n_bd = %d(0x%08zx)\n", ch,
374 first_bd, TX_BD_ADDR(ch, first_bd), 372 first_bd, TX_BD_ADDR(ch, first_bd),
@@ -392,9 +390,9 @@ static void tx1_dma_buf_check(pc300_t * card, int ch)
392{ 390{
393 volatile pcsca_bd_t __iomem *ptdescr; 391 volatile pcsca_bd_t __iomem *ptdescr;
394 int i; 392 int i;
395 ucshort first_bd = card->chan[ch].tx_first_bd; 393 u16 first_bd = card->chan[ch].tx_first_bd;
396 ucshort next_bd = card->chan[ch].tx_next_bd; 394 u16 next_bd = card->chan[ch].tx_next_bd;
397 uclong scabase = card->hw.scabase; 395 u32 scabase = card->hw.scabase;
398 396
399 printk ("\nnfree_tx_bd = %d \n", card->chan[ch].nfree_tx_bd); 397 printk ("\nnfree_tx_bd = %d \n", card->chan[ch].nfree_tx_bd);
400 printk("#CH%d: f_bd = %d(0x%08x), n_bd = %d(0x%08x)\n", ch, 398 printk("#CH%d: f_bd = %d(0x%08x), n_bd = %d(0x%08x)\n", ch,
@@ -413,13 +411,13 @@ static void tx1_dma_buf_check(pc300_t * card, int ch)
413 printk("\n"); 411 printk("\n");
414} 412}
415#endif 413#endif
416 414
417static void rx_dma_buf_check(pc300_t * card, int ch) 415static void rx_dma_buf_check(pc300_t * card, int ch)
418{ 416{
419 volatile pcsca_bd_t __iomem *ptdescr; 417 volatile pcsca_bd_t __iomem *ptdescr;
420 int i; 418 int i;
421 ucshort first_bd = card->chan[ch].rx_first_bd; 419 u16 first_bd = card->chan[ch].rx_first_bd;
422 ucshort last_bd = card->chan[ch].rx_last_bd; 420 u16 last_bd = card->chan[ch].rx_last_bd;
423 int ch_factor; 421 int ch_factor;
424 422
425 ch_factor = ch * N_DMA_RX_BUF; 423 ch_factor = ch * N_DMA_RX_BUF;
@@ -440,9 +438,9 @@ static void rx_dma_buf_check(pc300_t * card, int ch)
440static int dma_get_rx_frame_size(pc300_t * card, int ch) 438static int dma_get_rx_frame_size(pc300_t * card, int ch)
441{ 439{
442 volatile pcsca_bd_t __iomem *ptdescr; 440 volatile pcsca_bd_t __iomem *ptdescr;
443 ucshort first_bd = card->chan[ch].rx_first_bd; 441 u16 first_bd = card->chan[ch].rx_first_bd;
444 int rcvd = 0; 442 int rcvd = 0;
445 volatile ucchar status; 443 volatile u8 status;
446 444
447 ptdescr = (card->hw.rambase + RX_BD_ADDR(ch, first_bd)); 445 ptdescr = (card->hw.rambase + RX_BD_ADDR(ch, first_bd));
448 while ((status = cpc_readb(&ptdescr->status)) & DST_OSB) { 446 while ((status = cpc_readb(&ptdescr->status)) & DST_OSB) {
@@ -462,12 +460,12 @@ static int dma_get_rx_frame_size(pc300_t * card, int ch)
462 * dma_buf_write: writes a frame to the Tx DMA buffers 460 * dma_buf_write: writes a frame to the Tx DMA buffers
463 * NOTE: this function writes one frame at a time. 461 * NOTE: this function writes one frame at a time.
464 */ 462 */
465static int dma_buf_write(pc300_t * card, int ch, ucchar * ptdata, int len) 463static int dma_buf_write(pc300_t *card, int ch, u8 *ptdata, int len)
466{ 464{
467 int i, nchar; 465 int i, nchar;
468 volatile pcsca_bd_t __iomem *ptdescr; 466 volatile pcsca_bd_t __iomem *ptdescr;
469 int tosend = len; 467 int tosend = len;
470 ucchar nbuf = ((len - 1) / BD_DEF_LEN) + 1; 468 u8 nbuf = ((len - 1) / BD_DEF_LEN) + 1;
471 469
472 if (nbuf >= card->chan[ch].nfree_tx_bd) { 470 if (nbuf >= card->chan[ch].nfree_tx_bd) {
473 return -ENOMEM; 471 return -ENOMEM;
@@ -509,7 +507,7 @@ static int dma_buf_read(pc300_t * card, int ch, struct sk_buff *skb)
509 pc300ch_t *chan = (pc300ch_t *) & card->chan[ch]; 507 pc300ch_t *chan = (pc300ch_t *) & card->chan[ch];
510 volatile pcsca_bd_t __iomem *ptdescr; 508 volatile pcsca_bd_t __iomem *ptdescr;
511 int rcvd = 0; 509 int rcvd = 0;
512 volatile ucchar status; 510 volatile u8 status;
513 511
514 ptdescr = (card->hw.rambase + 512 ptdescr = (card->hw.rambase +
515 RX_BD_ADDR(ch, chan->rx_first_bd)); 513 RX_BD_ADDR(ch, chan->rx_first_bd));
@@ -563,8 +561,8 @@ static int dma_buf_read(pc300_t * card, int ch, struct sk_buff *skb)
563static void tx_dma_stop(pc300_t * card, int ch) 561static void tx_dma_stop(pc300_t * card, int ch)
564{ 562{
565 void __iomem *scabase = card->hw.scabase; 563 void __iomem *scabase = card->hw.scabase;
566 ucchar drr_ena_bit = 1 << (5 + 2 * ch); 564 u8 drr_ena_bit = 1 << (5 + 2 * ch);
567 ucchar drr_rst_bit = 1 << (1 + 2 * ch); 565 u8 drr_rst_bit = 1 << (1 + 2 * ch);
568 566
569 /* Disable DMA */ 567 /* Disable DMA */
570 cpc_writeb(scabase + DRR, drr_ena_bit); 568 cpc_writeb(scabase + DRR, drr_ena_bit);
@@ -574,8 +572,8 @@ static void tx_dma_stop(pc300_t * card, int ch)
574static void rx_dma_stop(pc300_t * card, int ch) 572static void rx_dma_stop(pc300_t * card, int ch)
575{ 573{
576 void __iomem *scabase = card->hw.scabase; 574 void __iomem *scabase = card->hw.scabase;
577 ucchar drr_ena_bit = 1 << (4 + 2 * ch); 575 u8 drr_ena_bit = 1 << (4 + 2 * ch);
578 ucchar drr_rst_bit = 1 << (2 * ch); 576 u8 drr_rst_bit = 1 << (2 * ch);
579 577
580 /* Disable DMA */ 578 /* Disable DMA */
581 cpc_writeb(scabase + DRR, drr_ena_bit); 579 cpc_writeb(scabase + DRR, drr_ena_bit);
@@ -607,7 +605,7 @@ static void rx_dma_start(pc300_t * card, int ch)
607/*************************/ 605/*************************/
608/*** FALC Routines ***/ 606/*** FALC Routines ***/
609/*************************/ 607/*************************/
610static void falc_issue_cmd(pc300_t * card, int ch, ucchar cmd) 608static void falc_issue_cmd(pc300_t *card, int ch, u8 cmd)
611{ 609{
612 void __iomem *falcbase = card->hw.falcbase; 610 void __iomem *falcbase = card->hw.falcbase;
613 unsigned long i = 0; 611 unsigned long i = 0;
@@ -675,7 +673,7 @@ static void falc_intr_enable(pc300_t * card, int ch)
675static void falc_open_timeslot(pc300_t * card, int ch, int timeslot) 673static void falc_open_timeslot(pc300_t * card, int ch, int timeslot)
676{ 674{
677 void __iomem *falcbase = card->hw.falcbase; 675 void __iomem *falcbase = card->hw.falcbase;
678 ucchar tshf = card->chan[ch].falc.offset; 676 u8 tshf = card->chan[ch].falc.offset;
679 677
680 cpc_writeb(falcbase + F_REG((ICB1 + (timeslot - tshf) / 8), ch), 678 cpc_writeb(falcbase + F_REG((ICB1 + (timeslot - tshf) / 8), ch),
681 cpc_readb(falcbase + F_REG((ICB1 + (timeslot - tshf) / 8), ch)) & 679 cpc_readb(falcbase + F_REG((ICB1 + (timeslot - tshf) / 8), ch)) &
@@ -691,7 +689,7 @@ static void falc_open_timeslot(pc300_t * card, int ch, int timeslot)
691static void falc_close_timeslot(pc300_t * card, int ch, int timeslot) 689static void falc_close_timeslot(pc300_t * card, int ch, int timeslot)
692{ 690{
693 void __iomem *falcbase = card->hw.falcbase; 691 void __iomem *falcbase = card->hw.falcbase;
694 ucchar tshf = card->chan[ch].falc.offset; 692 u8 tshf = card->chan[ch].falc.offset;
695 693
696 cpc_writeb(falcbase + F_REG((ICB1 + (timeslot - tshf) / 8), ch), 694 cpc_writeb(falcbase + F_REG((ICB1 + (timeslot - tshf) / 8), ch),
697 cpc_readb(falcbase + F_REG((ICB1 + (timeslot - tshf) / 8), ch)) | 695 cpc_readb(falcbase + F_REG((ICB1 + (timeslot - tshf) / 8), ch)) |
@@ -812,7 +810,7 @@ static void falc_init_t1(pc300_t * card, int ch)
812 pc300chconf_t *conf = (pc300chconf_t *) & chan->conf; 810 pc300chconf_t *conf = (pc300chconf_t *) & chan->conf;
813 falc_t *pfalc = (falc_t *) & chan->falc; 811 falc_t *pfalc = (falc_t *) & chan->falc;
814 void __iomem *falcbase = card->hw.falcbase; 812 void __iomem *falcbase = card->hw.falcbase;
815 ucchar dja = (ch ? (LIM2_DJA2 | LIM2_DJA1) : 0); 813 u8 dja = (ch ? (LIM2_DJA2 | LIM2_DJA1) : 0);
816 814
817 /* Switch to T1 mode (PCM 24) */ 815 /* Switch to T1 mode (PCM 24) */
818 cpc_writeb(falcbase + F_REG(FMR1, ch), FMR1_PMOD); 816 cpc_writeb(falcbase + F_REG(FMR1, ch), FMR1_PMOD);
@@ -981,7 +979,7 @@ static void falc_init_e1(pc300_t * card, int ch)
981 pc300chconf_t *conf = (pc300chconf_t *) & chan->conf; 979 pc300chconf_t *conf = (pc300chconf_t *) & chan->conf;
982 falc_t *pfalc = (falc_t *) & chan->falc; 980 falc_t *pfalc = (falc_t *) & chan->falc;
983 void __iomem *falcbase = card->hw.falcbase; 981 void __iomem *falcbase = card->hw.falcbase;
984 ucchar dja = (ch ? (LIM2_DJA2 | LIM2_DJA1) : 0); 982 u8 dja = (ch ? (LIM2_DJA2 | LIM2_DJA1) : 0);
985 983
986 /* Switch to E1 mode (PCM 30) */ 984 /* Switch to E1 mode (PCM 30) */
987 cpc_writeb(falcbase + F_REG(FMR1, ch), 985 cpc_writeb(falcbase + F_REG(FMR1, ch),
@@ -1187,7 +1185,7 @@ static void te_config(pc300_t * card, int ch)
1187 pc300chconf_t *conf = (pc300chconf_t *) & chan->conf; 1185 pc300chconf_t *conf = (pc300chconf_t *) & chan->conf;
1188 falc_t *pfalc = (falc_t *) & chan->falc; 1186 falc_t *pfalc = (falc_t *) & chan->falc;
1189 void __iomem *falcbase = card->hw.falcbase; 1187 void __iomem *falcbase = card->hw.falcbase;
1190 ucchar dummy; 1188 u8 dummy;
1191 unsigned long flags; 1189 unsigned long flags;
1192 1190
1193 memset(pfalc, 0, sizeof(falc_t)); 1191 memset(pfalc, 0, sizeof(falc_t));
@@ -1403,7 +1401,7 @@ static void falc_update_stats(pc300_t * card, int ch)
1403 pc300chconf_t *conf = (pc300chconf_t *) & chan->conf; 1401 pc300chconf_t *conf = (pc300chconf_t *) & chan->conf;
1404 falc_t *pfalc = (falc_t *) & chan->falc; 1402 falc_t *pfalc = (falc_t *) & chan->falc;
1405 void __iomem *falcbase = card->hw.falcbase; 1403 void __iomem *falcbase = card->hw.falcbase;
1406 ucshort counter; 1404 u16 counter;
1407 1405
1408 counter = cpc_readb(falcbase + F_REG(FECL, ch)); 1406 counter = cpc_readb(falcbase + F_REG(FECL, ch));
1409 counter |= cpc_readb(falcbase + F_REG(FECH, ch)) << 8; 1407 counter |= cpc_readb(falcbase + F_REG(FECH, ch)) << 8;
@@ -1729,7 +1727,7 @@ static void falc_pattern_test(pc300_t * card, int ch, unsigned int activate)
1729 * Description: This routine returns the bit error counter value 1727 * Description: This routine returns the bit error counter value
1730 *---------------------------------------------------------------------------- 1728 *----------------------------------------------------------------------------
1731 */ 1729 */
1732static ucshort falc_pattern_test_error(pc300_t * card, int ch) 1730static u16 falc_pattern_test_error(pc300_t * card, int ch)
1733{ 1731{
1734 pc300ch_t *chan = (pc300ch_t *) & card->chan[ch]; 1732 pc300ch_t *chan = (pc300ch_t *) & card->chan[ch];
1735 falc_t *pfalc = (falc_t *) & chan->falc; 1733 falc_t *pfalc = (falc_t *) & chan->falc;
@@ -1776,7 +1774,7 @@ static void cpc_tx_timeout(struct net_device *dev)
1776 pc300_t *card = (pc300_t *) chan->card; 1774 pc300_t *card = (pc300_t *) chan->card;
1777 int ch = chan->channel; 1775 int ch = chan->channel;
1778 unsigned long flags; 1776 unsigned long flags;
1779 ucchar ilar; 1777 u8 ilar;
1780 1778
1781 dev->stats.tx_errors++; 1779 dev->stats.tx_errors++;
1782 dev->stats.tx_aborted_errors++; 1780 dev->stats.tx_aborted_errors++;
@@ -1807,11 +1805,7 @@ static int cpc_queue_xmit(struct sk_buff *skb, struct net_device *dev)
1807 int i; 1805 int i;
1808#endif 1806#endif
1809 1807
1810 if (chan->conf.monitor) { 1808 if (!netif_carrier_ok(dev)) {
1811 /* In monitor mode no Tx is done: ignore packet */
1812 dev_kfree_skb(skb);
1813 return 0;
1814 } else if (!netif_carrier_ok(dev)) {
1815 /* DCD must be OFF: drop packet */ 1809 /* DCD must be OFF: drop packet */
1816 dev_kfree_skb(skb); 1810 dev_kfree_skb(skb);
1817 dev->stats.tx_errors++; 1811 dev->stats.tx_errors++;
@@ -1836,7 +1830,7 @@ static int cpc_queue_xmit(struct sk_buff *skb, struct net_device *dev)
1836 } 1830 }
1837 1831
1838 /* Write buffer to DMA buffers */ 1832 /* Write buffer to DMA buffers */
1839 if (dma_buf_write(card, ch, (ucchar *) skb->data, skb->len) != 0) { 1833 if (dma_buf_write(card, ch, (u8 *)skb->data, skb->len) != 0) {
1840// printk("%s: write error. Dropping TX packet.\n", dev->name); 1834// printk("%s: write error. Dropping TX packet.\n", dev->name);
1841 netif_stop_queue(dev); 1835 netif_stop_queue(dev);
1842 dev_kfree_skb(skb); 1836 dev_kfree_skb(skb);
@@ -2001,7 +1995,7 @@ static void sca_tx_intr(pc300dev_t *dev)
2001static void sca_intr(pc300_t * card) 1995static void sca_intr(pc300_t * card)
2002{ 1996{
2003 void __iomem *scabase = card->hw.scabase; 1997 void __iomem *scabase = card->hw.scabase;
2004 volatile uclong status; 1998 volatile u32 status;
2005 int ch; 1999 int ch;
2006 int intr_count = 0; 2000 int intr_count = 0;
2007 unsigned char dsr_rx; 2001 unsigned char dsr_rx;
@@ -2016,7 +2010,7 @@ static void sca_intr(pc300_t * card)
2016 2010
2017 /**** Reception ****/ 2011 /**** Reception ****/
2018 if (status & IR0_DRX((IR0_DMIA | IR0_DMIB), ch)) { 2012 if (status & IR0_DRX((IR0_DMIA | IR0_DMIB), ch)) {
2019 ucchar drx_stat = cpc_readb(scabase + DSR_RX(ch)); 2013 u8 drx_stat = cpc_readb(scabase + DSR_RX(ch));
2020 2014
2021 /* Clear RX interrupts */ 2015 /* Clear RX interrupts */
2022 cpc_writeb(scabase + DSR_RX(ch), drx_stat | DSR_DWE); 2016 cpc_writeb(scabase + DSR_RX(ch), drx_stat | DSR_DWE);
@@ -2090,7 +2084,7 @@ static void sca_intr(pc300_t * card)
2090 2084
2091 /**** Transmission ****/ 2085 /**** Transmission ****/
2092 if (status & IR0_DTX((IR0_EFT | IR0_DMIA | IR0_DMIB), ch)) { 2086 if (status & IR0_DTX((IR0_EFT | IR0_DMIA | IR0_DMIB), ch)) {
2093 ucchar dtx_stat = cpc_readb(scabase + DSR_TX(ch)); 2087 u8 dtx_stat = cpc_readb(scabase + DSR_TX(ch));
2094 2088
2095 /* Clear TX interrupts */ 2089 /* Clear TX interrupts */
2096 cpc_writeb(scabase + DSR_TX(ch), dtx_stat | DSR_DWE); 2090 cpc_writeb(scabase + DSR_TX(ch), dtx_stat | DSR_DWE);
@@ -2134,7 +2128,7 @@ static void sca_intr(pc300_t * card)
2134 2128
2135 /**** MSCI ****/ 2129 /**** MSCI ****/
2136 if (status & IR0_M(IR0_RXINTA, ch)) { 2130 if (status & IR0_M(IR0_RXINTA, ch)) {
2137 ucchar st1 = cpc_readb(scabase + M_REG(ST1, ch)); 2131 u8 st1 = cpc_readb(scabase + M_REG(ST1, ch));
2138 2132
2139 /* Clear MSCI interrupts */ 2133 /* Clear MSCI interrupts */
2140 cpc_writeb(scabase + M_REG(ST1, ch), st1); 2134 cpc_writeb(scabase + M_REG(ST1, ch), st1);
@@ -2176,7 +2170,7 @@ static void sca_intr(pc300_t * card)
2176 } 2170 }
2177} 2171}
2178 2172
2179static void falc_t1_loop_detection(pc300_t * card, int ch, ucchar frs1) 2173static void falc_t1_loop_detection(pc300_t *card, int ch, u8 frs1)
2180{ 2174{
2181 pc300ch_t *chan = (pc300ch_t *) & card->chan[ch]; 2175 pc300ch_t *chan = (pc300ch_t *) & card->chan[ch];
2182 falc_t *pfalc = (falc_t *) & chan->falc; 2176 falc_t *pfalc = (falc_t *) & chan->falc;
@@ -2201,7 +2195,7 @@ static void falc_t1_loop_detection(pc300_t * card, int ch, ucchar frs1)
2201 } 2195 }
2202} 2196}
2203 2197
2204static void falc_e1_loop_detection(pc300_t * card, int ch, ucchar rsp) 2198static void falc_e1_loop_detection(pc300_t *card, int ch, u8 rsp)
2205{ 2199{
2206 pc300ch_t *chan = (pc300ch_t *) & card->chan[ch]; 2200 pc300ch_t *chan = (pc300ch_t *) & card->chan[ch];
2207 falc_t *pfalc = (falc_t *) & chan->falc; 2201 falc_t *pfalc = (falc_t *) & chan->falc;
@@ -2231,8 +2225,8 @@ static void falc_t1_intr(pc300_t * card, int ch)
2231 pc300ch_t *chan = (pc300ch_t *) & card->chan[ch]; 2225 pc300ch_t *chan = (pc300ch_t *) & card->chan[ch];
2232 falc_t *pfalc = (falc_t *) & chan->falc; 2226 falc_t *pfalc = (falc_t *) & chan->falc;
2233 void __iomem *falcbase = card->hw.falcbase; 2227 void __iomem *falcbase = card->hw.falcbase;
2234 ucchar isr0, isr3, gis; 2228 u8 isr0, isr3, gis;
2235 ucchar dummy; 2229 u8 dummy;
2236 2230
2237 while ((gis = cpc_readb(falcbase + F_REG(GIS, ch))) != 0) { 2231 while ((gis = cpc_readb(falcbase + F_REG(GIS, ch))) != 0) {
2238 if (gis & GIS_ISR0) { 2232 if (gis & GIS_ISR0) {
@@ -2278,8 +2272,8 @@ static void falc_e1_intr(pc300_t * card, int ch)
2278 pc300ch_t *chan = (pc300ch_t *) & card->chan[ch]; 2272 pc300ch_t *chan = (pc300ch_t *) & card->chan[ch];
2279 falc_t *pfalc = (falc_t *) & chan->falc; 2273 falc_t *pfalc = (falc_t *) & chan->falc;
2280 void __iomem *falcbase = card->hw.falcbase; 2274 void __iomem *falcbase = card->hw.falcbase;
2281 ucchar isr1, isr2, isr3, gis, rsp; 2275 u8 isr1, isr2, isr3, gis, rsp;
2282 ucchar dummy; 2276 u8 dummy;
2283 2277
2284 while ((gis = cpc_readb(falcbase + F_REG(GIS, ch))) != 0) { 2278 while ((gis = cpc_readb(falcbase + F_REG(GIS, ch))) != 0) {
2285 rsp = cpc_readb(falcbase + F_REG(RSP, ch)); 2279 rsp = cpc_readb(falcbase + F_REG(RSP, ch));
@@ -2361,7 +2355,7 @@ static void falc_intr(pc300_t * card)
2361static irqreturn_t cpc_intr(int irq, void *dev_id) 2355static irqreturn_t cpc_intr(int irq, void *dev_id)
2362{ 2356{
2363 pc300_t *card = dev_id; 2357 pc300_t *card = dev_id;
2364 volatile ucchar plx_status; 2358 volatile u8 plx_status;
2365 2359
2366 if (!card) { 2360 if (!card) {
2367#ifdef PC300_DEBUG_INTR 2361#ifdef PC300_DEBUG_INTR
@@ -2400,7 +2394,7 @@ static irqreturn_t cpc_intr(int irq, void *dev_id)
2400 2394
2401static void cpc_sca_status(pc300_t * card, int ch) 2395static void cpc_sca_status(pc300_t * card, int ch)
2402{ 2396{
2403 ucchar ilar; 2397 u8 ilar;
2404 void __iomem *scabase = card->hw.scabase; 2398 void __iomem *scabase = card->hw.scabase;
2405 unsigned long flags; 2399 unsigned long flags;
2406 2400
@@ -2818,7 +2812,7 @@ static int cpc_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
2818 } 2812 }
2819} 2813}
2820 2814
2821static int clock_rate_calc(uclong rate, uclong clock, int *br_io) 2815static int clock_rate_calc(u32 rate, u32 clock, int *br_io)
2822{ 2816{
2823 int br, tc; 2817 int br, tc;
2824 int br_pwr, error; 2818 int br_pwr, error;
@@ -2855,12 +2849,12 @@ static int ch_config(pc300dev_t * d)
2855 void __iomem *scabase = card->hw.scabase; 2849 void __iomem *scabase = card->hw.scabase;
2856 void __iomem *plxbase = card->hw.plxbase; 2850 void __iomem *plxbase = card->hw.plxbase;
2857 int ch = chan->channel; 2851 int ch = chan->channel;
2858 uclong clkrate = chan->conf.phys_settings.clock_rate; 2852 u32 clkrate = chan->conf.phys_settings.clock_rate;
2859 uclong clktype = chan->conf.phys_settings.clock_type; 2853 u32 clktype = chan->conf.phys_settings.clock_type;
2860 ucshort encoding = chan->conf.proto_settings.encoding; 2854 u16 encoding = chan->conf.proto_settings.encoding;
2861 ucshort parity = chan->conf.proto_settings.parity; 2855 u16 parity = chan->conf.proto_settings.parity;
2862 ucchar md0, md2; 2856 u8 md0, md2;
2863 2857
2864 /* Reset the channel */ 2858 /* Reset the channel */
2865 cpc_writeb(scabase + M_REG(CMD, ch), CMD_CH_RST); 2859 cpc_writeb(scabase + M_REG(CMD, ch), CMD_CH_RST);
2866 2860
@@ -3152,19 +3146,10 @@ int cpc_open(struct net_device *dev)
3152 printk("pc300: cpc_open"); 3146 printk("pc300: cpc_open");
3153#endif 3147#endif
3154 3148
3155#ifdef FIXME
3156 if (hdlc->proto.id == IF_PROTO_PPP) {
3157 d->if_ptr = &hdlc->state.ppp.pppdev;
3158 }
3159#endif
3160
3161 result = hdlc_open(dev); 3149 result = hdlc_open(dev);
3162 if (/* FIXME hdlc->proto.id == IF_PROTO_PPP*/ 0) { 3150
3163 dev->priv = d; 3151 if (result)
3164 }
3165 if (result) {
3166 return result; 3152 return result;
3167 }
3168 3153
3169 sprintf(ifr.ifr_name, "%s", dev->name); 3154 sprintf(ifr.ifr_name, "%s", dev->name);
3170 result = cpc_opench(d); 3155 result = cpc_opench(d);
@@ -3197,9 +3182,7 @@ static int cpc_close(struct net_device *dev)
3197 CPC_UNLOCK(card, flags); 3182 CPC_UNLOCK(card, flags);
3198 3183
3199 hdlc_close(dev); 3184 hdlc_close(dev);
3200 if (/* FIXME hdlc->proto.id == IF_PROTO_PPP*/ 0) { 3185
3201 d->if_ptr = NULL;
3202 }
3203#ifdef CONFIG_PC300_MLPPP 3186#ifdef CONFIG_PC300_MLPPP
3204 if (chan->conf.proto == PC300_PROTO_MLPPP) { 3187 if (chan->conf.proto == PC300_PROTO_MLPPP) {
3205 cpc_tty_unregister_service(d); 3188 cpc_tty_unregister_service(d);
@@ -3210,16 +3193,16 @@ static int cpc_close(struct net_device *dev)
3210 return 0; 3193 return 0;
3211} 3194}
3212 3195
3213static uclong detect_ram(pc300_t * card) 3196static u32 detect_ram(pc300_t * card)
3214{ 3197{
3215 uclong i; 3198 u32 i;
3216 ucchar data; 3199 u8 data;
3217 void __iomem *rambase = card->hw.rambase; 3200 void __iomem *rambase = card->hw.rambase;
3218 3201
3219 card->hw.ramsize = PC300_RAMSIZE; 3202 card->hw.ramsize = PC300_RAMSIZE;
3220 /* Let's find out how much RAM is present on this board */ 3203 /* Let's find out how much RAM is present on this board */
3221 for (i = 0; i < card->hw.ramsize; i++) { 3204 for (i = 0; i < card->hw.ramsize; i++) {
3222 data = (ucchar) (i & 0xff); 3205 data = (u8)(i & 0xff);
3223 cpc_writeb(rambase + i, data); 3206 cpc_writeb(rambase + i, data);
3224 if (cpc_readb(rambase + i) != data) { 3207 if (cpc_readb(rambase + i) != data) {
3225 break; 3208 break;
@@ -3296,7 +3279,7 @@ static void cpc_init_card(pc300_t * card)
3296 cpc_writeb(card->hw.scabase + DMER, 0x80); 3279 cpc_writeb(card->hw.scabase + DMER, 0x80);
3297 3280
3298 if (card->hw.type == PC300_TE) { 3281 if (card->hw.type == PC300_TE) {
3299 ucchar reg1; 3282 u8 reg1;
3300 3283
3301 /* Check CPLD version */ 3284 /* Check CPLD version */
3302 reg1 = cpc_readb(card->hw.falcbase + CPLD_REG1); 3285 reg1 = cpc_readb(card->hw.falcbase + CPLD_REG1);
@@ -3360,7 +3343,6 @@ static void cpc_init_card(pc300_t * card)
3360 chan->nfree_tx_bd = N_DMA_TX_BUF; 3343 chan->nfree_tx_bd = N_DMA_TX_BUF;
3361 3344
3362 d->chan = chan; 3345 d->chan = chan;
3363 d->tx_skb = NULL;
3364 d->trace_on = 0; 3346 d->trace_on = 0;
3365 d->line_on = 0; 3347 d->line_on = 0;
3366 d->line_off = 0; 3348 d->line_off = 0;
@@ -3431,7 +3413,7 @@ cpc_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
3431{ 3413{
3432 static int first_time = 1; 3414 static int first_time = 1;
3433 int err, eeprom_outdated = 0; 3415 int err, eeprom_outdated = 0;
3434 ucshort device_id; 3416 u16 device_id;
3435 pc300_t *card; 3417 pc300_t *card;
3436 3418
3437 if (first_time) { 3419 if (first_time) {
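
The pc300_drv.c hunks above are a mechanical type cleanup: the driver's private ucchar/ucshort/uclong typedefs are replaced by the kernel's fixed-width u8/u16/u32 from <linux/types.h>. A minimal sketch of the mapping, assuming the old names were plain C aliases in the driver's private header (pc300.h itself is not part of this diff); the descriptor below is hypothetical and only mirrors how the pcsca_bd_t fields are used:

/*
 * assumed old aliases (pc300.h)        kernel fixed-width types
 *   ucchar  (unsigned char)       ->   u8   (register values, status bits)
 *   ucshort (unsigned short)      ->   u16  (descriptor indices, counters)
 *   uclong  (unsigned long/int)   ->   u32  (SCA addresses, clock rates)
 */
#include <linux/types.h>

struct example_bd {		/* hypothetical buffer descriptor */
	u32 next;		/* bus address of the next descriptor */
	u32 ptbuf;		/* bus address of the data buffer */
	u16 len;		/* number of bytes in the buffer */
	u8  status;		/* DST_* status bits */
};

Using the fixed-width types keeps hardware-facing structures and register accesses the same size on every architecture, which is why the conversion touches every descriptor, counter and register helper in the file without changing behaviour.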
diff --git a/drivers/net/wan/sealevel.c b/drivers/net/wan/sealevel.c
index 44a89df1b8bf..c0235844a4d5 100644
--- a/drivers/net/wan/sealevel.c
+++ b/drivers/net/wan/sealevel.c
@@ -8,6 +8,7 @@
8 * 8 *
9 * (c) Copyright 1999, 2001 Alan Cox 9 * (c) Copyright 1999, 2001 Alan Cox
10 * (c) Copyright 2001 Red Hat Inc. 10 * (c) Copyright 2001 Red Hat Inc.
11 * Generic HDLC port Copyright (C) 2008 Krzysztof Halasa <khc@pm.waw.pl>
11 * 12 *
12 */ 13 */
13 14
@@ -19,6 +20,7 @@
19#include <linux/netdevice.h> 20#include <linux/netdevice.h>
20#include <linux/if_arp.h> 21#include <linux/if_arp.h>
21#include <linux/delay.h> 22#include <linux/delay.h>
23#include <linux/hdlc.h>
22#include <linux/ioport.h> 24#include <linux/ioport.h>
23#include <linux/init.h> 25#include <linux/init.h>
24#include <net/arp.h> 26#include <net/arp.h>
@@ -27,22 +29,19 @@
27#include <asm/io.h> 29#include <asm/io.h>
28#include <asm/dma.h> 30#include <asm/dma.h>
29#include <asm/byteorder.h> 31#include <asm/byteorder.h>
30#include <net/syncppp.h>
31#include "z85230.h" 32#include "z85230.h"
32 33
33 34
34struct slvl_device 35struct slvl_device
35{ 36{
36 void *if_ptr; /* General purpose pointer (used by SPPP) */
37 struct z8530_channel *chan; 37 struct z8530_channel *chan;
38 struct ppp_device pppdev;
39 int channel; 38 int channel;
40}; 39};
41 40
42 41
43struct slvl_board 42struct slvl_board
44{ 43{
45 struct slvl_device *dev[2]; 44 struct slvl_device dev[2];
46 struct z8530_dev board; 45 struct z8530_dev board;
47 int iobase; 46 int iobase;
48}; 47};
@@ -51,72 +50,69 @@ struct slvl_board
51 * Network driver support routines 50 * Network driver support routines
52 */ 51 */
53 52
53static inline struct slvl_device* dev_to_chan(struct net_device *dev)
54{
55 return (struct slvl_device *)dev_to_hdlc(dev)->priv;
56}
57
54/* 58/*
55 * Frame receive. Simple for our card as we do sync ppp and there 59 * Frame receive. Simple for our card as we do HDLC and there
56 * is no funny garbage involved 60 * is no funny garbage involved
57 */ 61 */
58 62
59static void sealevel_input(struct z8530_channel *c, struct sk_buff *skb) 63static void sealevel_input(struct z8530_channel *c, struct sk_buff *skb)
60{ 64{
61 /* Drop the CRC - it's not a good idea to try and negotiate it ;) */ 65 /* Drop the CRC - it's not a good idea to try and negotiate it ;) */
62 skb_trim(skb, skb->len-2); 66 skb_trim(skb, skb->len - 2);
63 skb->protocol=htons(ETH_P_WAN_PPP); 67 skb->protocol = hdlc_type_trans(skb, c->netdevice);
64 skb_reset_mac_header(skb); 68 skb_reset_mac_header(skb);
65 skb->dev=c->netdevice; 69 skb->dev = c->netdevice;
66 /*
67 * Send it to the PPP layer. We don't have time to process
68 * it right now.
69 */
70 netif_rx(skb); 70 netif_rx(skb);
71 c->netdevice->last_rx = jiffies; 71 c->netdevice->last_rx = jiffies;
72} 72}
73 73
74/* 74/*
75 * We've been placed in the UP state 75 * We've been placed in the UP state
76 */ 76 */
77 77
78static int sealevel_open(struct net_device *d) 78static int sealevel_open(struct net_device *d)
79{ 79{
80 struct slvl_device *slvl=d->priv; 80 struct slvl_device *slvl = dev_to_chan(d);
81 int err = -1; 81 int err = -1;
82 int unit = slvl->channel; 82 int unit = slvl->channel;
83 83
84 /* 84 /*
85 * Link layer up. 85 * Link layer up.
86 */ 86 */
87 87
88 switch(unit) 88 switch (unit)
89 { 89 {
90 case 0: 90 case 0:
91 err=z8530_sync_dma_open(d, slvl->chan); 91 err = z8530_sync_dma_open(d, slvl->chan);
92 break; 92 break;
93 case 1: 93 case 1:
94 err=z8530_sync_open(d, slvl->chan); 94 err = z8530_sync_open(d, slvl->chan);
95 break; 95 break;
96 } 96 }
97 97
98 if(err) 98 if (err)
99 return err; 99 return err;
100 /* 100
101 * Begin PPP 101 err = hdlc_open(d);
102 */ 102 if (err) {
103 err=sppp_open(d); 103 switch (unit) {
104 if(err)
105 {
106 switch(unit)
107 {
108 case 0: 104 case 0:
109 z8530_sync_dma_close(d, slvl->chan); 105 z8530_sync_dma_close(d, slvl->chan);
110 break; 106 break;
111 case 1: 107 case 1:
112 z8530_sync_close(d, slvl->chan); 108 z8530_sync_close(d, slvl->chan);
113 break; 109 break;
114 } 110 }
115 return err; 111 return err;
116 } 112 }
117 113
118 slvl->chan->rx_function=sealevel_input; 114 slvl->chan->rx_function = sealevel_input;
119 115
120 /* 116 /*
121 * Go go go 117 * Go go go
122 */ 118 */
@@ -126,26 +122,19 @@ static int sealevel_open(struct net_device *d)
126 122
127static int sealevel_close(struct net_device *d) 123static int sealevel_close(struct net_device *d)
128{ 124{
129 struct slvl_device *slvl=d->priv; 125 struct slvl_device *slvl = dev_to_chan(d);
130 int unit = slvl->channel; 126 int unit = slvl->channel;
131 127
132 /* 128 /*
133 * Discard new frames 129 * Discard new frames
134 */ 130 */
135
136 slvl->chan->rx_function=z8530_null_rx;
137
138 /*
139 * PPP off
140 */
141 sppp_close(d);
142 /*
143 * Link layer down
144 */
145 131
132 slvl->chan->rx_function = z8530_null_rx;
133
134 hdlc_close(d);
146 netif_stop_queue(d); 135 netif_stop_queue(d);
147 136
148 switch(unit) 137 switch (unit)
149 { 138 {
150 case 0: 139 case 0:
151 z8530_sync_dma_close(d, slvl->chan); 140 z8530_sync_dma_close(d, slvl->chan);
@@ -159,210 +148,153 @@ static int sealevel_close(struct net_device *d)
159 148
160static int sealevel_ioctl(struct net_device *d, struct ifreq *ifr, int cmd) 149static int sealevel_ioctl(struct net_device *d, struct ifreq *ifr, int cmd)
161{ 150{
162 /* struct slvl_device *slvl=d->priv; 151 /* struct slvl_device *slvl=dev_to_chan(d);
163 z8530_ioctl(d,&slvl->sync.chanA,ifr,cmd) */ 152 z8530_ioctl(d,&slvl->sync.chanA,ifr,cmd) */
164 return sppp_do_ioctl(d, ifr,cmd); 153 return hdlc_ioctl(d, ifr, cmd);
165}
166
167static struct net_device_stats *sealevel_get_stats(struct net_device *d)
168{
169 struct slvl_device *slvl=d->priv;
170 if(slvl)
171 return z8530_get_stats(slvl->chan);
172 else
173 return NULL;
174} 154}
175 155
176/* 156/*
177 * Passed PPP frames, fire them downwind. 157 * Passed network frames, fire them downwind.
178 */ 158 */
179 159
180static int sealevel_queue_xmit(struct sk_buff *skb, struct net_device *d) 160static int sealevel_queue_xmit(struct sk_buff *skb, struct net_device *d)
181{ 161{
182 struct slvl_device *slvl=d->priv; 162 return z8530_queue_xmit(dev_to_chan(d)->chan, skb);
183 return z8530_queue_xmit(slvl->chan, skb);
184} 163}
185 164
186static int sealevel_neigh_setup(struct neighbour *n) 165static int sealevel_attach(struct net_device *dev, unsigned short encoding,
166 unsigned short parity)
187{ 167{
188 if (n->nud_state == NUD_NONE) { 168 if (encoding == ENCODING_NRZ && parity == PARITY_CRC16_PR1_CCITT)
189 n->ops = &arp_broken_ops; 169 return 0;
190 n->output = n->ops->output; 170 return -EINVAL;
191 }
192 return 0;
193} 171}
194 172
195static int sealevel_neigh_setup_dev(struct net_device *dev, struct neigh_parms *p) 173static int slvl_setup(struct slvl_device *sv, int iobase, int irq)
196{ 174{
197 if (p->tbl->family == AF_INET) { 175 struct net_device *dev = alloc_hdlcdev(sv);
198 p->neigh_setup = sealevel_neigh_setup; 176 if (!dev)
199 p->ucast_probes = 0; 177 return -1;
200 p->mcast_probes = 0; 178
179 dev_to_hdlc(dev)->attach = sealevel_attach;
180 dev_to_hdlc(dev)->xmit = sealevel_queue_xmit;
181 dev->open = sealevel_open;
182 dev->stop = sealevel_close;
183 dev->do_ioctl = sealevel_ioctl;
184 dev->base_addr = iobase;
185 dev->irq = irq;
186
187 if (register_hdlc_device(dev)) {
188 printk(KERN_ERR "sealevel: unable to register HDLC device\n");
189 free_netdev(dev);
190 return -1;
201 } 191 }
202 return 0;
203}
204 192
205static int sealevel_attach(struct net_device *dev) 193 sv->chan->netdevice = dev;
206{
207 struct slvl_device *sv = dev->priv;
208 sppp_attach(&sv->pppdev);
209 return 0; 194 return 0;
210} 195}
211 196
212static void sealevel_detach(struct net_device *dev)
213{
214 sppp_detach(dev);
215}
216
217static void slvl_setup(struct net_device *d)
218{
219 d->open = sealevel_open;
220 d->stop = sealevel_close;
221 d->init = sealevel_attach;
222 d->uninit = sealevel_detach;
223 d->hard_start_xmit = sealevel_queue_xmit;
224 d->get_stats = sealevel_get_stats;
225 d->set_multicast_list = NULL;
226 d->do_ioctl = sealevel_ioctl;
227 d->neigh_setup = sealevel_neigh_setup_dev;
228 d->set_mac_address = NULL;
229
230}
231
232static inline struct slvl_device *slvl_alloc(int iobase, int irq)
233{
234 struct net_device *d;
235 struct slvl_device *sv;
236
237 d = alloc_netdev(sizeof(struct slvl_device), "hdlc%d",
238 slvl_setup);
239
240 if (!d)
241 return NULL;
242
243 sv = d->priv;
244 d->ml_priv = sv;
245 sv->if_ptr = &sv->pppdev;
246 sv->pppdev.dev = d;
247 d->base_addr = iobase;
248 d->irq = irq;
249
250 return sv;
251}
252
253 197
254/* 198/*
255 * Allocate and setup Sealevel board. 199 * Allocate and setup Sealevel board.
256 */ 200 */
257 201
258static __init struct slvl_board *slvl_init(int iobase, int irq, 202static __init struct slvl_board *slvl_init(int iobase, int irq,
259 int txdma, int rxdma, int slow) 203 int txdma, int rxdma, int slow)
260{ 204{
261 struct z8530_dev *dev; 205 struct z8530_dev *dev;
262 struct slvl_board *b; 206 struct slvl_board *b;
263 207
264 /* 208 /*
265 * Get the needed I/O space 209 * Get the needed I/O space
266 */ 210 */
267 211
268 if(!request_region(iobase, 8, "Sealevel 4021")) 212 if (!request_region(iobase, 8, "Sealevel 4021")) {
269 { 213 printk(KERN_WARNING "sealevel: I/O 0x%X already in use.\n",
270 printk(KERN_WARNING "sealevel: I/O 0x%X already in use.\n", iobase); 214 iobase);
271 return NULL; 215 return NULL;
272 } 216 }
273
274 b = kzalloc(sizeof(struct slvl_board), GFP_KERNEL);
275 if(!b)
276 goto fail3;
277 217
278 if (!(b->dev[0]= slvl_alloc(iobase, irq))) 218 b = kzalloc(sizeof(struct slvl_board), GFP_KERNEL);
279 goto fail2; 219 if (!b)
220 goto err_kzalloc;
280 221
281 b->dev[0]->chan = &b->board.chanA; 222 b->dev[0].chan = &b->board.chanA;
282 b->dev[0]->channel = 0; 223 b->dev[0].channel = 0;
283
284 if (!(b->dev[1] = slvl_alloc(iobase, irq)))
285 goto fail1_0;
286 224
287 b->dev[1]->chan = &b->board.chanB; 225 b->dev[1].chan = &b->board.chanB;
288 b->dev[1]->channel = 1; 226 b->dev[1].channel = 1;
289 227
290 dev = &b->board; 228 dev = &b->board;
291 229
292 /* 230 /*
293 * Stuff in the I/O addressing 231 * Stuff in the I/O addressing
294 */ 232 */
295 233
296 dev->active = 0; 234 dev->active = 0;
297 235
298 b->iobase = iobase; 236 b->iobase = iobase;
299 237
300 /* 238 /*
301 * Select 8530 delays for the old board 239 * Select 8530 delays for the old board
302 */ 240 */
303 241
304 if(slow) 242 if (slow)
305 iobase |= Z8530_PORT_SLEEP; 243 iobase |= Z8530_PORT_SLEEP;
306 244
307 dev->chanA.ctrlio=iobase+1; 245 dev->chanA.ctrlio = iobase + 1;
308 dev->chanA.dataio=iobase; 246 dev->chanA.dataio = iobase;
309 dev->chanB.ctrlio=iobase+3; 247 dev->chanB.ctrlio = iobase + 3;
310 dev->chanB.dataio=iobase+2; 248 dev->chanB.dataio = iobase + 2;
311 249
312 dev->chanA.irqs=&z8530_nop; 250 dev->chanA.irqs = &z8530_nop;
313 dev->chanB.irqs=&z8530_nop; 251 dev->chanB.irqs = &z8530_nop;
314 252
315 /* 253 /*
316 * Assert DTR enable DMA 254 * Assert DTR enable DMA
317 */ 255 */
318 256
319 outb(3|(1<<7), b->iobase+4); 257 outb(3 | (1 << 7), b->iobase + 4);
320 258
321 259
322 /* We want a fast IRQ for this device. Actually we'd like an even faster 260 /* We want a fast IRQ for this device. Actually we'd like an even faster
323 IRQ ;) - This is one driver RtLinux is made for */ 261 IRQ ;) - This is one driver RtLinux is made for */
324 262
325 if(request_irq(irq, &z8530_interrupt, IRQF_DISABLED, "SeaLevel", dev)<0) 263 if (request_irq(irq, &z8530_interrupt, IRQF_DISABLED,
326 { 264 "SeaLevel", dev) < 0) {
327 printk(KERN_WARNING "sealevel: IRQ %d already in use.\n", irq); 265 printk(KERN_WARNING "sealevel: IRQ %d already in use.\n", irq);
328 goto fail1_1; 266 goto err_request_irq;
329 } 267 }
330 268
331 dev->irq=irq; 269 dev->irq = irq;
332 dev->chanA.private=&b->dev[0]; 270 dev->chanA.private = &b->dev[0];
333 dev->chanB.private=&b->dev[1]; 271 dev->chanB.private = &b->dev[1];
334 dev->chanA.netdevice=b->dev[0]->pppdev.dev; 272 dev->chanA.dev = dev;
335 dev->chanB.netdevice=b->dev[1]->pppdev.dev; 273 dev->chanB.dev = dev;
336 dev->chanA.dev=dev; 274
337 dev->chanB.dev=dev; 275 dev->chanA.txdma = 3;
338 276 dev->chanA.rxdma = 1;
339 dev->chanA.txdma=3; 277 if (request_dma(dev->chanA.txdma, "SeaLevel (TX)"))
340 dev->chanA.rxdma=1; 278 goto err_dma_tx;
341 if(request_dma(dev->chanA.txdma, "SeaLevel (TX)")!=0) 279
342 goto fail; 280 if (request_dma(dev->chanA.rxdma, "SeaLevel (RX)"))
343 281 goto err_dma_rx;
344 if(request_dma(dev->chanA.rxdma, "SeaLevel (RX)")!=0) 282
345 goto dmafail;
346
347 disable_irq(irq); 283 disable_irq(irq);
348 284
349 /* 285 /*
350 * Begin normal initialise 286 * Begin normal initialise
351 */ 287 */
352 288
353 if(z8530_init(dev)!=0) 289 if (z8530_init(dev) != 0) {
354 {
355 printk(KERN_ERR "Z8530 series device not found.\n"); 290 printk(KERN_ERR "Z8530 series device not found.\n");
356 enable_irq(irq); 291 enable_irq(irq);
357 goto dmafail2; 292 goto free_hw;
358 } 293 }
359 if(dev->type==Z85C30) 294 if (dev->type == Z85C30) {
360 {
361 z8530_channel_load(&dev->chanA, z8530_hdlc_kilostream); 295 z8530_channel_load(&dev->chanA, z8530_hdlc_kilostream);
362 z8530_channel_load(&dev->chanB, z8530_hdlc_kilostream); 296 z8530_channel_load(&dev->chanB, z8530_hdlc_kilostream);
363 } 297 } else {
364 else
365 {
366 z8530_channel_load(&dev->chanA, z8530_hdlc_kilostream_85230); 298 z8530_channel_load(&dev->chanA, z8530_hdlc_kilostream_85230);
367 z8530_channel_load(&dev->chanB, z8530_hdlc_kilostream_85230); 299 z8530_channel_load(&dev->chanB, z8530_hdlc_kilostream_85230);
368 } 300 }
@@ -370,36 +302,31 @@ static __init struct slvl_board *slvl_init(int iobase, int irq,
370 /* 302 /*
371 * Now we can take the IRQ 303 * Now we can take the IRQ
372 */ 304 */
373 305
374 enable_irq(irq); 306 enable_irq(irq);
375 307
376 if (register_netdev(b->dev[0]->pppdev.dev)) 308 if (slvl_setup(&b->dev[0], iobase, irq))
377 goto dmafail2; 309 goto free_hw;
378 310 if (slvl_setup(&b->dev[1], iobase, irq))
379 if (register_netdev(b->dev[1]->pppdev.dev)) 311 goto free_netdev0;
380 goto fail_unit;
381 312
382 z8530_describe(dev, "I/O", iobase); 313 z8530_describe(dev, "I/O", iobase);
383 dev->active=1; 314 dev->active = 1;
384 return b; 315 return b;
385 316
386fail_unit: 317free_netdev0:
387 unregister_netdev(b->dev[0]->pppdev.dev); 318 unregister_hdlc_device(b->dev[0].chan->netdevice);
388 319 free_netdev(b->dev[0].chan->netdevice);
389dmafail2: 320free_hw:
390 free_dma(dev->chanA.rxdma); 321 free_dma(dev->chanA.rxdma);
391dmafail: 322err_dma_rx:
392 free_dma(dev->chanA.txdma); 323 free_dma(dev->chanA.txdma);
393fail: 324err_dma_tx:
394 free_irq(irq, dev); 325 free_irq(irq, dev);
395fail1_1: 326err_request_irq:
396 free_netdev(b->dev[1]->pppdev.dev);
397fail1_0:
398 free_netdev(b->dev[0]->pppdev.dev);
399fail2:
400 kfree(b); 327 kfree(b);
401fail3: 328err_kzalloc:
402 release_region(iobase,8); 329 release_region(iobase, 8);
403 return NULL; 330 return NULL;
404} 331}
405 332
@@ -408,14 +335,14 @@ static void __exit slvl_shutdown(struct slvl_board *b)
408 int u; 335 int u;
409 336
410 z8530_shutdown(&b->board); 337 z8530_shutdown(&b->board);
411 338
412 for(u=0; u<2; u++) 339 for (u = 0; u < 2; u++)
413 { 340 {
414 struct net_device *d = b->dev[u]->pppdev.dev; 341 struct net_device *d = b->dev[u].chan->netdevice;
415 unregister_netdev(d); 342 unregister_hdlc_device(d);
416 free_netdev(d); 343 free_netdev(d);
417 } 344 }
418 345
419 free_irq(b->board.irq, &b->board); 346 free_irq(b->board.irq, &b->board);
420 free_dma(b->board.chanA.rxdma); 347 free_dma(b->board.chanA.rxdma);
421 free_dma(b->board.chanA.txdma); 348 free_dma(b->board.chanA.txdma);
@@ -451,10 +378,6 @@ static struct slvl_board *slvl_unit;
451 378
452static int __init slvl_init_module(void) 379static int __init slvl_init_module(void)
453{ 380{
454#ifdef MODULE
455 printk(KERN_INFO "SeaLevel Z85230 Synchronous Driver v 0.02.\n");
456 printk(KERN_INFO "(c) Copyright 1998, Building Number Three Ltd.\n");
457#endif
458 slvl_unit = slvl_init(io, irq, txdma, rxdma, slow); 381 slvl_unit = slvl_init(io, irq, txdma, rxdma, slow);
459 382
460 return slvl_unit ? 0 : -ENODEV; 383 return slvl_unit ? 0 : -ENODEV;
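
The sealevel.c changes above convert the driver from the old sync-PPP layer to generic HDLC: the per-channel pppdev/if_ptr bookkeeping disappears, devices are created with alloc_hdlcdev(), and the driver only supplies attach/xmit callbacks plus open/stop/ioctl hooks that defer to hdlc_open(), hdlc_close() and hdlc_ioctl(). A minimal sketch of that registration pattern for a 2.6.2x-era net_device (before net_device_ops); my_priv, my_open, my_close, my_xmit and my_register_channel are illustrative placeholders, not part of the sealevel driver:

#include <linux/hdlc.h>
#include <linux/netdevice.h>

struct my_priv;					/* driver-private channel state */
static int my_open(struct net_device *dev);	/* must call hdlc_open() */
static int my_close(struct net_device *dev);	/* must call hdlc_close() */
static int my_xmit(struct sk_buff *skb, struct net_device *dev);

static int my_attach(struct net_device *dev, unsigned short encoding,
		     unsigned short parity)
{
	/* advertise only what the hardware can actually do */
	if (encoding == ENCODING_NRZ && parity == PARITY_CRC16_PR1_CCITT)
		return 0;
	return -EINVAL;
}

static int my_register_channel(struct my_priv *priv, int iobase, int irq)
{
	struct net_device *dev = alloc_hdlcdev(priv);

	if (!dev)
		return -ENOMEM;

	dev_to_hdlc(dev)->attach = my_attach;
	dev_to_hdlc(dev)->xmit   = my_xmit;
	dev->open      = my_open;
	dev->stop      = my_close;
	dev->do_ioctl  = hdlc_ioctl;
	dev->base_addr = iobase;
	dev->irq       = irq;

	if (register_hdlc_device(dev)) {
		free_netdev(dev);
		return -ENODEV;
	}
	return 0;
}

Because alloc_hdlcdev() stores the private pointer in the hdlc_device, the slvl_board can now embed its two slvl_device structures directly instead of allocating them per netdev, which is what the dev[2] array change at the top of the file reflects.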
diff --git a/drivers/net/wan/syncppp.c b/drivers/net/wan/syncppp.c
index 29b4b94e4947..327d58589e12 100644
--- a/drivers/net/wan/syncppp.c
+++ b/drivers/net/wan/syncppp.c
@@ -230,13 +230,6 @@ static void sppp_input (struct net_device *dev, struct sk_buff *skb)
230 skb->dev=dev; 230 skb->dev=dev;
231 skb_reset_mac_header(skb); 231 skb_reset_mac_header(skb);
232 232
233 if (dev->flags & IFF_RUNNING)
234 {
235 /* Count received bytes, add FCS and one flag */
236 sp->ibytes+= skb->len + 3;
237 sp->ipkts++;
238 }
239
240 if (!pskb_may_pull(skb, PPP_HEADER_LEN)) { 233 if (!pskb_may_pull(skb, PPP_HEADER_LEN)) {
241 /* Too small packet, drop it. */ 234 /* Too small packet, drop it. */
242 if (sp->pp_flags & PP_DEBUG) 235 if (sp->pp_flags & PP_DEBUG)
@@ -832,7 +825,6 @@ static void sppp_cp_send (struct sppp *sp, u16 proto, u8 type,
832 sppp_print_bytes ((u8*) (lh+1), len); 825 sppp_print_bytes ((u8*) (lh+1), len);
833 printk (">\n"); 826 printk (">\n");
834 } 827 }
835 sp->obytes += skb->len;
836 /* Control is high priority so it doesn't get queued behind data */ 828 /* Control is high priority so it doesn't get queued behind data */
837 skb->priority=TC_PRIO_CONTROL; 829 skb->priority=TC_PRIO_CONTROL;
838 skb->dev = dev; 830 skb->dev = dev;
@@ -875,7 +867,6 @@ static void sppp_cisco_send (struct sppp *sp, int type, u32 par1, u32 par2)
875 printk (KERN_WARNING "%s: cisco output: <%xh %xh %xh %xh %xh-%xh>\n", 867 printk (KERN_WARNING "%s: cisco output: <%xh %xh %xh %xh %xh-%xh>\n",
876 dev->name, ntohl (ch->type), ch->par1, 868 dev->name, ntohl (ch->type), ch->par1,
877 ch->par2, ch->rel, ch->time0, ch->time1); 869 ch->par2, ch->rel, ch->time0, ch->time1);
878 sp->obytes += skb->len;
879 skb->priority=TC_PRIO_CONTROL; 870 skb->priority=TC_PRIO_CONTROL;
880 skb->dev = dev; 871 skb->dev = dev;
881 skb_queue_tail(&tx_queue, skb); 872 skb_queue_tail(&tx_queue, skb);
diff --git a/drivers/net/wan/z85230.c b/drivers/net/wan/z85230.c
index 98ef400908b8..243bd8d918fe 100644
--- a/drivers/net/wan/z85230.c
+++ b/drivers/net/wan/z85230.c
@@ -43,6 +43,7 @@
43#include <linux/netdevice.h> 43#include <linux/netdevice.h>
44#include <linux/if_arp.h> 44#include <linux/if_arp.h>
45#include <linux/delay.h> 45#include <linux/delay.h>
46#include <linux/hdlc.h>
46#include <linux/ioport.h> 47#include <linux/ioport.h>
47#include <linux/init.h> 48#include <linux/init.h>
48#include <asm/dma.h> 49#include <asm/dma.h>
@@ -51,7 +52,6 @@
51#define RT_UNLOCK 52#define RT_UNLOCK
52#include <linux/spinlock.h> 53#include <linux/spinlock.h>
53 54
54#include <net/syncppp.h>
55#include "z85230.h" 55#include "z85230.h"
56 56
57 57
@@ -440,51 +440,46 @@ static void z8530_tx(struct z8530_channel *c)
440 * A status event occurred in PIO synchronous mode. There are several 440 * A status event occurred in PIO synchronous mode. There are several
441 * reasons the chip will bother us here. A transmit underrun means we 441 * reasons the chip will bother us here. A transmit underrun means we
442 * failed to feed the chip fast enough and just broke a packet. A DCD 442 * failed to feed the chip fast enough and just broke a packet. A DCD
443 * change is a line up or down. We communicate that back to the protocol 443 * change is a line up or down.
444 * layer for synchronous PPP to renegotiate.
445 */ 444 */
446 445
447static void z8530_status(struct z8530_channel *chan) 446static void z8530_status(struct z8530_channel *chan)
448{ 447{
449 u8 status, altered; 448 u8 status, altered;
450 449
451 status=read_zsreg(chan, R0); 450 status = read_zsreg(chan, R0);
452 altered=chan->status^status; 451 altered = chan->status ^ status;
453 452
454 chan->status=status; 453 chan->status = status;
455 454
456 if(status&TxEOM) 455 if (status & TxEOM) {
457 {
458/* printk("%s: Tx underrun.\n", chan->dev->name); */ 456/* printk("%s: Tx underrun.\n", chan->dev->name); */
459 chan->stats.tx_fifo_errors++; 457 chan->netdevice->stats.tx_fifo_errors++;
460 write_zsctrl(chan, ERR_RES); 458 write_zsctrl(chan, ERR_RES);
461 z8530_tx_done(chan); 459 z8530_tx_done(chan);
462 } 460 }
463 461
464 if(altered&chan->dcdcheck) 462 if (altered & chan->dcdcheck)
465 { 463 {
466 if(status&chan->dcdcheck) 464 if (status & chan->dcdcheck) {
467 {
468 printk(KERN_INFO "%s: DCD raised\n", chan->dev->name); 465 printk(KERN_INFO "%s: DCD raised\n", chan->dev->name);
469 write_zsreg(chan, R3, chan->regs[3]|RxENABLE); 466 write_zsreg(chan, R3, chan->regs[3] | RxENABLE);
470 if(chan->netdevice && 467 if (chan->netdevice)
471 ((chan->netdevice->type == ARPHRD_HDLC) || 468 netif_carrier_on(chan->netdevice);
472 (chan->netdevice->type == ARPHRD_PPP))) 469 } else {
473 sppp_reopen(chan->netdevice);
474 }
475 else
476 {
477 printk(KERN_INFO "%s: DCD lost\n", chan->dev->name); 470 printk(KERN_INFO "%s: DCD lost\n", chan->dev->name);
478 write_zsreg(chan, R3, chan->regs[3]&~RxENABLE); 471 write_zsreg(chan, R3, chan->regs[3] & ~RxENABLE);
479 z8530_flush_fifo(chan); 472 z8530_flush_fifo(chan);
473 if (chan->netdevice)
474 netif_carrier_off(chan->netdevice);
480 } 475 }
481 476
482 } 477 }
483 write_zsctrl(chan, RES_EXT_INT); 478 write_zsctrl(chan, RES_EXT_INT);
484 write_zsctrl(chan, RES_H_IUS); 479 write_zsctrl(chan, RES_H_IUS);
485} 480}
486 481
487struct z8530_irqhandler z8530_sync= 482struct z8530_irqhandler z8530_sync =
488{ 483{
489 z8530_rx, 484 z8530_rx,
490 z8530_tx, 485 z8530_tx,
@@ -556,8 +551,7 @@ static void z8530_dma_tx(struct z8530_channel *chan)
556 * 551 *
557 * A status event occurred on the Z8530. We receive these for two reasons 552 * A status event occurred on the Z8530. We receive these for two reasons
558 * when in DMA mode. Firstly if we finished a packet transfer we get one 553 * when in DMA mode. Firstly if we finished a packet transfer we get one
559 * and kick the next packet out. Secondly we may see a DCD change and 554 * and kick the next packet out. Secondly we may see a DCD change.
560 * have to poke the protocol layer.
561 * 555 *
562 */ 556 */
563 557
@@ -586,24 +580,21 @@ static void z8530_dma_status(struct z8530_channel *chan)
586 } 580 }
587 } 581 }
588 582
589 if(altered&chan->dcdcheck) 583 if (altered & chan->dcdcheck)
590 { 584 {
591 if(status&chan->dcdcheck) 585 if (status & chan->dcdcheck) {
592 {
593 printk(KERN_INFO "%s: DCD raised\n", chan->dev->name); 586 printk(KERN_INFO "%s: DCD raised\n", chan->dev->name);
594 write_zsreg(chan, R3, chan->regs[3]|RxENABLE); 587 write_zsreg(chan, R3, chan->regs[3] | RxENABLE);
595 if(chan->netdevice && 588 if (chan->netdevice)
596 ((chan->netdevice->type == ARPHRD_HDLC) || 589 netif_carrier_on(chan->netdevice);
597 (chan->netdevice->type == ARPHRD_PPP))) 590 } else {
598 sppp_reopen(chan->netdevice);
599 }
600 else
601 {
602 printk(KERN_INFO "%s:DCD lost\n", chan->dev->name); 591 printk(KERN_INFO "%s:DCD lost\n", chan->dev->name);
603 write_zsreg(chan, R3, chan->regs[3]&~RxENABLE); 592 write_zsreg(chan, R3, chan->regs[3] & ~RxENABLE);
604 z8530_flush_fifo(chan); 593 z8530_flush_fifo(chan);
594 if (chan->netdevice)
595 netif_carrier_off(chan->netdevice);
605 } 596 }
606 } 597 }
607 598
608 write_zsctrl(chan, RES_EXT_INT); 599 write_zsctrl(chan, RES_EXT_INT);
609 write_zsctrl(chan, RES_H_IUS); 600 write_zsctrl(chan, RES_H_IUS);
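
In both z8530_status() and z8530_dma_status() above, the DCD-change handling stops calling sppp_reopen() directly and instead reports link state with netif_carrier_on()/netif_carrier_off(), leaving whatever protocol generic HDLC has attached to react on its own. A sketch of that pattern; report_dcd is a hypothetical helper, and the register writes simply mirror the hunks above (write_zsreg, z8530_flush_fifo, R3 and RxENABLE are z85230 internals):

static void report_dcd(struct z8530_channel *chan, u8 status)
{
	if (status & chan->dcdcheck) {
		/* DCD raised: enable the receiver and announce carrier */
		write_zsreg(chan, R3, chan->regs[3] | RxENABLE);
		if (chan->netdevice)
			netif_carrier_on(chan->netdevice);
	} else {
		/* DCD lost: stop the receiver, flush, announce the loss */
		write_zsreg(chan, R3, chan->regs[3] & ~RxENABLE);
		z8530_flush_fifo(chan);
		if (chan->netdevice)
			netif_carrier_off(chan->netdevice);
	}
}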
@@ -1459,10 +1450,10 @@ static void z8530_tx_begin(struct z8530_channel *c)
1459 /* 1450 /*
1460 * Check if we crapped out. 1451 * Check if we crapped out.
1461 */ 1452 */
1462 if(get_dma_residue(c->txdma)) 1453 if (get_dma_residue(c->txdma))
1463 { 1454 {
1464 c->stats.tx_dropped++; 1455 c->netdevice->stats.tx_dropped++;
1465 c->stats.tx_fifo_errors++; 1456 c->netdevice->stats.tx_fifo_errors++;
1466 } 1457 }
1467 release_dma_lock(flags); 1458 release_dma_lock(flags);
1468 } 1459 }
@@ -1534,21 +1525,21 @@ static void z8530_tx_begin(struct z8530_channel *c)
1534 * packet. This code is fairly timing sensitive. 1525 * packet. This code is fairly timing sensitive.
1535 * 1526 *
1536 * Called with the register lock held. 1527 * Called with the register lock held.
1537 */ 1528 */
1538 1529
1539static void z8530_tx_done(struct z8530_channel *c) 1530static void z8530_tx_done(struct z8530_channel *c)
1540{ 1531{
1541 struct sk_buff *skb; 1532 struct sk_buff *skb;
1542 1533
1543 /* Actually this can happen.*/ 1534 /* Actually this can happen.*/
1544 if(c->tx_skb==NULL) 1535 if (c->tx_skb == NULL)
1545 return; 1536 return;
1546 1537
1547 skb=c->tx_skb; 1538 skb = c->tx_skb;
1548 c->tx_skb=NULL; 1539 c->tx_skb = NULL;
1549 z8530_tx_begin(c); 1540 z8530_tx_begin(c);
1550 c->stats.tx_packets++; 1541 c->netdevice->stats.tx_packets++;
1551 c->stats.tx_bytes+=skb->len; 1542 c->netdevice->stats.tx_bytes += skb->len;
1552 dev_kfree_skb_irq(skb); 1543 dev_kfree_skb_irq(skb);
1553} 1544}
1554 1545
@@ -1558,7 +1549,7 @@ static void z8530_tx_done(struct z8530_channel *c)
1558 * @skb: The buffer 1549 * @skb: The buffer
1559 * 1550 *
1560 * We point the receive handler at this function when idle. Instead 1551 * We point the receive handler at this function when idle. Instead
1561 * of syncppp processing the frames we get to throw them away. 1552 * of processing the frames we get to throw them away.
1562 */ 1553 */
1563 1554
1564void z8530_null_rx(struct z8530_channel *c, struct sk_buff *skb) 1555void z8530_null_rx(struct z8530_channel *c, struct sk_buff *skb)
@@ -1635,10 +1626,11 @@ static void z8530_rx_done(struct z8530_channel *c)
1635 else 1626 else
1636 /* Can't occur as we dont reenable the DMA irq until 1627 /* Can't occur as we dont reenable the DMA irq until
1637 after the flip is done */ 1628 after the flip is done */
1638 printk(KERN_WARNING "%s: DMA flip overrun!\n", c->netdevice->name); 1629 printk(KERN_WARNING "%s: DMA flip overrun!\n",
1639 1630 c->netdevice->name);
1631
1640 release_dma_lock(flags); 1632 release_dma_lock(flags);
1641 1633
1642 /* 1634 /*
1643 * Shove the old buffer into an sk_buff. We can't DMA 1635 * Shove the old buffer into an sk_buff. We can't DMA
1644 * directly into one on a PC - it might be above the 16Mb 1636 * directly into one on a PC - it might be above the 16Mb
@@ -1646,27 +1638,23 @@ static void z8530_rx_done(struct z8530_channel *c)
1646 * can avoid the copy. Optimisation 2 - make the memcpy 1638 * can avoid the copy. Optimisation 2 - make the memcpy
1647 * a copychecksum. 1639 * a copychecksum.
1648 */ 1640 */
1649 1641
1650 skb=dev_alloc_skb(ct); 1642 skb = dev_alloc_skb(ct);
1651 if(skb==NULL) 1643 if (skb == NULL) {
1652 { 1644 c->netdevice->stats.rx_dropped++;
1653 c->stats.rx_dropped++; 1645 printk(KERN_WARNING "%s: Memory squeeze.\n",
1654 printk(KERN_WARNING "%s: Memory squeeze.\n", c->netdevice->name); 1646 c->netdevice->name);
1655 } 1647 } else {
1656 else
1657 {
1658 skb_put(skb, ct); 1648 skb_put(skb, ct);
1659 skb_copy_to_linear_data(skb, rxb, ct); 1649 skb_copy_to_linear_data(skb, rxb, ct);
1660 c->stats.rx_packets++; 1650 c->netdevice->stats.rx_packets++;
1661 c->stats.rx_bytes+=ct; 1651 c->netdevice->stats.rx_bytes += ct;
1662 } 1652 }
1663 c->dma_ready=1; 1653 c->dma_ready = 1;
1664 } 1654 } else {
1665 else 1655 RT_LOCK;
1666 { 1656 skb = c->skb;
1667 RT_LOCK; 1657
1668 skb=c->skb;
1669
1670 /* 1658 /*
1671 * The game we play for non DMA is similar. We want to 1659 * The game we play for non DMA is similar. We want to
1672 * get the controller set up for the next packet as fast 1660 * get the controller set up for the next packet as fast
@@ -1677,48 +1665,39 @@ static void z8530_rx_done(struct z8530_channel *c)
1677 * if you build a system where the sync irq isnt blocked 1665 * if you build a system where the sync irq isnt blocked
1678 * by the kernel IRQ disable then you need only block the 1666 * by the kernel IRQ disable then you need only block the
1679 * sync IRQ for the RT_LOCK area. 1667 * sync IRQ for the RT_LOCK area.
1680 * 1668 *
1681 */ 1669 */
1682 ct=c->count; 1670 ct=c->count;
1683 1671
1684 c->skb = c->skb2; 1672 c->skb = c->skb2;
1685 c->count = 0; 1673 c->count = 0;
1686 c->max = c->mtu; 1674 c->max = c->mtu;
1687 if(c->skb) 1675 if (c->skb) {
1688 {
1689 c->dptr = c->skb->data; 1676 c->dptr = c->skb->data;
1690 c->max = c->mtu; 1677 c->max = c->mtu;
1691 } 1678 } else {
1692 else 1679 c->count = 0;
1693 {
1694 c->count= 0;
1695 c->max = 0; 1680 c->max = 0;
1696 } 1681 }
1697 RT_UNLOCK; 1682 RT_UNLOCK;
1698 1683
1699 c->skb2 = dev_alloc_skb(c->mtu); 1684 c->skb2 = dev_alloc_skb(c->mtu);
1700 if(c->skb2==NULL) 1685 if (c->skb2 == NULL)
1701 printk(KERN_WARNING "%s: memory squeeze.\n", 1686 printk(KERN_WARNING "%s: memory squeeze.\n",
1702 c->netdevice->name); 1687 c->netdevice->name);
1703 else 1688 else
1704 { 1689 skb_put(c->skb2, c->mtu);
1705 skb_put(c->skb2,c->mtu); 1690 c->netdevice->stats.rx_packets++;
1706 } 1691 c->netdevice->stats.rx_bytes += ct;
1707 c->stats.rx_packets++;
1708 c->stats.rx_bytes+=ct;
1709
1710 } 1692 }
1711 /* 1693 /*
1712 * If we received a frame we must now process it. 1694 * If we received a frame we must now process it.
1713 */ 1695 */
1714 if(skb) 1696 if (skb) {
1715 {
1716 skb_trim(skb, ct); 1697 skb_trim(skb, ct);
1717 c->rx_function(c,skb); 1698 c->rx_function(c, skb);
1718 } 1699 } else {
1719 else 1700 c->netdevice->stats.rx_dropped++;
1720 {
1721 c->stats.rx_dropped++;
1722 printk(KERN_ERR "%s: Lost a frame\n", c->netdevice->name); 1701 printk(KERN_ERR "%s: Lost a frame\n", c->netdevice->name);
1723 } 1702 }
1724} 1703}
@@ -1730,7 +1709,7 @@ static void z8530_rx_done(struct z8530_channel *c)
1730 * Returns true if the buffer cross a DMA boundary on a PC. The poor 1709 * Returns true if the buffer cross a DMA boundary on a PC. The poor
1731 * thing can only DMA within a 64K block not across the edges of it. 1710 * thing can only DMA within a 64K block not across the edges of it.
1732 */ 1711 */
1733 1712
1734static inline int spans_boundary(struct sk_buff *skb) 1713static inline int spans_boundary(struct sk_buff *skb)
1735{ 1714{
1736 unsigned long a=(unsigned long)skb->data; 1715 unsigned long a=(unsigned long)skb->data;
@@ -1799,24 +1778,6 @@ int z8530_queue_xmit(struct z8530_channel *c, struct sk_buff *skb)
1799 1778
1800EXPORT_SYMBOL(z8530_queue_xmit); 1779EXPORT_SYMBOL(z8530_queue_xmit);
1801 1780
1802/**
1803 * z8530_get_stats - Get network statistics
1804 * @c: The channel to use
1805 *
1806 * Get the statistics block. We keep the statistics in software as
1807 * the chip doesn't do it for us.
1808 *
1809 * Locking is ignored here - we could lock for a copy but its
1810 * not likely to be that big an issue
1811 */
1812
1813struct net_device_stats *z8530_get_stats(struct z8530_channel *c)
1814{
1815 return &c->stats;
1816}
1817
1818EXPORT_SYMBOL(z8530_get_stats);
1819
1820/* 1781/*
1821 * Module support 1782 * Module support
1822 */ 1783 */
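
With the conversion above, the z85230 driver stops keeping a private struct net_device_stats per channel: counters are bumped directly in c->netdevice->stats, which the networking core already exposes, so the z8530_get_stats() export (and the sealevel get_stats hook that wrapped it) becomes redundant and the stats field is dropped from struct z8530_channel in z85230.h below. A small sketch of reading the counters after this change; dump_counters is a hypothetical helper, not part of the driver:

static void dump_counters(struct z8530_channel *chan)
{
	struct net_device_stats *st = &chan->netdevice->stats;

	printk(KERN_DEBUG "%s: rx %lu pkts / %lu bytes, tx %lu pkts, %lu fifo errors\n",
	       chan->netdevice->name, st->rx_packets, st->rx_bytes,
	       st->tx_packets, st->tx_fifo_errors);
}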
diff --git a/drivers/net/wan/z85230.h b/drivers/net/wan/z85230.h
index 158aea7b8eac..4f372396c512 100644
--- a/drivers/net/wan/z85230.h
+++ b/drivers/net/wan/z85230.h
@@ -325,7 +325,6 @@ struct z8530_channel
325 325
326 void *private; /* For our owner */ 326 void *private; /* For our owner */
327 struct net_device *netdevice; /* Network layer device */ 327 struct net_device *netdevice; /* Network layer device */
328 struct net_device_stats stats; /* Network layer statistics */
329 328
330 /* 329 /*
331 * Async features 330 * Async features
@@ -366,13 +365,13 @@ struct z8530_channel
366 unsigned char tx_active; /* character is being xmitted */ 365 unsigned char tx_active; /* character is being xmitted */
367 unsigned char tx_stopped; /* output is suspended */ 366 unsigned char tx_stopped; /* output is suspended */
368 367
369 spinlock_t *lock; /* Devicr lock */ 368 spinlock_t *lock; /* Device lock */
370}; 369};
371 370
372/* 371/*
373 * Each Z853x0 device. 372 * Each Z853x0 device.
374 */ 373 */
375 374
376struct z8530_dev 375struct z8530_dev
377{ 376{
378 char *name; /* Device instance name */ 377 char *name; /* Device instance name */
@@ -408,7 +407,6 @@ extern int z8530_sync_txdma_open(struct net_device *, struct z8530_channel *);
408extern int z8530_sync_txdma_close(struct net_device *, struct z8530_channel *); 407extern int z8530_sync_txdma_close(struct net_device *, struct z8530_channel *);
409extern int z8530_channel_load(struct z8530_channel *, u8 *); 408extern int z8530_channel_load(struct z8530_channel *, u8 *);
410extern int z8530_queue_xmit(struct z8530_channel *c, struct sk_buff *skb); 409extern int z8530_queue_xmit(struct z8530_channel *c, struct sk_buff *skb);
411extern struct net_device_stats *z8530_get_stats(struct z8530_channel *c);
412extern void z8530_null_rx(struct z8530_channel *c, struct sk_buff *skb); 410extern void z8530_null_rx(struct z8530_channel *c, struct sk_buff *skb);
413 411
414 412
diff --git a/drivers/net/wd.c b/drivers/net/wd.c
index fa14255282af..6f9aa1643743 100644
--- a/drivers/net/wd.c
+++ b/drivers/net/wd.c
@@ -337,7 +337,7 @@ static int __init wd_probe1(struct net_device *dev, int ioaddr)
337#ifdef CONFIG_NET_POLL_CONTROLLER 337#ifdef CONFIG_NET_POLL_CONTROLLER
338 dev->poll_controller = ei_poll; 338 dev->poll_controller = ei_poll;
339#endif 339#endif
340 NS8390_init(dev, 0); 340 NS8390p_init(dev, 0);
341 341
342#if 1 342#if 1
343 /* Enable interrupt generation on softconfig cards -- M.U */ 343 /* Enable interrupt generation on softconfig cards -- M.U */
diff --git a/drivers/net/wireless/Kconfig b/drivers/net/wireless/Kconfig
index 91fc2c765d90..9931b5ab59cd 100644
--- a/drivers/net/wireless/Kconfig
+++ b/drivers/net/wireless/Kconfig
@@ -649,6 +649,7 @@ config RTL8187
649 Trendnet TEW-424UB 649 Trendnet TEW-424UB
650 ASUS P5B Deluxe 650 ASUS P5B Deluxe
651 Toshiba Satellite Pro series of laptops 651 Toshiba Satellite Pro series of laptops
652 Asus Wireless Link
652 653
653 Thanks to Realtek for their support! 654 Thanks to Realtek for their support!
654 655
@@ -694,6 +695,7 @@ config MAC80211_HWSIM
694 695
695source "drivers/net/wireless/p54/Kconfig" 696source "drivers/net/wireless/p54/Kconfig"
696source "drivers/net/wireless/ath5k/Kconfig" 697source "drivers/net/wireless/ath5k/Kconfig"
698source "drivers/net/wireless/ath9k/Kconfig"
697source "drivers/net/wireless/iwlwifi/Kconfig" 699source "drivers/net/wireless/iwlwifi/Kconfig"
698source "drivers/net/wireless/hostap/Kconfig" 700source "drivers/net/wireless/hostap/Kconfig"
699source "drivers/net/wireless/b43/Kconfig" 701source "drivers/net/wireless/b43/Kconfig"
diff --git a/drivers/net/wireless/Makefile b/drivers/net/wireless/Makefile
index 54a4f6f1db67..59aa89ec6e81 100644
--- a/drivers/net/wireless/Makefile
+++ b/drivers/net/wireless/Makefile
@@ -62,5 +62,6 @@ obj-$(CONFIG_RT2X00) += rt2x00/
62obj-$(CONFIG_P54_COMMON) += p54/ 62obj-$(CONFIG_P54_COMMON) += p54/
63 63
64obj-$(CONFIG_ATH5K) += ath5k/ 64obj-$(CONFIG_ATH5K) += ath5k/
65obj-$(CONFIG_ATH9K) += ath9k/
65 66
66obj-$(CONFIG_MAC80211_HWSIM) += mac80211_hwsim.o 67obj-$(CONFIG_MAC80211_HWSIM) += mac80211_hwsim.o
diff --git a/drivers/net/wireless/ath5k/ath5k.h b/drivers/net/wireless/ath5k/ath5k.h
index ba35c30d203c..9102eea3c8bf 100644
--- a/drivers/net/wireless/ath5k/ath5k.h
+++ b/drivers/net/wireless/ath5k/ath5k.h
@@ -186,11 +186,13 @@ struct ath5k_srev_name {
186#define AR5K_SREV_RAD_2111 0x20 186#define AR5K_SREV_RAD_2111 0x20
187#define AR5K_SREV_RAD_5112 0x30 187#define AR5K_SREV_RAD_5112 0x30
188#define AR5K_SREV_RAD_5112A 0x35 188#define AR5K_SREV_RAD_5112A 0x35
189#define AR5K_SREV_RAD_5112B 0x36
189#define AR5K_SREV_RAD_2112 0x40 190#define AR5K_SREV_RAD_2112 0x40
190#define AR5K_SREV_RAD_2112A 0x45 191#define AR5K_SREV_RAD_2112A 0x45
191#define AR5K_SREV_RAD_SC0 0x56 /* Found on 2413/2414 */ 192#define AR5K_SREV_RAD_2112B 0x46
192#define AR5K_SREV_RAD_SC1 0x63 /* Found on 5413/5414 */ 193#define AR5K_SREV_RAD_SC0 0x50 /* Found on 2413/2414 */
193#define AR5K_SREV_RAD_SC2 0xa2 /* Found on 2424-5/5424 */ 194#define AR5K_SREV_RAD_SC1 0x60 /* Found on 5413/5414 */
195#define AR5K_SREV_RAD_SC2 0xa0 /* Found on 2424-5/5424 */
194#define AR5K_SREV_RAD_5133 0xc0 /* MIMO found on 5418 */ 196#define AR5K_SREV_RAD_5133 0xc0 /* MIMO found on 5418 */
195 197
196/* IEEE defs */ 198/* IEEE defs */
diff --git a/drivers/net/wireless/ath5k/base.c b/drivers/net/wireless/ath5k/base.c
index d9769c527346..2028866f5995 100644
--- a/drivers/net/wireless/ath5k/base.c
+++ b/drivers/net/wireless/ath5k/base.c
@@ -43,7 +43,9 @@
43#include <linux/version.h> 43#include <linux/version.h>
44#include <linux/module.h> 44#include <linux/module.h>
45#include <linux/delay.h> 45#include <linux/delay.h>
46#include <linux/hardirq.h>
46#include <linux/if.h> 47#include <linux/if.h>
48#include <linux/io.h>
47#include <linux/netdevice.h> 49#include <linux/netdevice.h>
48#include <linux/cache.h> 50#include <linux/cache.h>
49#include <linux/pci.h> 51#include <linux/pci.h>
@@ -93,8 +95,6 @@ static struct pci_device_id ath5k_pci_id_table[] __devinitdata = {
93 { PCI_VDEVICE(ATHEROS, 0x001a), .driver_data = AR5K_AR5212 }, /* 2413 Griffin-lite */ 95 { PCI_VDEVICE(ATHEROS, 0x001a), .driver_data = AR5K_AR5212 }, /* 2413 Griffin-lite */
94 { PCI_VDEVICE(ATHEROS, 0x001b), .driver_data = AR5K_AR5212 }, /* 5413 Eagle */ 96 { PCI_VDEVICE(ATHEROS, 0x001b), .driver_data = AR5K_AR5212 }, /* 5413 Eagle */
95 { PCI_VDEVICE(ATHEROS, 0x001c), .driver_data = AR5K_AR5212 }, /* 5424 Condor (PCI-E)*/ 97 { PCI_VDEVICE(ATHEROS, 0x001c), .driver_data = AR5K_AR5212 }, /* 5424 Condor (PCI-E)*/
96 { PCI_VDEVICE(ATHEROS, 0x0023), .driver_data = AR5K_AR5212 }, /* 5416 */
97 { PCI_VDEVICE(ATHEROS, 0x0024), .driver_data = AR5K_AR5212 }, /* 5418 */
98 { 0 } 98 { 0 }
99}; 99};
100MODULE_DEVICE_TABLE(pci, ath5k_pci_id_table); 100MODULE_DEVICE_TABLE(pci, ath5k_pci_id_table);
@@ -471,9 +471,6 @@ ath5k_pci_probe(struct pci_dev *pdev,
471 /* Set private data */ 471 /* Set private data */
472 pci_set_drvdata(pdev, hw); 472 pci_set_drvdata(pdev, hw);
473 473
474 /* Enable msi for devices that support it */
475 pci_enable_msi(pdev);
476
477 /* Setup interrupt handler */ 474 /* Setup interrupt handler */
478 ret = request_irq(pdev->irq, ath5k_intr, IRQF_SHARED, "ath", sc); 475 ret = request_irq(pdev->irq, ath5k_intr, IRQF_SHARED, "ath", sc);
479 if (ret) { 476 if (ret) {
@@ -551,7 +548,6 @@ err_ah:
551err_irq: 548err_irq:
552 free_irq(pdev->irq, sc); 549 free_irq(pdev->irq, sc);
553err_free: 550err_free:
554 pci_disable_msi(pdev);
555 ieee80211_free_hw(hw); 551 ieee80211_free_hw(hw);
556err_map: 552err_map:
557 pci_iounmap(pdev, mem); 553 pci_iounmap(pdev, mem);
@@ -573,7 +569,6 @@ ath5k_pci_remove(struct pci_dev *pdev)
573 ath5k_detach(pdev, hw); 569 ath5k_detach(pdev, hw);
574 ath5k_hw_detach(sc->ah); 570 ath5k_hw_detach(sc->ah);
575 free_irq(pdev->irq, sc); 571 free_irq(pdev->irq, sc);
576 pci_disable_msi(pdev);
577 pci_iounmap(pdev, sc->iobase); 572 pci_iounmap(pdev, sc->iobase);
578 pci_release_region(pdev, 0); 573 pci_release_region(pdev, 0);
579 pci_disable_device(pdev); 574 pci_disable_device(pdev);
@@ -590,6 +585,9 @@ ath5k_pci_suspend(struct pci_dev *pdev, pm_message_t state)
590 ath5k_led_off(sc); 585 ath5k_led_off(sc);
591 586
592 ath5k_stop_hw(sc); 587 ath5k_stop_hw(sc);
588
589 free_irq(pdev->irq, sc);
590 pci_disable_msi(pdev);
593 pci_save_state(pdev); 591 pci_save_state(pdev);
594 pci_disable_device(pdev); 592 pci_disable_device(pdev);
595 pci_set_power_state(pdev, PCI_D3hot); 593 pci_set_power_state(pdev, PCI_D3hot);
@@ -605,15 +603,12 @@ ath5k_pci_resume(struct pci_dev *pdev)
605 struct ath5k_hw *ah = sc->ah; 603 struct ath5k_hw *ah = sc->ah;
606 int i, err; 604 int i, err;
607 605
608 err = pci_set_power_state(pdev, PCI_D0); 606 pci_restore_state(pdev);
609 if (err)
610 return err;
611 607
612 err = pci_enable_device(pdev); 608 err = pci_enable_device(pdev);
613 if (err) 609 if (err)
614 return err; 610 return err;
615 611
616 pci_restore_state(pdev);
617 /* 612 /*
618 * Suspend/Resume resets the PCI configuration space, so we have to 613 * Suspend/Resume resets the PCI configuration space, so we have to
619 * re-disable the RETRY_TIMEOUT register (0x41) to keep 614 * re-disable the RETRY_TIMEOUT register (0x41) to keep
@@ -621,7 +616,17 @@ ath5k_pci_resume(struct pci_dev *pdev)
621 */ 616 */
622 pci_write_config_byte(pdev, 0x41, 0); 617 pci_write_config_byte(pdev, 0x41, 0);
623 618
624 ath5k_init(sc); 619 pci_enable_msi(pdev);
620
621 err = request_irq(pdev->irq, ath5k_intr, IRQF_SHARED, "ath", sc);
622 if (err) {
623 ATH5K_ERR(sc, "request_irq failed\n");
624 goto err_msi;
625 }
626
627 err = ath5k_init(sc);
628 if (err)
629 goto err_irq;
625 ath5k_led_enable(sc); 630 ath5k_led_enable(sc);
626 631
627 /* 632 /*
@@ -635,6 +640,12 @@ ath5k_pci_resume(struct pci_dev *pdev)
635 ath5k_hw_reset_key(ah, i); 640 ath5k_hw_reset_key(ah, i);
636 641
637 return 0; 642 return 0;
643err_irq:
644 free_irq(pdev->irq, sc);
645err_msi:
646 pci_disable_msi(pdev);
647 pci_disable_device(pdev);
648 return err;
638} 649}
639#endif /* CONFIG_PM */ 650#endif /* CONFIG_PM */
640 651
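The reworked PM hooks pair teardown and setup: suspend now frees the IRQ and disables MSI before powering the device down, and resume re-enables MSI, re-requests the IRQ and only then calls ath5k_init(), unwinding in reverse order on failure. A minimal sketch of that resume-side pattern (illustrative only, not the driver's exact code; the function name is made up):

	static int ath5k_resume_sketch(struct pci_dev *pdev, struct ath5k_softc *sc)
	{
		int err;

		pci_enable_msi(pdev);		/* best effort; undone on any later failure */

		err = request_irq(pdev->irq, ath5k_intr, IRQF_SHARED, "ath", sc);
		if (err)
			goto err_msi;

		err = ath5k_init(sc);
		if (err)
			goto err_irq;

		return 0;

	err_irq:
		free_irq(pdev->irq, sc);	/* undo in reverse order of acquisition */
	err_msi:
		pci_disable_msi(pdev);
		pci_disable_device(pdev);
		return err;
	}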
@@ -1224,7 +1235,7 @@ ath5k_txbuf_setup(struct ath5k_softc *sc, struct ath5k_buf *bf)
1224 1235
1225 pktlen = skb->len; 1236 pktlen = skb->len;
1226 1237
1227 if (!(info->flags & IEEE80211_TX_CTL_DO_NOT_ENCRYPT)) { 1238 if (info->control.hw_key) {
1228 keyidx = info->control.hw_key->hw_key_idx; 1239 keyidx = info->control.hw_key->hw_key_idx;
1229 pktlen += info->control.icv_len; 1240 pktlen += info->control.icv_len;
1230 } 1241 }
@@ -1249,6 +1260,7 @@ ath5k_txbuf_setup(struct ath5k_softc *sc, struct ath5k_buf *bf)
1249 1260
1250 txq->link = &ds->ds_link; 1261 txq->link = &ds->ds_link;
1251 ath5k_hw_tx_start(ah, txq->qnum); 1262 ath5k_hw_tx_start(ah, txq->qnum);
1263 mmiowb();
1252 spin_unlock_bh(&txq->lock); 1264 spin_unlock_bh(&txq->lock);
1253 1265
1254 return 0; 1266 return 0;
@@ -1583,7 +1595,6 @@ ath5k_rx_stop(struct ath5k_softc *sc)
1583 ath5k_hw_stop_pcu_recv(ah); /* disable PCU */ 1595 ath5k_hw_stop_pcu_recv(ah); /* disable PCU */
1584 ath5k_hw_set_rx_filter(ah, 0); /* clear recv filter */ 1596 ath5k_hw_set_rx_filter(ah, 0); /* clear recv filter */
1585 ath5k_hw_stop_rx_dma(ah); /* disable DMA engine */ 1597 ath5k_hw_stop_rx_dma(ah); /* disable DMA engine */
1586 mdelay(3); /* 3ms is long enough for 1 frame */
1587 1598
1588 ath5k_debug_printrxbuffs(sc, ah); 1599 ath5k_debug_printrxbuffs(sc, ah);
1589 1600
@@ -1682,31 +1693,44 @@ ath5k_tasklet_rx(unsigned long data)
1682 struct ath5k_rx_status rs = {}; 1693 struct ath5k_rx_status rs = {};
1683 struct sk_buff *skb; 1694 struct sk_buff *skb;
1684 struct ath5k_softc *sc = (void *)data; 1695 struct ath5k_softc *sc = (void *)data;
1685 struct ath5k_buf *bf; 1696 struct ath5k_buf *bf, *bf_last;
1686 struct ath5k_desc *ds; 1697 struct ath5k_desc *ds;
1687 int ret; 1698 int ret;
1688 int hdrlen; 1699 int hdrlen;
1689 int pad; 1700 int pad;
1690 1701
1691 spin_lock(&sc->rxbuflock); 1702 spin_lock(&sc->rxbuflock);
1703 if (list_empty(&sc->rxbuf)) {
1704 ATH5K_WARN(sc, "empty rx buf pool\n");
1705 goto unlock;
1706 }
1707 bf_last = list_entry(sc->rxbuf.prev, struct ath5k_buf, list);
1692 do { 1708 do {
1693 rxs.flag = 0; 1709 rxs.flag = 0;
1694 1710
1695 if (unlikely(list_empty(&sc->rxbuf))) {
1696 ATH5K_WARN(sc, "empty rx buf pool\n");
1697 break;
1698 }
1699 bf = list_first_entry(&sc->rxbuf, struct ath5k_buf, list); 1711 bf = list_first_entry(&sc->rxbuf, struct ath5k_buf, list);
1700 BUG_ON(bf->skb == NULL); 1712 BUG_ON(bf->skb == NULL);
1701 skb = bf->skb; 1713 skb = bf->skb;
1702 ds = bf->desc; 1714 ds = bf->desc;
1703 1715
1704 /* TODO only one segment */ 1716 /*
1705 pci_dma_sync_single_for_cpu(sc->pdev, sc->desc_daddr, 1717 * the last buffer must not be freed, to keep the hardware
1706 sc->desc_len, PCI_DMA_FROMDEVICE); 1718 * working. Once the hardware has also finished the packet next to
1707 1719 * it, we know it no longer uses this one and we can go on.
1708 if (unlikely(ds->ds_link == bf->daddr)) /* this is the end */ 1720 */
1709 break; 1721 if (bf_last == bf)
1722 bf->flags |= 1;
1723 if (bf->flags) {
1724 struct ath5k_buf *bf_next = list_entry(bf->list.next,
1725 struct ath5k_buf, list);
1726 ret = sc->ah->ah_proc_rx_desc(sc->ah, bf_next->desc,
1727 &rs);
1728 if (ret)
1729 break;
1730 bf->flags &= ~1;
1731 /* skip the overwritten one (even status is martian) */
1732 goto next;
1733 }
1710 1734
1711 ret = sc->ah->ah_proc_rx_desc(sc->ah, ds, &rs); 1735 ret = sc->ah->ah_proc_rx_desc(sc->ah, ds, &rs);
1712 if (unlikely(ret == -EINPROGRESS)) 1736 if (unlikely(ret == -EINPROGRESS))
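The RX tasklet change above keeps the self-linked tail descriptor away from the free pool while the hardware may still write to it: the last buffer is only recycled after the descriptor following it has also completed. A minimal sketch of that check (illustrative; the flag bit is given a made-up name here, the driver itself just uses bit 0 of bf->flags):

	/* inside the rx loop, after picking bf from the head of sc->rxbuf */
	#define ATH_BUF_HW_TAIL	1		/* hypothetical name for bit 0 of bf->flags */

	if (bf == bf_last)
		bf->flags |= ATH_BUF_HW_TAIL;	/* reached the self-linked last descriptor */

	if (bf->flags & ATH_BUF_HW_TAIL) {
		struct ath5k_buf *bf_next = list_entry(bf->list.next,
						       struct ath5k_buf, list);

		/* the tail is only safe to reuse once the *next* descriptor is done */
		if (sc->ah->ah_proc_rx_desc(sc->ah, bf_next->desc, &rs))
			break;			/* still in progress: try again later */

		bf->flags &= ~ATH_BUF_HW_TAIL;
		goto next;			/* its own status is stale, skip it */
	}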
@@ -1752,8 +1776,6 @@ ath5k_tasklet_rx(unsigned long data)
1752 goto next; 1776 goto next;
1753 } 1777 }
1754accept: 1778accept:
1755 pci_dma_sync_single_for_cpu(sc->pdev, bf->skbaddr,
1756 rs.rs_datalen, PCI_DMA_FROMDEVICE);
1757 pci_unmap_single(sc->pdev, bf->skbaddr, sc->rxbufsize, 1779 pci_unmap_single(sc->pdev, bf->skbaddr, sc->rxbufsize,
1758 PCI_DMA_FROMDEVICE); 1780 PCI_DMA_FROMDEVICE);
1759 bf->skb = NULL; 1781 bf->skb = NULL;
@@ -1816,6 +1838,7 @@ accept:
1816next: 1838next:
1817 list_move_tail(&bf->list, &sc->rxbuf); 1839 list_move_tail(&bf->list, &sc->rxbuf);
1818 } while (ath5k_rxbuf_setup(sc, bf) == 0); 1840 } while (ath5k_rxbuf_setup(sc, bf) == 0);
1841unlock:
1819 spin_unlock(&sc->rxbuflock); 1842 spin_unlock(&sc->rxbuflock);
1820} 1843}
1821 1844
@@ -1840,9 +1863,6 @@ ath5k_tx_processq(struct ath5k_softc *sc, struct ath5k_txq *txq)
1840 list_for_each_entry_safe(bf, bf0, &txq->q, list) { 1863 list_for_each_entry_safe(bf, bf0, &txq->q, list) {
1841 ds = bf->desc; 1864 ds = bf->desc;
1842 1865
1843 /* TODO only one segment */
1844 pci_dma_sync_single_for_cpu(sc->pdev, sc->desc_daddr,
1845 sc->desc_len, PCI_DMA_FROMDEVICE);
1846 ret = sc->ah->ah_proc_tx_desc(sc->ah, ds, &ts); 1866 ret = sc->ah->ah_proc_tx_desc(sc->ah, ds, &ts);
1847 if (unlikely(ret == -EINPROGRESS)) 1867 if (unlikely(ret == -EINPROGRESS))
1848 break; 1868 break;
@@ -2015,8 +2035,6 @@ ath5k_beacon_send(struct ath5k_softc *sc)
2015 ATH5K_WARN(sc, "beacon queue %u didn't stop?\n", sc->bhalq); 2035 ATH5K_WARN(sc, "beacon queue %u didn't stop?\n", sc->bhalq);
2016 /* NB: hw still stops DMA, so proceed */ 2036 /* NB: hw still stops DMA, so proceed */
2017 } 2037 }
2018 pci_dma_sync_single_for_cpu(sc->pdev, bf->skbaddr, bf->skb->len,
2019 PCI_DMA_TODEVICE);
2020 2038
2021 ath5k_hw_put_tx_buf(ah, sc->bhalq, bf->daddr); 2039 ath5k_hw_put_tx_buf(ah, sc->bhalq, bf->daddr);
2022 ath5k_hw_tx_start(ah, sc->bhalq); 2040 ath5k_hw_tx_start(ah, sc->bhalq);
@@ -2150,6 +2168,7 @@ ath5k_beacon_config(struct ath5k_softc *sc)
2150 2168
2151 ath5k_hw_set_intr(ah, 0); 2169 ath5k_hw_set_intr(ah, 0);
2152 sc->bmisscount = 0; 2170 sc->bmisscount = 0;
2171 sc->imask &= ~(AR5K_INT_BMISS | AR5K_INT_SWBA);
2153 2172
2154 if (sc->opmode == IEEE80211_IF_TYPE_STA) { 2173 if (sc->opmode == IEEE80211_IF_TYPE_STA) {
2155 sc->imask |= AR5K_INT_BMISS; 2174 sc->imask |= AR5K_INT_BMISS;
@@ -2240,6 +2259,7 @@ ath5k_init(struct ath5k_softc *sc)
2240 2259
2241 ret = 0; 2260 ret = 0;
2242done: 2261done:
2262 mmiowb();
2243 mutex_unlock(&sc->lock); 2263 mutex_unlock(&sc->lock);
2244 return ret; 2264 return ret;
2245} 2265}
@@ -2272,6 +2292,7 @@ ath5k_stop_locked(struct ath5k_softc *sc)
2272 if (!test_bit(ATH_STAT_INVALID, sc->status)) { 2292 if (!test_bit(ATH_STAT_INVALID, sc->status)) {
2273 ath5k_led_off(sc); 2293 ath5k_led_off(sc);
2274 ath5k_hw_set_intr(ah, 0); 2294 ath5k_hw_set_intr(ah, 0);
2295 synchronize_irq(sc->pdev->irq);
2275 } 2296 }
2276 ath5k_txq_cleanup(sc); 2297 ath5k_txq_cleanup(sc);
2277 if (!test_bit(ATH_STAT_INVALID, sc->status)) { 2298 if (!test_bit(ATH_STAT_INVALID, sc->status)) {
@@ -2321,9 +2342,13 @@ ath5k_stop_hw(struct ath5k_softc *sc)
2321 } 2342 }
2322 } 2343 }
2323 ath5k_txbuf_free(sc, sc->bbuf); 2344 ath5k_txbuf_free(sc, sc->bbuf);
2345 mmiowb();
2324 mutex_unlock(&sc->lock); 2346 mutex_unlock(&sc->lock);
2325 2347
2326 del_timer_sync(&sc->calib_tim); 2348 del_timer_sync(&sc->calib_tim);
2349 tasklet_kill(&sc->rxtq);
2350 tasklet_kill(&sc->txtq);
2351 tasklet_kill(&sc->restq);
2327 2352
2328 return ret; 2353 return ret;
2329} 2354}
@@ -2550,8 +2575,6 @@ ath5k_init_leds(struct ath5k_softc *sc)
2550 struct pci_dev *pdev = sc->pdev; 2575 struct pci_dev *pdev = sc->pdev;
2551 char name[ATH5K_LED_MAX_NAME_LEN + 1]; 2576 char name[ATH5K_LED_MAX_NAME_LEN + 1];
2552 2577
2553 sc->led_on = 0; /* active low */
2554
2555 /* 2578 /*
2556 * Auto-enable soft led processing for IBM cards and for 2579 * Auto-enable soft led processing for IBM cards and for
2557 * 5211 minipci cards. 2580 * 5211 minipci cards.
@@ -2560,11 +2583,13 @@ ath5k_init_leds(struct ath5k_softc *sc)
2560 pdev->device == PCI_DEVICE_ID_ATHEROS_AR5211) { 2583 pdev->device == PCI_DEVICE_ID_ATHEROS_AR5211) {
2561 __set_bit(ATH_STAT_LEDSOFT, sc->status); 2584 __set_bit(ATH_STAT_LEDSOFT, sc->status);
2562 sc->led_pin = 0; 2585 sc->led_pin = 0;
2586 sc->led_on = 0; /* active low */
2563 } 2587 }
2564 /* Enable softled on PIN1 on HP Compaq nc6xx, nc4000 & nx5000 laptops */ 2588 /* Enable softled on PIN1 on HP Compaq nc6xx, nc4000 & nx5000 laptops */
2565 if (pdev->subsystem_vendor == PCI_VENDOR_ID_COMPAQ) { 2589 if (pdev->subsystem_vendor == PCI_VENDOR_ID_COMPAQ) {
2566 __set_bit(ATH_STAT_LEDSOFT, sc->status); 2590 __set_bit(ATH_STAT_LEDSOFT, sc->status);
2567 sc->led_pin = 1; 2591 sc->led_pin = 1;
2592 sc->led_on = 1; /* active high */
2568 } 2593 }
2569 if (!test_bit(ATH_STAT_LEDSOFT, sc->status)) 2594 if (!test_bit(ATH_STAT_LEDSOFT, sc->status))
2570 goto out; 2595 goto out;
@@ -2783,6 +2808,7 @@ ath5k_config_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
2783 /* XXX: assoc id is set to 0 for now, mac80211 doesn't have 2808 /* XXX: assoc id is set to 0 for now, mac80211 doesn't have
2784 * a clean way of letting us retrieve this yet. */ 2809 * a clean way of letting us retrieve this yet. */
2785 ath5k_hw_set_associd(ah, ah->ah_bssid, 0); 2810 ath5k_hw_set_associd(ah, ah->ah_bssid, 0);
2811 mmiowb();
2786 } 2812 }
2787 2813
2788 if (conf->changed & IEEE80211_IFCC_BEACON && 2814 if (conf->changed & IEEE80211_IFCC_BEACON &&
@@ -2971,6 +2997,7 @@ ath5k_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
2971 } 2997 }
2972 2998
2973unlock: 2999unlock:
3000 mmiowb();
2974 mutex_unlock(&sc->lock); 3001 mutex_unlock(&sc->lock);
2975 return ret; 3002 return ret;
2976} 3003}
@@ -3032,8 +3059,6 @@ ath5k_beacon_update(struct ieee80211_hw *hw, struct sk_buff *skb)
3032 3059
3033 ath5k_debug_dump_skb(sc, skb, "BC ", 1); 3060 ath5k_debug_dump_skb(sc, skb, "BC ", 1);
3034 3061
3035 mutex_lock(&sc->lock);
3036
3037 if (sc->opmode != IEEE80211_IF_TYPE_IBSS) { 3062 if (sc->opmode != IEEE80211_IF_TYPE_IBSS) {
3038 ret = -EIO; 3063 ret = -EIO;
3039 goto end; 3064 goto end;
@@ -3044,11 +3069,12 @@ ath5k_beacon_update(struct ieee80211_hw *hw, struct sk_buff *skb)
3044 ret = ath5k_beacon_setup(sc, sc->bbuf); 3069 ret = ath5k_beacon_setup(sc, sc->bbuf);
3045 if (ret) 3070 if (ret)
3046 sc->bbuf->skb = NULL; 3071 sc->bbuf->skb = NULL;
3047 else 3072 else {
3048 ath5k_beacon_config(sc); 3073 ath5k_beacon_config(sc);
3074 mmiowb();
3075 }
3049 3076
3050end: 3077end:
3051 mutex_unlock(&sc->lock);
3052 return ret; 3078 return ret;
3053} 3079}
3054 3080
diff --git a/drivers/net/wireless/ath5k/base.h b/drivers/net/wireless/ath5k/base.h
index 47f414b09e67..d7e03e6b8271 100644
--- a/drivers/net/wireless/ath5k/base.h
+++ b/drivers/net/wireless/ath5k/base.h
@@ -56,7 +56,7 @@
56 56
57struct ath5k_buf { 57struct ath5k_buf {
58 struct list_head list; 58 struct list_head list;
59 unsigned int flags; /* tx descriptor flags */ 59 unsigned int flags; /* rx descriptor flags */
60 struct ath5k_desc *desc; /* virtual addr of desc */ 60 struct ath5k_desc *desc; /* virtual addr of desc */
61 dma_addr_t daddr; /* physical addr of desc */ 61 dma_addr_t daddr; /* physical addr of desc */
62 struct sk_buff *skb; /* skbuff for buf */ 62 struct sk_buff *skb; /* skbuff for buf */
diff --git a/drivers/net/wireless/ath5k/debug.c b/drivers/net/wireless/ath5k/debug.c
index 41d5fa34b544..6fa6c8e04ff0 100644
--- a/drivers/net/wireless/ath5k/debug.c
+++ b/drivers/net/wireless/ath5k/debug.c
@@ -129,7 +129,7 @@ static struct reg regs[] = {
129 REG_STRUCT_INIT(AR5K_CPC1), 129 REG_STRUCT_INIT(AR5K_CPC1),
130 REG_STRUCT_INIT(AR5K_CPC2), 130 REG_STRUCT_INIT(AR5K_CPC2),
131 REG_STRUCT_INIT(AR5K_CPC3), 131 REG_STRUCT_INIT(AR5K_CPC3),
132 REG_STRUCT_INIT(AR5K_CPCORN), 132 REG_STRUCT_INIT(AR5K_CPCOVF),
133 REG_STRUCT_INIT(AR5K_RESET_CTL), 133 REG_STRUCT_INIT(AR5K_RESET_CTL),
134 REG_STRUCT_INIT(AR5K_SLEEP_CTL), 134 REG_STRUCT_INIT(AR5K_SLEEP_CTL),
135 REG_STRUCT_INIT(AR5K_INTPEND), 135 REG_STRUCT_INIT(AR5K_INTPEND),
diff --git a/drivers/net/wireless/ath5k/debug.h b/drivers/net/wireless/ath5k/debug.h
index 2cf8d18b10e3..ffc529393306 100644
--- a/drivers/net/wireless/ath5k/debug.h
+++ b/drivers/net/wireless/ath5k/debug.h
@@ -63,7 +63,6 @@
63 63
64struct ath5k_softc; 64struct ath5k_softc;
65struct ath5k_hw; 65struct ath5k_hw;
66struct ieee80211_hw_mode;
67struct sk_buff; 66struct sk_buff;
68struct ath5k_buf; 67struct ath5k_buf;
69 68
diff --git a/drivers/net/wireless/ath5k/hw.c b/drivers/net/wireless/ath5k/hw.c
index c6d12c53bda4..ad1a5b422c8c 100644
--- a/drivers/net/wireless/ath5k/hw.c
+++ b/drivers/net/wireless/ath5k/hw.c
@@ -139,6 +139,8 @@ static int ath5k_hw_post(struct ath5k_hw *ah)
139 for (c = 0; c < 2; c++) { 139 for (c = 0; c < 2; c++) {
140 140
141 cur_reg = regs[c]; 141 cur_reg = regs[c];
142
143 /* Save previous value */
142 init_val = ath5k_hw_reg_read(ah, cur_reg); 144 init_val = ath5k_hw_reg_read(ah, cur_reg);
143 145
144 for (i = 0; i < 256; i++) { 146 for (i = 0; i < 256; i++) {
@@ -170,6 +172,10 @@ static int ath5k_hw_post(struct ath5k_hw *ah)
170 var_pattern = 0x003b080f; 172 var_pattern = 0x003b080f;
171 ath5k_hw_reg_write(ah, var_pattern, cur_reg); 173 ath5k_hw_reg_write(ah, var_pattern, cur_reg);
172 } 174 }
175
176 /* Restore previous value */
177 ath5k_hw_reg_write(ah, init_val, cur_reg);
178
173 } 179 }
174 180
175 return 0; 181 return 0;
@@ -287,67 +293,42 @@ struct ath5k_hw *ath5k_hw_attach(struct ath5k_softc *sc, u8 mac_version)
287 /* Identify the radio chip*/ 293 /* Identify the radio chip*/
288 if (ah->ah_version == AR5K_AR5210) { 294 if (ah->ah_version == AR5K_AR5210) {
289 ah->ah_radio = AR5K_RF5110; 295 ah->ah_radio = AR5K_RF5110;
296 /*
297 * Register returns 0x0/0x04 for radio revision
298 * so ath5k_hw_radio_revision doesn't parse the value
299 * correctly. For now we are based on mac's srev to
300 * identify RF2425 radio.
301 */
302 } else if (srev == AR5K_SREV_VER_AR2425) {
303 ah->ah_radio = AR5K_RF2425;
304 ah->ah_phy_spending = AR5K_PHY_SPENDING_RF2425;
290 } else if (ah->ah_radio_5ghz_revision < AR5K_SREV_RAD_5112) { 305 } else if (ah->ah_radio_5ghz_revision < AR5K_SREV_RAD_5112) {
291 ah->ah_radio = AR5K_RF5111; 306 ah->ah_radio = AR5K_RF5111;
292 ah->ah_phy_spending = AR5K_PHY_SPENDING_RF5111; 307 ah->ah_phy_spending = AR5K_PHY_SPENDING_RF5111;
293 } else if (ah->ah_radio_5ghz_revision < AR5K_SREV_RAD_SC0) { 308 } else if (ah->ah_radio_5ghz_revision < AR5K_SREV_RAD_SC0) {
294
295 ah->ah_radio = AR5K_RF5112; 309 ah->ah_radio = AR5K_RF5112;
296 310 ah->ah_phy_spending = AR5K_PHY_SPENDING_RF5112;
297 if (ah->ah_radio_5ghz_revision < AR5K_SREV_RAD_5112A) {
298 ah->ah_phy_spending = AR5K_PHY_SPENDING_RF5112;
299 } else {
300 ah->ah_phy_spending = AR5K_PHY_SPENDING_RF5112A;
301 }
302
303 } else if (ah->ah_radio_5ghz_revision < AR5K_SREV_RAD_SC1) { 311 } else if (ah->ah_radio_5ghz_revision < AR5K_SREV_RAD_SC1) {
304 ah->ah_radio = AR5K_RF2413; 312 ah->ah_radio = AR5K_RF2413;
305 ah->ah_phy_spending = AR5K_PHY_SPENDING_RF5112A; 313 ah->ah_phy_spending = AR5K_PHY_SPENDING_RF2413;
306 } else if (ah->ah_radio_5ghz_revision < AR5K_SREV_RAD_SC2) { 314 } else if (ah->ah_radio_5ghz_revision < AR5K_SREV_RAD_SC2) {
307 ah->ah_radio = AR5K_RF5413; 315 ah->ah_radio = AR5K_RF5413;
308 ah->ah_phy_spending = AR5K_PHY_SPENDING_RF5112A; 316 ah->ah_phy_spending = AR5K_PHY_SPENDING_RF5413;
309 } else if (ah->ah_radio_5ghz_revision < AR5K_SREV_RAD_5133) { 317 } else if (ah->ah_radio_5ghz_revision < AR5K_SREV_RAD_5133) {
310
311 /* AR5424 */ 318 /* AR5424 */
312 if (srev >= AR5K_SREV_VER_AR5424) { 319 if (srev >= AR5K_SREV_VER_AR5424) {
313 ah->ah_radio = AR5K_RF5413; 320 ah->ah_radio = AR5K_RF5413;
314 ah->ah_phy_spending = AR5K_PHY_SPENDING_RF5424; 321 ah->ah_phy_spending = AR5K_PHY_SPENDING_RF5413;
315 /* AR2424 */ 322 /* AR2424 */
316 } else { 323 } else {
317 ah->ah_radio = AR5K_RF2413; /* For testing */ 324 ah->ah_radio = AR5K_RF2413; /* For testing */
318 ah->ah_phy_spending = AR5K_PHY_SPENDING_RF5112A; 325 ah->ah_phy_spending = AR5K_PHY_SPENDING_RF2413;
319 } 326 }
320
321 /*
322 * Register returns 0x4 for radio revision
323 * so ath5k_hw_radio_revision doesn't parse the value
324 * correctly. For now we are based on mac's srev to
325 * identify RF2425 radio.
326 */
327 } else if (srev == AR5K_SREV_VER_AR2425) {
328 ah->ah_radio = AR5K_RF2425;
329 ah->ah_phy_spending = AR5K_PHY_SPENDING_RF5112;
330 } 327 }
331
332 ah->ah_phy = AR5K_PHY(0); 328 ah->ah_phy = AR5K_PHY(0);
333 329
334 /* 330 /*
335 * Identify AR5212-based PCI-E cards 331 * Write PCI-E power save settings
336 * And write some initial settings.
337 *
338 * (doing a "strings" on ndis driver
339 * -ar5211.sys- reveals the following
340 * pci-e related functions:
341 *
342 * pcieClockReq
343 * pcieRxErrNotify
344 * pcieL1SKPEnable
345 * pcieAspm
346 * pcieDisableAspmOnRfWake
347 * pciePowerSaveEnable
348 *
349 * I guess these point to ClockReq but
350 * i'm not sure.)
351 */ 332 */
352 if ((ah->ah_version == AR5K_AR5212) && (pdev->is_pcie)) { 333 if ((ah->ah_version == AR5K_AR5212) && (pdev->is_pcie)) {
353 ath5k_hw_reg_write(ah, 0x9248fc00, 0x4080); 334 ath5k_hw_reg_write(ah, 0x9248fc00, 0x4080);
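Read together with the corrected AR5K_SREV_RAD_* values in the first hunk (SC0 0x50, SC1 0x60, SC2 0xa0, plus the new 5112B/2112B entries), the attach code above identifies the radio by comparing the 5 GHz radio revision against ascending thresholds, special-casing RF2425 on the MAC srev because its radio-revision register reads back 0x0/0x4. A condensed summary of the resulting mapping (editorial, derived from the hunk):

	/*
	 * MAC srev == AR5K_SREV_VER_AR2425            -> RF2425 (radio rev not usable)
	 * rad_rev  <  AR5K_SREV_RAD_5112  (0x30)      -> RF5111
	 * rad_rev  <  AR5K_SREV_RAD_SC0   (0x50)      -> RF5112
	 * rad_rev  <  AR5K_SREV_RAD_SC1   (0x60)      -> RF2413
	 * rad_rev  <  AR5K_SREV_RAD_SC2   (0xa0)      -> RF5413
	 * rad_rev  <  AR5K_SREV_RAD_5133  (0xc0)      -> RF5413 on AR5424, RF2413 (for testing) on AR2424
	 */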
@@ -369,10 +350,15 @@ struct ath5k_hw *ath5k_hw_attach(struct ath5k_softc *sc, u8 mac_version)
369 if (ret) 350 if (ret)
370 goto err_free; 351 goto err_free;
371 352
353 /* Write AR5K_PCICFG_UNK on 2112B and later chips */
354 if (ah->ah_radio_5ghz_revision > AR5K_SREV_RAD_2112B ||
355 srev > AR5K_SREV_VER_AR2413) {
356 ath5k_hw_reg_write(ah, AR5K_PCICFG_UNK, AR5K_PCICFG);
357 }
358
372 /* 359 /*
373 * Get card capabilities, values, ... 360 * Get card capabilities, values, ...
374 */ 361 */
375
376 ret = ath5k_eeprom_init(ah); 362 ret = ath5k_eeprom_init(ah);
377 if (ret) { 363 if (ret) {
378 ATH5K_ERR(sc, "unable to init EEPROM\n"); 364 ATH5K_ERR(sc, "unable to init EEPROM\n");
@@ -843,27 +829,41 @@ int ath5k_hw_reset(struct ath5k_hw *ah, enum ieee80211_if_types op_mode,
843 * Write some more initial register settings 829 * Write some more initial register settings
844 */ 830 */
845 if (ah->ah_version == AR5K_AR5212) { 831 if (ah->ah_version == AR5K_AR5212) {
846 ath5k_hw_reg_write(ah, 0x0002a002, AR5K_PHY(11)); 832 ath5k_hw_reg_write(ah, 0x0002a002, 0x982c);
847 833
848 if (channel->hw_value == CHANNEL_G) 834 if (channel->hw_value == CHANNEL_G)
849 if (ah->ah_mac_srev < AR5K_SREV_VER_AR2413) 835 if (ah->ah_mac_srev < AR5K_SREV_VER_AR2413)
850 ath5k_hw_reg_write(ah, 0x00f80d80, 836 ath5k_hw_reg_write(ah, 0x00f80d80,
851 AR5K_PHY(83)); 837 0x994c);
852 else if (ah->ah_mac_srev < AR5K_SREV_VER_AR2424) 838 else if (ah->ah_mac_srev < AR5K_SREV_VER_AR2424)
853 ath5k_hw_reg_write(ah, 0x00380140, 839 ath5k_hw_reg_write(ah, 0x00380140,
854 AR5K_PHY(83)); 840 0x994c);
855 else if (ah->ah_mac_srev < AR5K_SREV_VER_AR2425) 841 else if (ah->ah_mac_srev < AR5K_SREV_VER_AR2425)
856 ath5k_hw_reg_write(ah, 0x00fc0ec0, 842 ath5k_hw_reg_write(ah, 0x00fc0ec0,
857 AR5K_PHY(83)); 843 0x994c);
858 else /* 2425 */ 844 else /* 2425 */
859 ath5k_hw_reg_write(ah, 0x00fc0fc0, 845 ath5k_hw_reg_write(ah, 0x00fc0fc0,
860 AR5K_PHY(83)); 846 0x994c);
861 else 847 else
862 ath5k_hw_reg_write(ah, 0x00000000, 848 ath5k_hw_reg_write(ah, 0x00000000, 0x994c);
863 AR5K_PHY(83)); 849
864 850 /* Some bits are disabled here; we know nothing about
865 ath5k_hw_reg_write(ah, 0x000009b5, 0xa228); 851 * register 0xa228 yet. Most of the time this ends up
866 ath5k_hw_reg_write(ah, 0x0000000f, 0x8060); 852 * with the value 0x9b5 (haven't seen any dump with
853 * a different value). */
854 /* Got this from decompiling binary HAL */
855 data = ath5k_hw_reg_read(ah, 0xa228);
856 data &= 0xfffffdff;
857 ath5k_hw_reg_write(ah, data, 0xa228);
858
859 data = ath5k_hw_reg_read(ah, 0xa228);
860 data &= 0xfffe03ff;
861 ath5k_hw_reg_write(ah, data, 0xa228);
862 data = 0;
863
864 /* Just write 0x9b5 ? */
865 /* ath5k_hw_reg_write(ah, 0x000009b5, 0xa228); */
866 ath5k_hw_reg_write(ah, 0x0000000f, AR5K_SEQ_MASK);
867 ath5k_hw_reg_write(ah, 0x00000000, 0xa254); 867 ath5k_hw_reg_write(ah, 0x00000000, 0xa254);
868 ath5k_hw_reg_write(ah, 0x0000000e, AR5K_PHY_SCAL); 868 ath5k_hw_reg_write(ah, 0x0000000e, AR5K_PHY_SCAL);
869 } 869 }
@@ -879,6 +879,7 @@ int ath5k_hw_reset(struct ath5k_hw *ah, enum ieee80211_if_types op_mode,
879 else 879 else
880 data = 0xffb80d20; 880 data = 0xffb80d20;
881 ath5k_hw_reg_write(ah, data, AR5K_PHY_FRAME_CTL); 881 ath5k_hw_reg_write(ah, data, AR5K_PHY_FRAME_CTL);
882 data = 0;
882 } 883 }
883 884
884 /* 885 /*
@@ -898,7 +899,6 @@ int ath5k_hw_reset(struct ath5k_hw *ah, enum ieee80211_if_types op_mode,
898 899
899 /* 900 /*
900 * Write RF registers 901 * Write RF registers
901 * TODO:Does this work on 5211 (5111) ?
902 */ 902 */
903 ret = ath5k_hw_rfregs(ah, channel, mode); 903 ret = ath5k_hw_rfregs(ah, channel, mode);
904 if (ret) 904 if (ret)
@@ -935,7 +935,7 @@ int ath5k_hw_reset(struct ath5k_hw *ah, enum ieee80211_if_types op_mode,
935 return ret; 935 return ret;
936 936
937 /* Set antenna mode */ 937 /* Set antenna mode */
938 AR5K_REG_MASKED_BITS(ah, AR5K_PHY(0x44), 938 AR5K_REG_MASKED_BITS(ah, AR5K_PHY_ANT_CTL,
939 ah->ah_antenna[ee_mode][0], 0xfffffc06); 939 ah->ah_antenna[ee_mode][0], 0xfffffc06);
940 940
941 /* 941 /*
@@ -965,15 +965,15 @@ int ath5k_hw_reset(struct ath5k_hw *ah, enum ieee80211_if_types op_mode,
965 965
966 ath5k_hw_reg_write(ah, 966 ath5k_hw_reg_write(ah,
967 AR5K_PHY_NF_SVAL(ee->ee_noise_floor_thr[ee_mode]), 967 AR5K_PHY_NF_SVAL(ee->ee_noise_floor_thr[ee_mode]),
968 AR5K_PHY(0x5a)); 968 AR5K_PHY_NFTHRES);
969 969
970 AR5K_REG_MASKED_BITS(ah, AR5K_PHY(0x11), 970 AR5K_REG_MASKED_BITS(ah, AR5K_PHY_SETTLING,
971 (ee->ee_switch_settling[ee_mode] << 7) & 0x3f80, 971 (ee->ee_switch_settling[ee_mode] << 7) & 0x3f80,
972 0xffffc07f); 972 0xffffc07f);
973 AR5K_REG_MASKED_BITS(ah, AR5K_PHY(0x12), 973 AR5K_REG_MASKED_BITS(ah, AR5K_PHY_GAIN,
974 (ee->ee_ant_tx_rx[ee_mode] << 12) & 0x3f000, 974 (ee->ee_ant_tx_rx[ee_mode] << 12) & 0x3f000,
975 0xfffc0fff); 975 0xfffc0fff);
976 AR5K_REG_MASKED_BITS(ah, AR5K_PHY(0x14), 976 AR5K_REG_MASKED_BITS(ah, AR5K_PHY_DESIRED_SIZE,
977 (ee->ee_adc_desired_size[ee_mode] & 0x00ff) | 977 (ee->ee_adc_desired_size[ee_mode] & 0x00ff) |
978 ((ee->ee_pga_desired_size[ee_mode] << 8) & 0xff00), 978 ((ee->ee_pga_desired_size[ee_mode] << 8) & 0xff00),
979 0xffff0000); 979 0xffff0000);
@@ -982,13 +982,13 @@ int ath5k_hw_reset(struct ath5k_hw *ah, enum ieee80211_if_types op_mode,
982 (ee->ee_tx_end2xpa_disable[ee_mode] << 24) | 982 (ee->ee_tx_end2xpa_disable[ee_mode] << 24) |
983 (ee->ee_tx_end2xpa_disable[ee_mode] << 16) | 983 (ee->ee_tx_end2xpa_disable[ee_mode] << 16) |
984 (ee->ee_tx_frm2xpa_enable[ee_mode] << 8) | 984 (ee->ee_tx_frm2xpa_enable[ee_mode] << 8) |
985 (ee->ee_tx_frm2xpa_enable[ee_mode]), AR5K_PHY(0x0d)); 985 (ee->ee_tx_frm2xpa_enable[ee_mode]), AR5K_PHY_RF_CTL4);
986 986
987 AR5K_REG_MASKED_BITS(ah, AR5K_PHY(0x0a), 987 AR5K_REG_MASKED_BITS(ah, AR5K_PHY_RF_CTL3,
988 ee->ee_tx_end2xlna_enable[ee_mode] << 8, 0xffff00ff); 988 ee->ee_tx_end2xlna_enable[ee_mode] << 8, 0xffff00ff);
989 AR5K_REG_MASKED_BITS(ah, AR5K_PHY(0x19), 989 AR5K_REG_MASKED_BITS(ah, AR5K_PHY_NF,
990 (ee->ee_thr_62[ee_mode] << 12) & 0x7f000, 0xfff80fff); 990 (ee->ee_thr_62[ee_mode] << 12) & 0x7f000, 0xfff80fff);
991 AR5K_REG_MASKED_BITS(ah, AR5K_PHY(0x49), 4, 0xffffff01); 991 AR5K_REG_MASKED_BITS(ah, AR5K_PHY_OFDM_SELFCORR, 4, 0xffffff01);
992 992
993 AR5K_REG_ENABLE_BITS(ah, AR5K_PHY_IQ, 993 AR5K_REG_ENABLE_BITS(ah, AR5K_PHY_IQ,
994 AR5K_PHY_IQ_CORR_ENABLE | 994 AR5K_PHY_IQ_CORR_ENABLE |
@@ -1063,7 +1063,8 @@ int ath5k_hw_reset(struct ath5k_hw *ah, enum ieee80211_if_types op_mode,
1063 ath5k_hw_reg_write(ah, AR5K_PHY_ACT_ENABLE, AR5K_PHY_ACT); 1063 ath5k_hw_reg_write(ah, AR5K_PHY_ACT_ENABLE, AR5K_PHY_ACT);
1064 1064
1065 /* 1065 /*
1066 * 5111/5112 Specific 1066 * On 5211+ read activation -> rx delay
1067 * and use it.
1067 */ 1068 */
1068 if (ah->ah_version != AR5K_AR5210) { 1069 if (ah->ah_version != AR5K_AR5210) {
1069 data = ath5k_hw_reg_read(ah, AR5K_PHY_RX_DELAY) & 1070 data = ath5k_hw_reg_read(ah, AR5K_PHY_RX_DELAY) &
@@ -1071,40 +1072,77 @@ int ath5k_hw_reset(struct ath5k_hw *ah, enum ieee80211_if_types op_mode,
1071 data = (channel->hw_value & CHANNEL_CCK) ? 1072 data = (channel->hw_value & CHANNEL_CCK) ?
1072 ((data << 2) / 22) : (data / 10); 1073 ((data << 2) / 22) : (data / 10);
1073 1074
1074 udelay(100 + data); 1075 udelay(100 + (2 * data));
1076 data = 0;
1075 } else { 1077 } else {
1076 mdelay(1); 1078 mdelay(1);
1077 } 1079 }
1078 1080
1079 /* 1081 /*
1080 * Enable calibration and wait until completion 1082 * Perform ADC test (?)
1083 */
1084 data = ath5k_hw_reg_read(ah, AR5K_PHY_TST1);
1085 ath5k_hw_reg_write(ah, AR5K_PHY_TST1_TXHOLD, AR5K_PHY_TST1);
1086 for (i = 0; i <= 20; i++) {
1087 if (!(ath5k_hw_reg_read(ah, AR5K_PHY_ADC_TEST) & 0x10))
1088 break;
1089 udelay(200);
1090 }
1091 ath5k_hw_reg_write(ah, data, AR5K_PHY_TST1);
1092 data = 0;
1093
1094 /*
1095 * Start automatic gain calibration
1096 *
1097 * During AGC calibration RX path is re-routed to
1098 * a signal detector so we don't receive anything.
1099 *
1100 * This method is used to calibrate some static offsets
1101 * used together with on-the-fly I/Q calibration (the
1102 * one performed via ath5k_hw_phy_calibrate), which doesn't
1103 * interrupt the rx path.
1104 *
1105 * If we are in a noisy environment AGC calibration may time
1106 * out.
1081 */ 1107 */
1082 AR5K_REG_ENABLE_BITS(ah, AR5K_PHY_AGCCTL, 1108 AR5K_REG_ENABLE_BITS(ah, AR5K_PHY_AGCCTL,
1083 AR5K_PHY_AGCCTL_CAL); 1109 AR5K_PHY_AGCCTL_CAL);
1084 1110
1111 /* At the same time start I/Q calibration for QAM constellation
1112 * -no need for CCK- */
1113 ah->ah_calibration = false;
1114 if (!(mode == AR5K_MODE_11B)) {
1115 ah->ah_calibration = true;
1116 AR5K_REG_WRITE_BITS(ah, AR5K_PHY_IQ,
1117 AR5K_PHY_IQ_CAL_NUM_LOG_MAX, 15);
1118 AR5K_REG_ENABLE_BITS(ah, AR5K_PHY_IQ,
1119 AR5K_PHY_IQ_RUN);
1120 }
1121
1122 /* Wait for gain calibration to finish (we check for I/Q calibration
1123 * during ath5k_phy_calibrate) */
1085 if (ath5k_hw_register_timeout(ah, AR5K_PHY_AGCCTL, 1124 if (ath5k_hw_register_timeout(ah, AR5K_PHY_AGCCTL,
1086 AR5K_PHY_AGCCTL_CAL, 0, false)) { 1125 AR5K_PHY_AGCCTL_CAL, 0, false)) {
1087 ATH5K_ERR(ah->ah_sc, "calibration timeout (%uMHz)\n", 1126 ATH5K_ERR(ah->ah_sc, "gain calibration timeout (%uMHz)\n",
1088 channel->center_freq); 1127 channel->center_freq);
1089 return -EAGAIN; 1128 return -EAGAIN;
1090 } 1129 }
1091 1130
1131 /*
1132 * Start noise floor calibration
1133 *
1134 * If we run NF calibration before AGC, it always times out.
1135 * Binary HAL starts NF and AGC calibration at the same time
1136 * and only waits for AGC to finish. I believe that's wrong because
1137 * during NF calibration, rx path is also routed to a detector, so if
1138 * it doesn't finish we won't have RX.
1139 *
1140 * XXX: Find an interval that's OK for all cards...
1141 */
1092 ret = ath5k_hw_noise_floor_calibration(ah, channel->center_freq); 1142 ret = ath5k_hw_noise_floor_calibration(ah, channel->center_freq);
1093 if (ret) 1143 if (ret)
1094 return ret; 1144 return ret;
1095 1145
1096 ah->ah_calibration = false;
1097
1098 /* A and G modes can use QAM modulation which requires enabling
1099 * I and Q calibration. Don't bother in B mode. */
1100 if (!(mode == AR5K_MODE_11B)) {
1101 ah->ah_calibration = true;
1102 AR5K_REG_WRITE_BITS(ah, AR5K_PHY_IQ,
1103 AR5K_PHY_IQ_CAL_NUM_LOG_MAX, 15);
1104 AR5K_REG_ENABLE_BITS(ah, AR5K_PHY_IQ,
1105 AR5K_PHY_IQ_RUN);
1106 }
1107
1108 /* 1146 /*
1109 * Reset queues and start beacon timers at the end of the reset routine 1147 * Reset queues and start beacon timers at the end of the reset routine
1110 */ 1148 */
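The calibration rework above fixes the ordering: a short ADC/TXHOLD test first, then AGC calibration started together with I/Q calibration (non-11b modes only), a wait for AGC alone, and noise-floor calibration last on its own, since NF calibration also detaches the RX path. A minimal sketch of the sequence, using the same helpers as the hunk (ADC test omitted for brevity):

	AR5K_REG_ENABLE_BITS(ah, AR5K_PHY_AGCCTL, AR5K_PHY_AGCCTL_CAL);	/* 1. start AGC cal */

	if (mode != AR5K_MODE_11B) {						/* 2. start I/Q cal in parallel */
		AR5K_REG_WRITE_BITS(ah, AR5K_PHY_IQ, AR5K_PHY_IQ_CAL_NUM_LOG_MAX, 15);
		AR5K_REG_ENABLE_BITS(ah, AR5K_PHY_IQ, AR5K_PHY_IQ_RUN);
	}

	if (ath5k_hw_register_timeout(ah, AR5K_PHY_AGCCTL,			/* 3. wait for AGC only */
				      AR5K_PHY_AGCCTL_CAL, 0, false))
		return -EAGAIN;

	ret = ath5k_hw_noise_floor_calibration(ah, channel->center_freq);	/* 4. NF cal last */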
@@ -1154,6 +1192,12 @@ int ath5k_hw_reset(struct ath5k_hw *ah, enum ieee80211_if_types op_mode,
1154 ath5k_hw_reg_write(ah, AR5K_PHY_SCLOCK_32MHZ, AR5K_PHY_SCLOCK); 1192 ath5k_hw_reg_write(ah, AR5K_PHY_SCLOCK_32MHZ, AR5K_PHY_SCLOCK);
1155 ath5k_hw_reg_write(ah, AR5K_PHY_SDELAY_32MHZ, AR5K_PHY_SDELAY); 1193 ath5k_hw_reg_write(ah, AR5K_PHY_SDELAY_32MHZ, AR5K_PHY_SDELAY);
1156 ath5k_hw_reg_write(ah, ah->ah_phy_spending, AR5K_PHY_SPENDING); 1194 ath5k_hw_reg_write(ah, ah->ah_phy_spending, AR5K_PHY_SPENDING);
1195
1196 data = ath5k_hw_reg_read(ah, AR5K_USEC_5211) & 0xffffc07f;
1197 data |= (ah->ah_phy_spending == AR5K_PHY_SPENDING_18) ?
1198 0x00000f80 : 0x00001380;
1199 ath5k_hw_reg_write(ah, data, AR5K_USEC_5211);
1200 data = 0;
1157 } 1201 }
1158 1202
1159 if (ah->ah_version == AR5K_AR5212) { 1203 if (ah->ah_version == AR5K_AR5212) {
@@ -1226,7 +1270,7 @@ int ath5k_hw_set_power(struct ath5k_hw *ah, enum ath5k_power_mode mode,
1226 bool set_chip, u16 sleep_duration) 1270 bool set_chip, u16 sleep_duration)
1227{ 1271{
1228 unsigned int i; 1272 unsigned int i;
1229 u32 staid; 1273 u32 staid, data;
1230 1274
1231 ATH5K_TRACE(ah->ah_sc); 1275 ATH5K_TRACE(ah->ah_sc);
1232 staid = ath5k_hw_reg_read(ah, AR5K_STA_ID1); 1276 staid = ath5k_hw_reg_read(ah, AR5K_STA_ID1);
@@ -1238,7 +1282,8 @@ int ath5k_hw_set_power(struct ath5k_hw *ah, enum ath5k_power_mode mode,
1238 case AR5K_PM_NETWORK_SLEEP: 1282 case AR5K_PM_NETWORK_SLEEP:
1239 if (set_chip) 1283 if (set_chip)
1240 ath5k_hw_reg_write(ah, 1284 ath5k_hw_reg_write(ah,
1241 AR5K_SLEEP_CTL_SLE | sleep_duration, 1285 AR5K_SLEEP_CTL_SLE_ALLOW |
1286 sleep_duration,
1242 AR5K_SLEEP_CTL); 1287 AR5K_SLEEP_CTL);
1243 1288
1244 staid |= AR5K_STA_ID1_PWR_SV; 1289 staid |= AR5K_STA_ID1_PWR_SV;
@@ -1253,13 +1298,24 @@ int ath5k_hw_set_power(struct ath5k_hw *ah, enum ath5k_power_mode mode,
1253 break; 1298 break;
1254 1299
1255 case AR5K_PM_AWAKE: 1300 case AR5K_PM_AWAKE:
1301
1302 staid &= ~AR5K_STA_ID1_PWR_SV;
1303
1256 if (!set_chip) 1304 if (!set_chip)
1257 goto commit; 1305 goto commit;
1258 1306
1259 ath5k_hw_reg_write(ah, AR5K_SLEEP_CTL_SLE_WAKE, 1307 /* Preserve sleep duration */
1260 AR5K_SLEEP_CTL); 1308 data = ath5k_hw_reg_read(ah, AR5K_SLEEP_CTL);
1309 if (data & 0xffc00000) {
1310 data = 0;
1311 } else {
1312 data = data & 0xfffcffff;
1313 }
1261 1314
1262 for (i = 5000; i > 0; i--) { 1315 ath5k_hw_reg_write(ah, data, AR5K_SLEEP_CTL);
1316 udelay(15);
1317
1318 for (i = 50; i > 0; i--) {
1263 /* Check if the chip did wake up */ 1319 /* Check if the chip did wake up */
1264 if ((ath5k_hw_reg_read(ah, AR5K_PCICFG) & 1320 if ((ath5k_hw_reg_read(ah, AR5K_PCICFG) &
1265 AR5K_PCICFG_SPWR_DN) == 0) 1321 AR5K_PCICFG_SPWR_DN) == 0)
@@ -1267,15 +1323,13 @@ int ath5k_hw_set_power(struct ath5k_hw *ah, enum ath5k_power_mode mode,
1267 1323
1268 /* Wait a bit and retry */ 1324 /* Wait a bit and retry */
1269 udelay(200); 1325 udelay(200);
1270 ath5k_hw_reg_write(ah, AR5K_SLEEP_CTL_SLE_WAKE, 1326 ath5k_hw_reg_write(ah, data, AR5K_SLEEP_CTL);
1271 AR5K_SLEEP_CTL);
1272 } 1327 }
1273 1328
1274 /* Fail if the chip didn't wake up */ 1329 /* Fail if the chip didn't wake up */
1275 if (i <= 0) 1330 if (i <= 0)
1276 return -EIO; 1331 return -EIO;
1277 1332
1278 staid &= ~AR5K_STA_ID1_PWR_SV;
1279 break; 1333 break;
1280 1334
1281 default: 1335 default:
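The AR5K_PM_AWAKE path now clears only the sleep-enable bits while keeping any programmed sleep duration (unless the high bits of AR5K_SLEEP_CTL are set, in which case the register is simply cleared), and then polls AR5K_PCICFG for the power-down bit with a much shorter retry count. A compact sketch of the wake sequence, mirroring the hunk:

	data = ath5k_hw_reg_read(ah, AR5K_SLEEP_CTL);
	data = (data & 0xffc00000) ? 0 : (data & 0xfffcffff);	/* drop SLE bits, keep duration */
	ath5k_hw_reg_write(ah, data, AR5K_SLEEP_CTL);
	udelay(15);

	for (i = 50; i > 0; i--) {
		if (!(ath5k_hw_reg_read(ah, AR5K_PCICFG) & AR5K_PCICFG_SPWR_DN))
			break;					/* chip is awake */
		udelay(200);
		ath5k_hw_reg_write(ah, data, AR5K_SLEEP_CTL);	/* nudge it again */
	}
	if (i <= 0)
		return -EIO;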
@@ -1304,6 +1358,7 @@ void ath5k_hw_start_rx(struct ath5k_hw *ah)
1304{ 1358{
1305 ATH5K_TRACE(ah->ah_sc); 1359 ATH5K_TRACE(ah->ah_sc);
1306 ath5k_hw_reg_write(ah, AR5K_CR_RXE, AR5K_CR); 1360 ath5k_hw_reg_write(ah, AR5K_CR_RXE, AR5K_CR);
1361 ath5k_hw_reg_read(ah, AR5K_CR);
1307} 1362}
1308 1363
1309/* 1364/*
@@ -1390,6 +1445,7 @@ int ath5k_hw_tx_start(struct ath5k_hw *ah, unsigned int queue)
1390 } 1445 }
1391 /* Start queue */ 1446 /* Start queue */
1392 ath5k_hw_reg_write(ah, tx_queue, AR5K_CR); 1447 ath5k_hw_reg_write(ah, tx_queue, AR5K_CR);
1448 ath5k_hw_reg_read(ah, AR5K_CR);
1393 } else { 1449 } else {
1394 /* Return if queue is disabled */ 1450 /* Return if queue is disabled */
1395 if (AR5K_REG_READ_Q(ah, AR5K_QCU_TXD, queue)) 1451 if (AR5K_REG_READ_Q(ah, AR5K_QCU_TXD, queue))
@@ -1440,6 +1496,7 @@ int ath5k_hw_stop_tx_dma(struct ath5k_hw *ah, unsigned int queue)
1440 1496
1441 /* Stop queue */ 1497 /* Stop queue */
1442 ath5k_hw_reg_write(ah, tx_queue, AR5K_CR); 1498 ath5k_hw_reg_write(ah, tx_queue, AR5K_CR);
1499 ath5k_hw_reg_read(ah, AR5K_CR);
1443 } else { 1500 } else {
1444 /* 1501 /*
1445 * Schedule TX disable and wait until queue is empty 1502 * Schedule TX disable and wait until queue is empty
@@ -1456,6 +1513,8 @@ int ath5k_hw_stop_tx_dma(struct ath5k_hw *ah, unsigned int queue)
1456 1513
1457 /* Clear register */ 1514 /* Clear register */
1458 ath5k_hw_reg_write(ah, 0, AR5K_QCU_TXD); 1515 ath5k_hw_reg_write(ah, 0, AR5K_QCU_TXD);
1516 if (pending)
1517 return -EBUSY;
1459 } 1518 }
1460 1519
1461 /* TODO: Check for success else return error */ 1520 /* TODO: Check for success else return error */
@@ -1684,6 +1743,7 @@ enum ath5k_int ath5k_hw_set_intr(struct ath5k_hw *ah, enum ath5k_int new_mask)
1684 * (they will be re-enabled afterwards). 1743 * (they will be re-enabled afterwards).
1685 */ 1744 */
1686 ath5k_hw_reg_write(ah, AR5K_IER_DISABLE, AR5K_IER); 1745 ath5k_hw_reg_write(ah, AR5K_IER_DISABLE, AR5K_IER);
1746 ath5k_hw_reg_read(ah, AR5K_IER);
1687 1747
1688 old_mask = ah->ah_imr; 1748 old_mask = ah->ah_imr;
1689 1749
@@ -1716,6 +1776,7 @@ enum ath5k_int ath5k_hw_set_intr(struct ath5k_hw *ah, enum ath5k_int new_mask)
1716 1776
1717 /* ..re-enable interrupts */ 1777 /* ..re-enable interrupts */
1718 ath5k_hw_reg_write(ah, AR5K_IER_ENABLE, AR5K_IER); 1778 ath5k_hw_reg_write(ah, AR5K_IER_ENABLE, AR5K_IER);
1779 ath5k_hw_reg_read(ah, AR5K_IER);
1719 1780
1720 return old_mask; 1781 return old_mask;
1721} 1782}
@@ -3359,11 +3420,13 @@ int ath5k_hw_reset_tx_queue(struct ath5k_hw *ah, unsigned int queue)
3359 ath5k_hw_reg_write(ah, ah->ah_turbo ? 3420 ath5k_hw_reg_write(ah, ah->ah_turbo ?
3360 AR5K_INIT_PROTO_TIME_CNTRL_TURBO : 3421 AR5K_INIT_PROTO_TIME_CNTRL_TURBO :
3361 AR5K_INIT_PROTO_TIME_CNTRL, AR5K_IFS1); 3422 AR5K_INIT_PROTO_TIME_CNTRL, AR5K_IFS1);
3362 /* Set PHY register 0x9844 (??) */ 3423 /* Set AR5K_PHY_SETTLING */
3363 ath5k_hw_reg_write(ah, ah->ah_turbo ? 3424 ath5k_hw_reg_write(ah, ah->ah_turbo ?
3364 (ath5k_hw_reg_read(ah, AR5K_PHY(17)) & ~0x7F) | 0x38 : 3425 (ath5k_hw_reg_read(ah, AR5K_PHY_SETTLING) & ~0x7F)
3365 (ath5k_hw_reg_read(ah, AR5K_PHY(17)) & ~0x7F) | 0x1C, 3426 | 0x38 :
3366 AR5K_PHY(17)); 3427 (ath5k_hw_reg_read(ah, AR5K_PHY_SETTLING) & ~0x7F)
3428 | 0x1C,
3429 AR5K_PHY_SETTLING);
3367 /* Set Frame Control Register */ 3430 /* Set Frame Control Register */
3368 ath5k_hw_reg_write(ah, ah->ah_turbo ? 3431 ath5k_hw_reg_write(ah, ah->ah_turbo ?
3369 (AR5K_PHY_FRAME_CTL_INI | AR5K_PHY_TURBO_MODE | 3432 (AR5K_PHY_FRAME_CTL_INI | AR5K_PHY_TURBO_MODE |
@@ -3484,7 +3547,7 @@ int ath5k_hw_reset_tx_queue(struct ath5k_hw *ah, unsigned int queue)
3484 if (tq->tqi_flags & AR5K_TXQ_FLAG_RDYTIME_EXP_POLICY_ENABLE) 3547 if (tq->tqi_flags & AR5K_TXQ_FLAG_RDYTIME_EXP_POLICY_ENABLE)
3485 AR5K_REG_ENABLE_BITS(ah, 3548 AR5K_REG_ENABLE_BITS(ah,
3486 AR5K_QUEUE_MISC(queue), 3549 AR5K_QUEUE_MISC(queue),
3487 AR5K_QCU_MISC_TXE); 3550 AR5K_QCU_MISC_RDY_VEOL_POLICY);
3488 } 3551 }
3489 3552
3490 if (tq->tqi_flags & AR5K_TXQ_FLAG_BACKOFF_DISABLE) 3553 if (tq->tqi_flags & AR5K_TXQ_FLAG_BACKOFF_DISABLE)
diff --git a/drivers/net/wireless/ath5k/initvals.c b/drivers/net/wireless/ath5k/initvals.c
index 04c84e9da89d..2806b21bf90b 100644
--- a/drivers/net/wireless/ath5k/initvals.c
+++ b/drivers/net/wireless/ath5k/initvals.c
@@ -489,7 +489,7 @@ static const struct ath5k_ini ar5212_ini[] = {
489 { AR5K_QUEUE_TXDP(9), 0x00000000 }, 489 { AR5K_QUEUE_TXDP(9), 0x00000000 },
490 { AR5K_DCU_FP, 0x00000000 }, 490 { AR5K_DCU_FP, 0x00000000 },
491 { AR5K_DCU_TXP, 0x00000000 }, 491 { AR5K_DCU_TXP, 0x00000000 },
492 { AR5K_DCU_TX_FILTER, 0x00000000 }, 492 { AR5K_DCU_TX_FILTER_0_BASE, 0x00000000 },
493 /* Unknown table */ 493 /* Unknown table */
494 { 0x1078, 0x00000000 }, 494 { 0x1078, 0x00000000 },
495 { 0x10b8, 0x00000000 }, 495 { 0x10b8, 0x00000000 },
@@ -679,7 +679,7 @@ static const struct ath5k_ini ar5212_ini[] = {
679 { AR5K_PHY(645), 0x00106c10 }, 679 { AR5K_PHY(645), 0x00106c10 },
680 { AR5K_PHY(646), 0x009c4060 }, 680 { AR5K_PHY(646), 0x009c4060 },
681 { AR5K_PHY(647), 0x1483800a }, 681 { AR5K_PHY(647), 0x1483800a },
682 /* { AR5K_PHY(648), 0x018830c6 },*/ /* 2413 */ 682 /* { AR5K_PHY(648), 0x018830c6 },*/ /* 2413/2425 */
683 { AR5K_PHY(648), 0x01831061 }, 683 { AR5K_PHY(648), 0x01831061 },
684 { AR5K_PHY(649), 0x00000400 }, 684 { AR5K_PHY(649), 0x00000400 },
685 /*{ AR5K_PHY(650), 0x000001b5 },*/ 685 /*{ AR5K_PHY(650), 0x000001b5 },*/
diff --git a/drivers/net/wireless/ath5k/phy.c b/drivers/net/wireless/ath5k/phy.c
index afd8689e5c03..fa0d47faf574 100644
--- a/drivers/net/wireless/ath5k/phy.c
+++ b/drivers/net/wireless/ath5k/phy.c
@@ -1020,6 +1020,74 @@ static const struct ath5k_ini_rfgain rfgain_2413[] = {
1020 { AR5K_RF_GAIN(63), { 0x000000f9 } }, 1020 { AR5K_RF_GAIN(63), { 0x000000f9 } },
1021}; 1021};
1022 1022
1023/* Initial RF Gain settings for RF2425 */
1024static const struct ath5k_ini_rfgain rfgain_2425[] = {
1025 { AR5K_RF_GAIN(0), { 0x00000000 } },
1026 { AR5K_RF_GAIN(1), { 0x00000040 } },
1027 { AR5K_RF_GAIN(2), { 0x00000080 } },
1028 { AR5K_RF_GAIN(3), { 0x00000181 } },
1029 { AR5K_RF_GAIN(4), { 0x000001c1 } },
1030 { AR5K_RF_GAIN(5), { 0x00000001 } },
1031 { AR5K_RF_GAIN(6), { 0x00000041 } },
1032 { AR5K_RF_GAIN(7), { 0x00000081 } },
1033 { AR5K_RF_GAIN(8), { 0x00000188 } },
1034 { AR5K_RF_GAIN(9), { 0x000001c8 } },
1035 { AR5K_RF_GAIN(10), { 0x00000008 } },
1036 { AR5K_RF_GAIN(11), { 0x00000048 } },
1037 { AR5K_RF_GAIN(12), { 0x00000088 } },
1038 { AR5K_RF_GAIN(13), { 0x00000189 } },
1039 { AR5K_RF_GAIN(14), { 0x000001c9 } },
1040 { AR5K_RF_GAIN(15), { 0x00000009 } },
1041 { AR5K_RF_GAIN(16), { 0x00000049 } },
1042 { AR5K_RF_GAIN(17), { 0x00000089 } },
1043 { AR5K_RF_GAIN(18), { 0x000001b0 } },
1044 { AR5K_RF_GAIN(19), { 0x000001f0 } },
1045 { AR5K_RF_GAIN(20), { 0x00000030 } },
1046 { AR5K_RF_GAIN(21), { 0x00000070 } },
1047 { AR5K_RF_GAIN(22), { 0x00000171 } },
1048 { AR5K_RF_GAIN(23), { 0x000001b1 } },
1049 { AR5K_RF_GAIN(24), { 0x000001f1 } },
1050 { AR5K_RF_GAIN(25), { 0x00000031 } },
1051 { AR5K_RF_GAIN(26), { 0x00000071 } },
1052 { AR5K_RF_GAIN(27), { 0x000001b8 } },
1053 { AR5K_RF_GAIN(28), { 0x000001f8 } },
1054 { AR5K_RF_GAIN(29), { 0x00000038 } },
1055 { AR5K_RF_GAIN(30), { 0x00000078 } },
1056 { AR5K_RF_GAIN(31), { 0x000000b8 } },
1057 { AR5K_RF_GAIN(32), { 0x000001b9 } },
1058 { AR5K_RF_GAIN(33), { 0x000001f9 } },
1059 { AR5K_RF_GAIN(34), { 0x00000039 } },
1060 { AR5K_RF_GAIN(35), { 0x00000079 } },
1061 { AR5K_RF_GAIN(36), { 0x000000b9 } },
1062 { AR5K_RF_GAIN(37), { 0x000000f9 } },
1063 { AR5K_RF_GAIN(38), { 0x000000f9 } },
1064 { AR5K_RF_GAIN(39), { 0x000000f9 } },
1065 { AR5K_RF_GAIN(40), { 0x000000f9 } },
1066 { AR5K_RF_GAIN(41), { 0x000000f9 } },
1067 { AR5K_RF_GAIN(42), { 0x000000f9 } },
1068 { AR5K_RF_GAIN(43), { 0x000000f9 } },
1069 { AR5K_RF_GAIN(44), { 0x000000f9 } },
1070 { AR5K_RF_GAIN(45), { 0x000000f9 } },
1071 { AR5K_RF_GAIN(46), { 0x000000f9 } },
1072 { AR5K_RF_GAIN(47), { 0x000000f9 } },
1073 { AR5K_RF_GAIN(48), { 0x000000f9 } },
1074 { AR5K_RF_GAIN(49), { 0x000000f9 } },
1075 { AR5K_RF_GAIN(50), { 0x000000f9 } },
1076 { AR5K_RF_GAIN(51), { 0x000000f9 } },
1077 { AR5K_RF_GAIN(52), { 0x000000f9 } },
1078 { AR5K_RF_GAIN(53), { 0x000000f9 } },
1079 { AR5K_RF_GAIN(54), { 0x000000f9 } },
1080 { AR5K_RF_GAIN(55), { 0x000000f9 } },
1081 { AR5K_RF_GAIN(56), { 0x000000f9 } },
1082 { AR5K_RF_GAIN(57), { 0x000000f9 } },
1083 { AR5K_RF_GAIN(58), { 0x000000f9 } },
1084 { AR5K_RF_GAIN(59), { 0x000000f9 } },
1085 { AR5K_RF_GAIN(60), { 0x000000f9 } },
1086 { AR5K_RF_GAIN(61), { 0x000000f9 } },
1087 { AR5K_RF_GAIN(62), { 0x000000f9 } },
1088 { AR5K_RF_GAIN(63), { 0x000000f9 } },
1089};
1090
1023static const struct ath5k_gain_opt rfgain_opt_5112 = { 1091static const struct ath5k_gain_opt rfgain_opt_5112 = {
1024 1, 1092 1,
1025 8, 1093 8,
@@ -1588,8 +1656,8 @@ int ath5k_hw_rfgain(struct ath5k_hw *ah, unsigned int freq)
1588 freq = 0; /* only 2Ghz */ 1656 freq = 0; /* only 2Ghz */
1589 break; 1657 break;
1590 case AR5K_RF2425: 1658 case AR5K_RF2425:
1591 ath5k_rfg = rfgain_2413; 1659 ath5k_rfg = rfgain_2425;
1592 size = ARRAY_SIZE(rfgain_2413); 1660 size = ARRAY_SIZE(rfgain_2425);
1593 freq = 0; /* only 2Ghz */ 1661 freq = 0; /* only 2Ghz */
1594 break; 1662 break;
1595 default: 1663 default:
@@ -1830,9 +1898,6 @@ static int ath5k_hw_rf5112_channel(struct ath5k_hw *ah,
1830 data = data0 = data1 = data2 = 0; 1898 data = data0 = data1 = data2 = 0;
1831 c = channel->center_freq; 1899 c = channel->center_freq;
1832 1900
1833 /*
1834 * Set the channel on the RF5112 or newer
1835 */
1836 if (c < 4800) { 1901 if (c < 4800) {
1837 if (!((c - 2224) % 5)) { 1902 if (!((c - 2224) % 5)) {
1838 data0 = ((2 * (c - 704)) - 3040) / 10; 1903 data0 = ((2 * (c - 704)) - 3040) / 10;
@@ -1844,7 +1909,7 @@ static int ath5k_hw_rf5112_channel(struct ath5k_hw *ah,
1844 return -EINVAL; 1909 return -EINVAL;
1845 1910
1846 data0 = ath5k_hw_bitswap((data0 << 2) & 0xff, 8); 1911 data0 = ath5k_hw_bitswap((data0 << 2) & 0xff, 8);
1847 } else { 1912 } else if ((c - (c % 5)) != 2 || c > 5435) {
1848 if (!(c % 20) && c >= 5120) { 1913 if (!(c % 20) && c >= 5120) {
1849 data0 = ath5k_hw_bitswap(((c - 4800) / 20 << 2), 8); 1914 data0 = ath5k_hw_bitswap(((c - 4800) / 20 << 2), 8);
1850 data2 = ath5k_hw_bitswap(3, 2); 1915 data2 = ath5k_hw_bitswap(3, 2);
@@ -1856,6 +1921,9 @@ static int ath5k_hw_rf5112_channel(struct ath5k_hw *ah,
1856 data2 = ath5k_hw_bitswap(1, 2); 1921 data2 = ath5k_hw_bitswap(1, 2);
1857 } else 1922 } else
1858 return -EINVAL; 1923 return -EINVAL;
1924 } else {
1925 data0 = ath5k_hw_bitswap((10 * (c - 2) - 4800) / 25 + 1, 8);
1926 data2 = ath5k_hw_bitswap(0, 2);
1859 } 1927 }
1860 1928
1861 data = (data0 << 4) | (data1 << 1) | (data2 << 2) | 0x1001; 1929 data = (data0 << 4) | (data1 << 1) | (data2 << 2) | 0x1001;
@@ -1867,6 +1935,45 @@ static int ath5k_hw_rf5112_channel(struct ath5k_hw *ah,
1867} 1935}
1868 1936
1869/* 1937/*
1938 * Set the channel on the RF2425
1939 */
1940static int ath5k_hw_rf2425_channel(struct ath5k_hw *ah,
1941 struct ieee80211_channel *channel)
1942{
1943 u32 data, data0, data2;
1944 u16 c;
1945
1946 data = data0 = data2 = 0;
1947 c = channel->center_freq;
1948
1949 if (c < 4800) {
1950 data0 = ath5k_hw_bitswap((c - 2272), 8);
1951 data2 = 0;
1952 /* ? 5GHz ? */
1953 } else if ((c - (c % 5)) != 2 || c > 5435) {
1954 if (!(c % 20) && c < 5120)
1955 data0 = ath5k_hw_bitswap(((c - 4800) / 20 << 2), 8);
1956 else if (!(c % 10))
1957 data0 = ath5k_hw_bitswap(((c - 4800) / 10 << 1), 8);
1958 else if (!(c % 5))
1959 data0 = ath5k_hw_bitswap((c - 4800) / 5, 8);
1960 else
1961 return -EINVAL;
1962 data2 = ath5k_hw_bitswap(1, 2);
1963 } else {
1964 data0 = ath5k_hw_bitswap((10 * (c - 2) - 4800) / 25 + 1, 8);
1965 data2 = ath5k_hw_bitswap(0, 2);
1966 }
1967
1968 data = (data0 << 4) | data2 << 2 | 0x1001;
1969
1970 ath5k_hw_reg_write(ah, data & 0xff, AR5K_RF_BUFFER);
1971 ath5k_hw_reg_write(ah, (data >> 8) & 0x7f, AR5K_RF_BUFFER_CONTROL_5);
1972
1973 return 0;
1974}
1975
1976/*
1870 * Set a channel on the radio chip 1977 * Set a channel on the radio chip
1871 */ 1978 */
1872int ath5k_hw_channel(struct ath5k_hw *ah, struct ieee80211_channel *channel) 1979int ath5k_hw_channel(struct ath5k_hw *ah, struct ieee80211_channel *channel)
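A worked example may help with the new ath5k_hw_rf2425_channel() path (the channel is illustrative; the arithmetic simply follows the function above). For channel 1 at 2412 MHz, c < 4800, so:

	data0 = ath5k_hw_bitswap(2412 - 2272, 8)	/* = bitswap(140, 8) = 0x31 */
	data2 = 0
	data  = (0x31 << 4) | (0 << 2) | 0x1001		/* = 0x1311 */
	/* 0x11 (data & 0xff) goes to AR5K_RF_BUFFER,
	   0x13 ((data >> 8) & 0x7f) to AR5K_RF_BUFFER_CONTROL_5 */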
@@ -1895,6 +2002,9 @@ int ath5k_hw_channel(struct ath5k_hw *ah, struct ieee80211_channel *channel)
1895 case AR5K_RF5111: 2002 case AR5K_RF5111:
1896 ret = ath5k_hw_rf5111_channel(ah, channel); 2003 ret = ath5k_hw_rf5111_channel(ah, channel);
1897 break; 2004 break;
2005 case AR5K_RF2425:
2006 ret = ath5k_hw_rf2425_channel(ah, channel);
2007 break;
1898 default: 2008 default:
1899 ret = ath5k_hw_rf5112_channel(ah, channel); 2009 ret = ath5k_hw_rf5112_channel(ah, channel);
1900 break; 2010 break;
@@ -1903,6 +2013,15 @@ int ath5k_hw_channel(struct ath5k_hw *ah, struct ieee80211_channel *channel)
1903 if (ret) 2013 if (ret)
1904 return ret; 2014 return ret;
1905 2015
2016 /* Set JAPAN setting for channel 14 */
2017 if (channel->center_freq == 2484) {
2018 AR5K_REG_ENABLE_BITS(ah, AR5K_PHY_CCKTXCTL,
2019 AR5K_PHY_CCKTXCTL_JAPAN);
2020 } else {
2021 AR5K_REG_ENABLE_BITS(ah, AR5K_PHY_CCKTXCTL,
2022 AR5K_PHY_CCKTXCTL_WORLD);
2023 }
2024
1906 ah->ah_current_channel.center_freq = channel->center_freq; 2025 ah->ah_current_channel.center_freq = channel->center_freq;
1907 ah->ah_current_channel.hw_value = channel->hw_value; 2026 ah->ah_current_channel.hw_value = channel->hw_value;
1908 ah->ah_turbo = channel->hw_value == CHANNEL_T ? true : false; 2027 ah->ah_turbo = channel->hw_value == CHANNEL_T ? true : false;
@@ -1933,6 +2052,8 @@ int ath5k_hw_channel(struct ath5k_hw *ah, struct ieee80211_channel *channel)
1933 * http://patft.uspto.gov/netacgi/nph-Parser?Sect1=PTO1&Sect2=HITOFF&d=PALL \ 2052 * http://patft.uspto.gov/netacgi/nph-Parser?Sect1=PTO1&Sect2=HITOFF&d=PALL \
1934 * &p=1&u=%2Fnetahtml%2FPTO%2Fsrchnum.htm&r=1&f=G&l=50&s1=7245893.PN.&OS=PN/7 2053 * &p=1&u=%2Fnetahtml%2FPTO%2Fsrchnum.htm&r=1&f=G&l=50&s1=7245893.PN.&OS=PN/7
1935 * 2054 *
2055 * XXX: Since during noise floor calibration antennas are detached according to
2056 * the patent, we should stop tx queues here.
1936 */ 2057 */
1937int 2058int
1938ath5k_hw_noise_floor_calibration(struct ath5k_hw *ah, short freq) 2059ath5k_hw_noise_floor_calibration(struct ath5k_hw *ah, short freq)
@@ -1942,7 +2063,7 @@ ath5k_hw_noise_floor_calibration(struct ath5k_hw *ah, short freq)
1942 s32 noise_floor; 2063 s32 noise_floor;
1943 2064
1944 /* 2065 /*
1945 * Enable noise floor calibration and wait until completion 2066 * Enable noise floor calibration
1946 */ 2067 */
1947 AR5K_REG_ENABLE_BITS(ah, AR5K_PHY_AGCCTL, 2068 AR5K_REG_ENABLE_BITS(ah, AR5K_PHY_AGCCTL,
1948 AR5K_PHY_AGCCTL_NF); 2069 AR5K_PHY_AGCCTL_NF);
@@ -1952,7 +2073,7 @@ ath5k_hw_noise_floor_calibration(struct ath5k_hw *ah, short freq)
1952 if (ret) { 2073 if (ret) {
1953 ATH5K_ERR(ah->ah_sc, 2074 ATH5K_ERR(ah->ah_sc,
1954 "noise floor calibration timeout (%uMHz)\n", freq); 2075 "noise floor calibration timeout (%uMHz)\n", freq);
1955 return ret; 2076 return -EAGAIN;
1956 } 2077 }
1957 2078
1958 /* Wait until the noise floor is calibrated and read the value */ 2079 /* Wait until the noise floor is calibrated and read the value */
@@ -1974,7 +2095,7 @@ ath5k_hw_noise_floor_calibration(struct ath5k_hw *ah, short freq)
1974 if (noise_floor > AR5K_TUNE_NOISE_FLOOR) { 2095 if (noise_floor > AR5K_TUNE_NOISE_FLOOR) {
1975 ATH5K_ERR(ah->ah_sc, 2096 ATH5K_ERR(ah->ah_sc,
1976 "noise floor calibration failed (%uMHz)\n", freq); 2097 "noise floor calibration failed (%uMHz)\n", freq);
1977 return -EIO; 2098 return -EAGAIN;
1978 } 2099 }
1979 2100
1980 ah->ah_noise_floor = noise_floor; 2101 ah->ah_noise_floor = noise_floor;
@@ -2087,38 +2208,66 @@ static int ath5k_hw_rf5110_calibrate(struct ath5k_hw *ah,
2087} 2208}
2088 2209
2089/* 2210/*
2090 * Perform a PHY calibration on RF5111/5112 2211 * Perform a PHY calibration on RF5111/5112 and newer chips
2091 */ 2212 */
2092static int ath5k_hw_rf511x_calibrate(struct ath5k_hw *ah, 2213static int ath5k_hw_rf511x_calibrate(struct ath5k_hw *ah,
2093 struct ieee80211_channel *channel) 2214 struct ieee80211_channel *channel)
2094{ 2215{
2095 u32 i_pwr, q_pwr; 2216 u32 i_pwr, q_pwr;
2096 s32 iq_corr, i_coff, i_coffd, q_coff, q_coffd; 2217 s32 iq_corr, i_coff, i_coffd, q_coff, q_coffd;
2218 int i;
2097 ATH5K_TRACE(ah->ah_sc); 2219 ATH5K_TRACE(ah->ah_sc);
2098 2220
2099 if (!ah->ah_calibration || 2221 if (!ah->ah_calibration ||
2100 ath5k_hw_reg_read(ah, AR5K_PHY_IQ) & AR5K_PHY_IQ_RUN) 2222 ath5k_hw_reg_read(ah, AR5K_PHY_IQ) & AR5K_PHY_IQ_RUN)
2101 goto done; 2223 goto done;
2102 2224
2103 ah->ah_calibration = false; 2225 /* Calibration has finished, get the results and re-run */
2226 for (i = 0; i <= 10; i++) {
2227 iq_corr = ath5k_hw_reg_read(ah, AR5K_PHY_IQRES_CAL_CORR);
2228 i_pwr = ath5k_hw_reg_read(ah, AR5K_PHY_IQRES_CAL_PWR_I);
2229 q_pwr = ath5k_hw_reg_read(ah, AR5K_PHY_IQRES_CAL_PWR_Q);
2230 }
2104 2231
2105 iq_corr = ath5k_hw_reg_read(ah, AR5K_PHY_IQRES_CAL_CORR);
2106 i_pwr = ath5k_hw_reg_read(ah, AR5K_PHY_IQRES_CAL_PWR_I);
2107 q_pwr = ath5k_hw_reg_read(ah, AR5K_PHY_IQRES_CAL_PWR_Q);
2108 i_coffd = ((i_pwr >> 1) + (q_pwr >> 1)) >> 7; 2232 i_coffd = ((i_pwr >> 1) + (q_pwr >> 1)) >> 7;
2109 q_coffd = q_pwr >> 6; 2233 q_coffd = q_pwr >> 7;
2110 2234
2235 /* No correction */
2111 if (i_coffd == 0 || q_coffd == 0) 2236 if (i_coffd == 0 || q_coffd == 0)
2112 goto done; 2237 goto done;
2113 2238
2114 i_coff = ((-iq_corr) / i_coffd) & 0x3f; 2239 i_coff = ((-iq_corr) / i_coffd) & 0x3f;
2115 q_coff = (((s32)i_pwr / q_coffd) - 64) & 0x1f;
2116 2240
2117 /* Commit new IQ value */ 2241 /* Boundary check */
2242 if (i_coff > 31)
2243 i_coff = 31;
2244 if (i_coff < -32)
2245 i_coff = -32;
2246
2247 q_coff = (((s32)i_pwr / q_coffd) - 128) & 0x1f;
2248
2249 /* Boundary check */
2250 if (q_coff > 15)
2251 q_coff = 15;
2252 if (q_coff < -16)
2253 q_coff = -16;
2254
2255 /* Commit new I/Q value */
2118 AR5K_REG_ENABLE_BITS(ah, AR5K_PHY_IQ, AR5K_PHY_IQ_CORR_ENABLE | 2256 AR5K_REG_ENABLE_BITS(ah, AR5K_PHY_IQ, AR5K_PHY_IQ_CORR_ENABLE |
2119 ((u32)q_coff) | ((u32)i_coff << AR5K_PHY_IQ_CORR_Q_I_COFF_S)); 2257 ((u32)q_coff) | ((u32)i_coff << AR5K_PHY_IQ_CORR_Q_I_COFF_S));
2120 2258
2259 /* Re-enable calibration -if we don't we'll commit
2260 * the same values again and again */
2261 AR5K_REG_WRITE_BITS(ah, AR5K_PHY_IQ,
2262 AR5K_PHY_IQ_CAL_NUM_LOG_MAX, 15);
2263 AR5K_REG_ENABLE_BITS(ah, AR5K_PHY_IQ, AR5K_PHY_IQ_RUN);
2264
2121done: 2265done:
2266
2267 /* TODO: Separate noise floor calibration from I/Q calibration
2268 * since noise floor calibration interrupts rx path while I/Q
2269 * calibration doesn't. We don't need to run noise floor calibration
2270 * as often as I/Q calibration.*/
2122 ath5k_hw_noise_floor_calibration(ah, channel->center_freq); 2271 ath5k_hw_noise_floor_calibration(ah, channel->center_freq);
2123 2272
2124 /* Request RF gain */ 2273 /* Request RF gain */
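To make the reworked I/Q correction concrete, here is a worked example with made-up register readings (the formulas are the ones in the hunk): assume i_pwr = 0x20000, q_pwr = 0x1e000 and iq_corr = -4096.

	i_coffd = ((0x20000 >> 1) + (0x1e000 >> 1)) >> 7	/* = 126976 >> 7 = 992 */
	q_coffd = 0x1e000 >> 7					/* = 960 */
	i_coff  = (4096 / 992) & 0x3f				/* = 4, within [-32, 31] */
	q_coff  = ((0x20000 / 960) - 128) & 0x1f		/* = 136 - 128 = 8, within [-16, 15] */

Both values fall inside the new boundary checks, so they are committed unchanged to AR5K_PHY_IQ and I/Q calibration is re-armed for the next run.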
diff --git a/drivers/net/wireless/ath5k/reg.h b/drivers/net/wireless/ath5k/reg.h
index 30629b3e37c2..7562bf173d3e 100644
--- a/drivers/net/wireless/ath5k/reg.h
+++ b/drivers/net/wireless/ath5k/reg.h
@@ -53,7 +53,7 @@
53#define AR5K_CR_TXD0 0x00000008 /* TX Disable for queue 0 on 5210 */ 53#define AR5K_CR_TXD0 0x00000008 /* TX Disable for queue 0 on 5210 */
54#define AR5K_CR_TXD1 0x00000010 /* TX Disable for queue 1 on 5210 */ 54#define AR5K_CR_TXD1 0x00000010 /* TX Disable for queue 1 on 5210 */
55#define AR5K_CR_RXD 0x00000020 /* RX Disable */ 55#define AR5K_CR_RXD 0x00000020 /* RX Disable */
56#define AR5K_CR_SWI 0x00000040 56#define AR5K_CR_SWI 0x00000040 /* Software Interrupt */
57 57
58/* 58/*
59 * RX Descriptor Pointer register 59 * RX Descriptor Pointer register
@@ -65,19 +65,19 @@
65 */ 65 */
66#define AR5K_CFG 0x0014 /* Register Address */ 66#define AR5K_CFG 0x0014 /* Register Address */
67#define AR5K_CFG_SWTD 0x00000001 /* Byte-swap TX descriptor (for big endian archs) */ 67#define AR5K_CFG_SWTD 0x00000001 /* Byte-swap TX descriptor (for big endian archs) */
68#define AR5K_CFG_SWTB 0x00000002 /* Byte-swap TX buffer (?) */ 68#define AR5K_CFG_SWTB 0x00000002 /* Byte-swap TX buffer */
69#define AR5K_CFG_SWRD 0x00000004 /* Byte-swap RX descriptor */ 69#define AR5K_CFG_SWRD 0x00000004 /* Byte-swap RX descriptor */
70#define AR5K_CFG_SWRB 0x00000008 /* Byte-swap RX buffer (?) */ 70#define AR5K_CFG_SWRB 0x00000008 /* Byte-swap RX buffer */
71#define AR5K_CFG_SWRG 0x00000010 /* Byte-swap Register values (?) */ 71#define AR5K_CFG_SWRG 0x00000010 /* Byte-swap Register access */
72#define AR5K_CFG_ADHOC 0x00000020 /* [5211+] */ 72#define AR5K_CFG_ADHOC 0x00000020 /* AP/Adhoc indication [5211+] */
73#define AR5K_CFG_PHY_OK 0x00000100 /* [5211+] */ 73#define AR5K_CFG_PHY_OK 0x00000100 /* [5211+] */
74#define AR5K_CFG_EEBS 0x00000200 /* EEPROM is busy */ 74#define AR5K_CFG_EEBS 0x00000200 /* EEPROM is busy */
75#define AR5K_CFG_CLKGD 0x00000400 /* Clock gated (?) */ 75#define AR5K_CFG_CLKGD 0x00000400 /* Clock gated (Disable dynamic clock) */
76#define AR5K_CFG_TXCNT 0x00007800 /* Tx frame count (?) [5210] */ 76#define AR5K_CFG_TXCNT 0x00007800 /* Tx frame count (?) [5210] */
77#define AR5K_CFG_TXCNT_S 11 77#define AR5K_CFG_TXCNT_S 11
78#define AR5K_CFG_TXFSTAT 0x00008000 /* Tx frame status (?) [5210] */ 78#define AR5K_CFG_TXFSTAT 0x00008000 /* Tx frame status (?) [5210] */
79#define AR5K_CFG_TXFSTRT 0x00010000 /* [5210] */ 79#define AR5K_CFG_TXFSTRT 0x00010000 /* [5210] */
80#define AR5K_CFG_PCI_THRES 0x00060000 /* [5211+] */ 80#define AR5K_CFG_PCI_THRES 0x00060000 /* PCI Master req q threshold [5211+] */
81#define AR5K_CFG_PCI_THRES_S 17 81#define AR5K_CFG_PCI_THRES_S 17
82 82
83/* 83/*
@@ -162,35 +162,40 @@
162/* 162/*
163 * Transmit configuration register 163 * Transmit configuration register
164 */ 164 */
165#define AR5K_TXCFG 0x0030 /* Register Address */ 165#define AR5K_TXCFG 0x0030 /* Register Address */
166#define AR5K_TXCFG_SDMAMR 0x00000007 /* DMA size */ 166#define AR5K_TXCFG_SDMAMR 0x00000007 /* DMA size (read) */
167#define AR5K_TXCFG_SDMAMR_S 0 167#define AR5K_TXCFG_SDMAMR_S 0
168#define AR5K_TXCFG_B_MODE 0x00000008 /* Set b mode for 5111 (enable 2111) */ 168#define AR5K_TXCFG_B_MODE 0x00000008 /* Set b mode for 5111 (enable 2111) */
169#define AR5K_TXCFG_TXFSTP 0x00000008 /* TX DMA full Stop [5210] */ 169#define AR5K_TXCFG_TXFSTP 0x00000008 /* TX DMA full Stop [5210] */
170#define AR5K_TXCFG_TXFULL 0x000003f0 /* TX Trigger level mask */ 170#define AR5K_TXCFG_TXFULL 0x000003f0 /* TX Trigger level mask */
171#define AR5K_TXCFG_TXFULL_S 4 171#define AR5K_TXCFG_TXFULL_S 4
172#define AR5K_TXCFG_TXFULL_0B 0x00000000 172#define AR5K_TXCFG_TXFULL_0B 0x00000000
173#define AR5K_TXCFG_TXFULL_64B 0x00000010 173#define AR5K_TXCFG_TXFULL_64B 0x00000010
174#define AR5K_TXCFG_TXFULL_128B 0x00000020 174#define AR5K_TXCFG_TXFULL_128B 0x00000020
175#define AR5K_TXCFG_TXFULL_192B 0x00000030 175#define AR5K_TXCFG_TXFULL_192B 0x00000030
176#define AR5K_TXCFG_TXFULL_256B 0x00000040 176#define AR5K_TXCFG_TXFULL_256B 0x00000040
177#define AR5K_TXCFG_TXCONT_EN 0x00000080 177#define AR5K_TXCFG_TXCONT_EN 0x00000080
178#define AR5K_TXCFG_DMASIZE 0x00000100 /* Flag for passing DMA size [5210] */ 178#define AR5K_TXCFG_DMASIZE 0x00000100 /* Flag for passing DMA size [5210] */
179#define AR5K_TXCFG_JUMBO_TXE 0x00000400 /* Enable jumbo frames transmition (?) [5211+] */ 179#define AR5K_TXCFG_JUMBO_DESC_EN 0x00000400 /* Enable jumbo tx descriptors [5211+] */
180#define AR5K_TXCFG_RTSRND 0x00001000 /* [5211+] */ 180#define AR5K_TXCFG_ADHOC_BCN_ATIM 0x00000800 /* Adhoc Beacon ATIM Policy */
181#define AR5K_TXCFG_FRMPAD_DIS 0x00002000 /* [5211+] */ 181#define AR5K_TXCFG_ATIM_WINDOW_DEF_DIS 0x00001000 /* Disable ATIM window defer [5211+] */
182#define AR5K_TXCFG_RDY_DIS 0x00004000 /* [5211+] */ 182#define AR5K_TXCFG_RTSRND 0x00001000 /* [5211+] */
183#define AR5K_TXCFG_FRMPAD_DIS 0x00002000 /* [5211+] */
184#define AR5K_TXCFG_RDY_CBR_DIS 0x00004000 /* Ready time CBR disable [5211+] */
185#define AR5K_TXCFG_JUMBO_FRM_MODE 0x00008000 /* Jumbo frame mode [5211+] */
186#define AR5K_TXCFG_DCU_CACHING_DIS 0x00010000 /* Disable DCU caching */
183 187
184/* 188/*
185 * Receive configuration register 189 * Receive configuration register
186 */ 190 */
187#define AR5K_RXCFG 0x0034 /* Register Address */ 191#define AR5K_RXCFG 0x0034 /* Register Address */
188#define AR5K_RXCFG_SDMAMW 0x00000007 /* DMA size */ 192#define AR5K_RXCFG_SDMAMW 0x00000007 /* DMA size (write) */
189#define AR5K_RXCFG_SDMAMW_S 0 193#define AR5K_RXCFG_SDMAMW_S 0
190#define AR5K_RXCFG_DEF_ANTENNA 0x00000008 /* Default antenna */ 194#define AR5K_RXCFG_ZLFDMA 0x00000008 /* Enable Zero-length frame DMA */
191#define AR5K_RXCFG_ZLFDMA 0x00000010 /* Zero-length DMA */ 195#define AR5K_RXCFG_DEF_ANTENNA 0x00000010 /* Default antenna (?) */
192#define AR5K_RXCFG_JUMBO_RXE 0x00000020 /* Enable jumbo frames reception (?) [5211+] */ 196#define AR5K_RXCFG_JUMBO_RXE 0x00000020 /* Enable jumbo rx descriptors [5211+] */
193#define AR5K_RXCFG_JUMBO_WRAP 0x00000040 /* Wrap jumbo frames (?) [5211+] */ 197#define AR5K_RXCFG_JUMBO_WRAP 0x00000040 /* Wrap jumbo frames [5211+] */
198#define AR5K_RXCFG_SLE_ENTRY 0x00000080 /* Sleep entry policy */
194 199
195/* 200/*
196 * Receive jumbo descriptor last address register 201 * Receive jumbo descriptor last address register
@@ -202,35 +207,35 @@
202 * MIB control register 207 * MIB control register
203 */ 208 */
204#define AR5K_MIBC 0x0040 /* Register Address */ 209#define AR5K_MIBC 0x0040 /* Register Address */
205#define AR5K_MIBC_COW 0x00000001 210#define AR5K_MIBC_COW 0x00000001 /* Warn test indicator */
206#define AR5K_MIBC_FMC 0x00000002 /* Freeze Mib Counters (?) */ 211#define AR5K_MIBC_FMC 0x00000002 /* Freeze MIB Counters */
207#define AR5K_MIBC_CMC 0x00000004 /* Clean Mib Counters (?) */ 212#define AR5K_MIBC_CMC 0x00000004 /* Clean MIB Counters */
208#define AR5K_MIBC_MCS 0x00000008 213#define AR5K_MIBC_MCS 0x00000008 /* MIB counter strobe */
209 214
210/* 215/*
211 * Timeout prescale register 216 * Timeout prescale register
212 */ 217 */
213#define AR5K_TOPS 0x0044 218#define AR5K_TOPS 0x0044
214#define AR5K_TOPS_M 0x0000ffff /* [5211+] (?) */ 219#define AR5K_TOPS_M 0x0000ffff
215 220
216/* 221/*
217 * Receive timeout register (no frame received) 222 * Receive timeout register (no frame received)
218 */ 223 */
219#define AR5K_RXNOFRM 0x0048 224#define AR5K_RXNOFRM 0x0048
220#define AR5K_RXNOFRM_M 0x000003ff /* [5211+] (?) */ 225#define AR5K_RXNOFRM_M 0x000003ff
221 226
222/* 227/*
223 * Transmit timeout register (no frame sent) 228 * Transmit timeout register (no frame sent)
224 */ 229 */
225#define AR5K_TXNOFRM 0x004c 230#define AR5K_TXNOFRM 0x004c
226#define AR5K_TXNOFRM_M 0x000003ff /* [5211+] (?) */ 231#define AR5K_TXNOFRM_M 0x000003ff
227#define AR5K_TXNOFRM_QCU 0x000ffc00 /* [5211+] (?) */ 232#define AR5K_TXNOFRM_QCU 0x000ffc00
228 233
229/* 234/*
230 * Receive frame gap timeout register 235 * Receive frame gap timeout register
231 */ 236 */
232#define AR5K_RPGTO 0x0050 237#define AR5K_RPGTO 0x0050
233#define AR5K_RPGTO_M 0x000003ff /* [5211+] (?) */ 238#define AR5K_RPGTO_M 0x000003ff
234 239
235/* 240/*
236 * Receive frame count limit register 241 * Receive frame count limit register
@@ -241,6 +246,7 @@
241 246
242/* 247/*
243 * Misc settings register 248 * Misc settings register
249 * (reserved0-3)
244 */ 250 */
245#define AR5K_MISC 0x0058 /* Register Address */ 251#define AR5K_MISC 0x0058 /* Register Address */
246#define AR5K_MISC_DMA_OBS_M 0x000001e0 252#define AR5K_MISC_DMA_OBS_M 0x000001e0
@@ -256,6 +262,7 @@
256 262
257/* 263/*
258 * QCU/DCU clock gating register (5311) 264 * QCU/DCU clock gating register (5311)
265 * (reserved4-5)
259 */ 266 */
260#define AR5K_QCUDCU_CLKGT 0x005c /* Register Address (?) */ 267#define AR5K_QCUDCU_CLKGT 0x005c /* Register Address (?) */
261#define AR5K_QCUDCU_CLKGT_QCU 0x0000ffff /* Mask for QCU clock */ 268#define AR5K_QCUDCU_CLKGT_QCU 0x0000ffff /* Mask for QCU clock */
@@ -284,18 +291,18 @@
284#define AR5K_ISR_TXEOL 0x00000400 /* Empty TX descriptor */ 291#define AR5K_ISR_TXEOL 0x00000400 /* Empty TX descriptor */
285#define AR5K_ISR_TXURN 0x00000800 /* Transmit FIFO underrun */ 292#define AR5K_ISR_TXURN 0x00000800 /* Transmit FIFO underrun */
286#define AR5K_ISR_MIB 0x00001000 /* Update MIB counters */ 293#define AR5K_ISR_MIB 0x00001000 /* Update MIB counters */
287#define AR5K_ISR_SWI 0x00002000 /* Software interrupt (?) */ 294#define AR5K_ISR_SWI 0x00002000 /* Software interrupt */
288#define AR5K_ISR_RXPHY 0x00004000 /* PHY error */ 295#define AR5K_ISR_RXPHY 0x00004000 /* PHY error */
289#define AR5K_ISR_RXKCM 0x00008000 296#define AR5K_ISR_RXKCM 0x00008000 /* RX Key cache miss */
290#define AR5K_ISR_SWBA 0x00010000 /* Software beacon alert */ 297#define AR5K_ISR_SWBA 0x00010000 /* Software beacon alert */
291#define AR5K_ISR_BRSSI 0x00020000 298#define AR5K_ISR_BRSSI 0x00020000
292#define AR5K_ISR_BMISS 0x00040000 /* Beacon missed */ 299#define AR5K_ISR_BMISS 0x00040000 /* Beacon missed */
293#define AR5K_ISR_HIUERR 0x00080000 /* Host Interface Unit error [5211+] */ 300#define AR5K_ISR_HIUERR 0x00080000 /* Host Interface Unit error [5211+] */
294#define AR5K_ISR_BNR 0x00100000 /* Beacon not ready [5211+] */ 301#define AR5K_ISR_BNR 0x00100000 /* Beacon not ready [5211+] */
295#define AR5K_ISR_MCABT 0x00100000 /* [5210] */ 302#define AR5K_ISR_MCABT 0x00100000 /* Master Cycle Abort [5210] */
296#define AR5K_ISR_RXCHIRP 0x00200000 /* [5212+] */ 303#define AR5K_ISR_RXCHIRP 0x00200000 /* CHIRP Received [5212+] */
297#define AR5K_ISR_SSERR 0x00200000 /* [5210] */ 304#define AR5K_ISR_SSERR 0x00200000 /* Signaled System Error [5210] */
298#define AR5K_ISR_DPERR 0x00400000 /* [5210] */ 305#define AR5K_ISR_DPERR 0x00400000 /* Det par Error (?) [5210] */
299#define AR5K_ISR_TIM 0x00800000 /* [5210] */ 306#define AR5K_ISR_TIM 0x00800000 /* [5210] */
300#define AR5K_ISR_BCNMISC 0x00800000 /* [5212+] */ 307#define AR5K_ISR_BCNMISC 0x00800000 /* [5212+] */
301#define AR5K_ISR_GPIO 0x01000000 /* GPIO (rf kill)*/ 308#define AR5K_ISR_GPIO 0x01000000 /* GPIO (rf kill)*/
@@ -320,14 +327,14 @@
320 327
321#define AR5K_SISR2 0x008c /* Register Address [5211+] */ 328#define AR5K_SISR2 0x008c /* Register Address [5211+] */
322#define AR5K_SISR2_QCU_TXURN 0x000003ff /* Mask for QCU_TXURN */ 329#define AR5K_SISR2_QCU_TXURN 0x000003ff /* Mask for QCU_TXURN */
323#define AR5K_SISR2_MCABT 0x00100000 330#define AR5K_SISR2_MCABT 0x00100000 /* Master Cycle Abort */
324#define AR5K_SISR2_SSERR 0x00200000 331#define AR5K_SISR2_SSERR 0x00200000 /* Signaled System Error */
325#define AR5K_SISR2_DPERR 0x00400000 332#define AR5K_SISR2_DPERR 0x00400000 /* Det par Error (?) */
326#define AR5K_SISR2_TIM 0x01000000 /* [5212+] */ 333#define AR5K_SISR2_TIM 0x01000000 /* [5212+] */
327#define AR5K_SISR2_CAB_END 0x02000000 /* [5212+] */ 334#define AR5K_SISR2_CAB_END 0x02000000 /* [5212+] */
328#define AR5K_SISR2_DTIM_SYNC 0x04000000 /* [5212+] */ 335#define AR5K_SISR2_DTIM_SYNC 0x04000000 /* DTIM sync lost [5212+] */
329#define AR5K_SISR2_BCN_TIMEOUT 0x08000000 /* [5212+] */ 336#define AR5K_SISR2_BCN_TIMEOUT 0x08000000 /* Beacon Timeout [5212+] */
330#define AR5K_SISR2_CAB_TIMEOUT 0x10000000 /* [5212+] */ 337#define AR5K_SISR2_CAB_TIMEOUT 0x10000000 /* CAB Timeout [5212+] */
331#define AR5K_SISR2_DTIM 0x20000000 /* [5212+] */ 338#define AR5K_SISR2_DTIM 0x20000000 /* [5212+] */
332 339
333#define AR5K_SISR3 0x0090 /* Register Address [5211+] */ 340#define AR5K_SISR3 0x0090 /* Register Address [5211+] */
@@ -368,18 +375,18 @@
368#define AR5K_IMR_TXEOL 0x00000400 /* Empty TX descriptor*/ 375#define AR5K_IMR_TXEOL 0x00000400 /* Empty TX descriptor*/
369#define AR5K_IMR_TXURN 0x00000800 /* Transmit FIFO underrun*/ 376#define AR5K_IMR_TXURN 0x00000800 /* Transmit FIFO underrun*/
370#define AR5K_IMR_MIB 0x00001000 /* Update MIB counters*/ 377#define AR5K_IMR_MIB 0x00001000 /* Update MIB counters*/
371#define AR5K_IMR_SWI 0x00002000 378#define AR5K_IMR_SWI 0x00002000 /* Software interrupt */
372#define AR5K_IMR_RXPHY 0x00004000 /* PHY error*/ 379#define AR5K_IMR_RXPHY 0x00004000 /* PHY error*/
373#define AR5K_IMR_RXKCM 0x00008000 380#define AR5K_IMR_RXKCM 0x00008000 /* RX Key cache miss */
374#define AR5K_IMR_SWBA 0x00010000 /* Software beacon alert*/ 381#define AR5K_IMR_SWBA 0x00010000 /* Software beacon alert*/
375#define AR5K_IMR_BRSSI 0x00020000 382#define AR5K_IMR_BRSSI 0x00020000
376#define AR5K_IMR_BMISS 0x00040000 /* Beacon missed*/ 383#define AR5K_IMR_BMISS 0x00040000 /* Beacon missed*/
377#define AR5K_IMR_HIUERR 0x00080000 /* Host Interface Unit error [5211+] */ 384#define AR5K_IMR_HIUERR 0x00080000 /* Host Interface Unit error [5211+] */
378#define AR5K_IMR_BNR 0x00100000 /* Beacon not ready [5211+] */ 385#define AR5K_IMR_BNR 0x00100000 /* Beacon not ready [5211+] */
379#define AR5K_IMR_MCABT 0x00100000 /* [5210] */ 386#define AR5K_IMR_MCABT 0x00100000 /* Master Cycle Abort [5210] */
380#define AR5K_IMR_RXCHIRP 0x00200000 /* [5212+]*/ 387#define AR5K_IMR_RXCHIRP 0x00200000 /* CHIRP Received [5212+]*/
381#define AR5K_IMR_SSERR 0x00200000 /* [5210] */ 388#define AR5K_IMR_SSERR 0x00200000 /* Signaled System Error [5210] */
382#define AR5K_IMR_DPERR 0x00400000 /* [5210] */ 389#define AR5K_IMR_DPERR 0x00400000 /* Det par Error (?) [5210] */
383#define AR5K_IMR_TIM 0x00800000 /* [5211+] */ 390#define AR5K_IMR_TIM 0x00800000 /* [5211+] */
384#define AR5K_IMR_BCNMISC 0x00800000 /* [5212+] */ 391#define AR5K_IMR_BCNMISC 0x00800000 /* [5212+] */
385#define AR5K_IMR_GPIO 0x01000000 /* GPIO (rf kill)*/ 392#define AR5K_IMR_GPIO 0x01000000 /* GPIO (rf kill)*/
@@ -405,14 +412,14 @@
405#define AR5K_SIMR2 0x00ac /* Register Address [5211+] */ 412#define AR5K_SIMR2 0x00ac /* Register Address [5211+] */
406#define AR5K_SIMR2_QCU_TXURN 0x000003ff /* Mask for QCU_TXURN */ 413#define AR5K_SIMR2_QCU_TXURN 0x000003ff /* Mask for QCU_TXURN */
407#define AR5K_SIMR2_QCU_TXURN_S 0 414#define AR5K_SIMR2_QCU_TXURN_S 0
408#define AR5K_SIMR2_MCABT 0x00100000 415#define AR5K_SIMR2_MCABT 0x00100000 /* Master Cycle Abort */
409#define AR5K_SIMR2_SSERR 0x00200000 416#define AR5K_SIMR2_SSERR 0x00200000 /* Signaled System Error */
410#define AR5K_SIMR2_DPERR 0x00400000 417#define AR5K_SIMR2_DPERR 0x00400000 /* Det par Error (?) */
411#define AR5K_SIMR2_TIM 0x01000000 /* [5212+] */ 418#define AR5K_SIMR2_TIM 0x01000000 /* [5212+] */
412#define AR5K_SIMR2_CAB_END 0x02000000 /* [5212+] */ 419#define AR5K_SIMR2_CAB_END 0x02000000 /* [5212+] */
413#define AR5K_SIMR2_DTIM_SYNC 0x04000000 /* [5212+] */ 420#define AR5K_SIMR2_DTIM_SYNC 0x04000000 /* DTIM Sync lost [5212+] */
414#define AR5K_SIMR2_BCN_TIMEOUT 0x08000000 /* [5212+] */ 421#define AR5K_SIMR2_BCN_TIMEOUT 0x08000000 /* Beacon Timeout [5212+] */
415#define AR5K_SIMR2_CAB_TIMEOUT 0x10000000 /* [5212+] */ 422#define AR5K_SIMR2_CAB_TIMEOUT 0x10000000 /* CAB Timeout [5212+] */
416#define AR5K_SIMR2_DTIM 0x20000000 /* [5212+] */ 423#define AR5K_SIMR2_DTIM 0x20000000 /* [5212+] */
417 424
418#define AR5K_SIMR3 0x00b0 /* Register Address [5211+] */ 425#define AR5K_SIMR3 0x00b0 /* Register Address [5211+] */
@@ -425,23 +432,69 @@
425#define AR5K_SIMR4_QTRIG 0x000003ff /* Mask for QTRIG */ 432#define AR5K_SIMR4_QTRIG 0x000003ff /* Mask for QTRIG */
426#define AR5K_SIMR4_QTRIG_S 0 433#define AR5K_SIMR4_QTRIG_S 0
427 434
435/*
436 * DMA Debug registers 0-7
437 * 0xe0 - 0xfc
438 */
428 439
429/* 440/*
430 * Decompression mask registers [5212+] 441 * Decompression mask registers [5212+]
431 */ 442 */
432#define AR5K_DCM_ADDR 0x0400 /* Decompression mask address (?) */ 443#define AR5K_DCM_ADDR 0x0400 /* Decompression mask address (index) */
433#define AR5K_DCM_DATA 0x0404 /* Decompression mask data (?) */ 444#define AR5K_DCM_DATA 0x0404 /* Decompression mask data */
445
446/*
447 * Wake On Wireless pattern control register [5212+]
448 */
449#define AR5K_WOW_PCFG 0x0410 /* Register Address */
450#define AR5K_WOW_PCFG_PAT_MATCH_EN 0x00000001 /* Pattern match enable */
451#define AR5K_WOW_PCFG_LONG_FRAME_POL 0x00000002 /* Long frame policy */
452#define AR5K_WOW_PCFG_WOBMISS 0x00000004 /* Wake on bea(con) miss (?) */
453#define AR5K_WOW_PCFG_PAT_0_EN 0x00000100 /* Enable pattern 0 */
454#define AR5K_WOW_PCFG_PAT_1_EN 0x00000200 /* Enable pattern 1 */
455#define AR5K_WOW_PCFG_PAT_2_EN 0x00000400 /* Enable pattern 2 */
456#define AR5K_WOW_PCFG_PAT_3_EN 0x00000800 /* Enable pattern 3 */
457#define AR5K_WOW_PCFG_PAT_4_EN 0x00001000 /* Enable pattern 4 */
458#define AR5K_WOW_PCFG_PAT_5_EN 0x00002000 /* Enable pattern 5 */
459
460/*
461 * Wake On Wireless pattern index register (?) [5212+]
462 */
463#define AR5K_WOW_PAT_IDX 0x0414
464
465/*
466 * Wake On Wireless pattern data register [5212+]
467 */
468#define AR5K_WOW_PAT_DATA 0x0418 /* Register Address */
469#define AR5K_WOW_PAT_DATA_0_3_V 0x00000001 /* Pattern 0, 3 value */
470#define AR5K_WOW_PAT_DATA_1_4_V 0x00000100 /* Pattern 1, 4 value */
471#define AR5K_WOW_PAT_DATA_2_5_V 0x00010000 /* Pattern 2, 5 value */
472#define AR5K_WOW_PAT_DATA_0_3_M 0x01000000 /* Pattern 0, 3 mask */
473#define AR5K_WOW_PAT_DATA_1_4_M 0x04000000 /* Pattern 1, 4 mask */
474#define AR5K_WOW_PAT_DATA_2_5_M 0x10000000 /* Pattern 2, 5 mask */
434 475
435/* 476/*
436 * Decompression configuration registers [5212+] 477 * Decompression configuration registers [5212+]
437 */ 478 */
438#define AR5K_DCCFG 0x0420 479#define AR5K_DCCFG 0x0420 /* Register Address */
480#define AR5K_DCCFG_GLOBAL_EN 0x00000001 /* Enable decompression on all queues */
481#define AR5K_DCCFG_BYPASS_EN 0x00000002 /* Bypass decompression */
482#define AR5K_DCCFG_BCAST_EN 0x00000004 /* Enable decompression for bcast frames */
483#define AR5K_DCCFG_MCAST_EN 0x00000008 /* Enable decompression for mcast frames */
439 484
440/* 485/*
441 * Compression configuration registers [5212+] 486 * Compression configuration registers [5212+]
442 */ 487 */
443#define AR5K_CCFG 0x0600 488#define AR5K_CCFG 0x0600 /* Register Address */
444#define AR5K_CCFG_CUP 0x0604 489#define AR5K_CCFG_WINDOW_SIZE 0x00000007 /* Compression window size */
490#define AR5K_CCFG_CPC_EN 0x00000008 /* Enable performance counters */
491
492#define AR5K_CCFG_CCU 0x0604 /* Register Address */
493#define AR5K_CCFG_CCU_CUP_EN 0x00000001 /* CCU Catchup enable */
494#define AR5K_CCFG_CCU_CREDIT 0x00000002 /* CCU Credit (field) */
495#define AR5K_CCFG_CCU_CD_THRES 0x00000080 /* CCU Cyc(lic?) debt threshold (field) */
496#define AR5K_CCFG_CCU_CUP_LCNT 0x00010000 /* CCU Catchup lit(?) count */
497#define AR5K_CCFG_CCU_INIT 0x00100200 /* Initial value during reset */
445 498
446/* 499/*
447 * Compression performance counter registers [5212+] 500 * Compression performance counter registers [5212+]
@@ -450,7 +503,7 @@
450#define AR5K_CPC1 0x0614 /* Compression performance counter 1*/ 503#define AR5K_CPC1 0x0614 /* Compression performance counter 1*/
451#define AR5K_CPC2 0x0618 /* Compression performance counter 2 */ 504#define AR5K_CPC2 0x0618 /* Compression performance counter 2 */
452#define AR5K_CPC3 0x061c /* Compression performance counter 3 */ 505#define AR5K_CPC3 0x061c /* Compression performance counter 3 */
453#define AR5K_CPCORN 0x0620 /* Compression performance overrun (?) */ 506#define AR5K_CPCOVF 0x0620 /* Compression performance overflow */
454 507
455 508
456/* 509/*
@@ -466,8 +519,6 @@
466 * set/clear, which contain status for all queues (we shift by 1 for each 519 * set/clear, which contain status for all queues (we shift by 1 for each
467 * queue). To access these registers easily we define some macros here 520 * queue). To access these registers easily we define some macros here
468 * that are used inside HAL. For more info check out the *_tx_queue functions. 521 * that are used inside HAL. For more info check out the *_tx_queue functions.
469 *
470 * TODO: Boundary checking on macros (here?)
471 */ 522 */
472 523
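/*
 * [Editorial sketch] Typical use of the per-queue helpers: AR5K_QUEUE_MISC(3)
 * resolves to the MISC register address of queue 3, which is then read
 * through the driver's register accessor, e.g.:
 *
 *	misc = ath5k_hw_reg_read(ah, AR5K_QUEUE_MISC(3));
 *
 * (ath5k_hw_reg_read() is assumed here for illustration.)
 */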
473/* 524/*
@@ -513,7 +564,6 @@
513#define AR5K_QCU_RDYTIMECFG_BASE 0x0900 /* Register Address - Queue0 RDYTIMECFG */ 564#define AR5K_QCU_RDYTIMECFG_BASE 0x0900 /* Register Address - Queue0 RDYTIMECFG */
514#define AR5K_QCU_RDYTIMECFG_INTVAL 0x00ffffff /* Ready time interval mask */ 565#define AR5K_QCU_RDYTIMECFG_INTVAL 0x00ffffff /* Ready time interval mask */
515#define AR5K_QCU_RDYTIMECFG_INTVAL_S 0 566#define AR5K_QCU_RDYTIMECFG_INTVAL_S 0
516#define AR5K_QCU_RDYTIMECFG_DURATION 0x00ffffff /* Ready time duration mask */
517#define AR5K_QCU_RDYTIMECFG_ENABLE 0x01000000 /* Ready time enable mask */ 567#define AR5K_QCU_RDYTIMECFG_ENABLE 0x01000000 /* Ready time enable mask */
518#define AR5K_QUEUE_RDYTIMECFG(_q) AR5K_QUEUE_REG(AR5K_QCU_RDYTIMECFG_BASE, _q) 568#define AR5K_QUEUE_RDYTIMECFG(_q) AR5K_QUEUE_REG(AR5K_QCU_RDYTIMECFG_BASE, _q)
519 569
@@ -534,19 +584,20 @@
534 */ 584 */
535#define AR5K_QCU_MISC_BASE 0x09c0 /* Register Address -Queue0 MISC */ 585#define AR5K_QCU_MISC_BASE 0x09c0 /* Register Address -Queue0 MISC */
536#define AR5K_QCU_MISC_FRSHED_M 0x0000000f /* Frame scheduling mask */ 586#define AR5K_QCU_MISC_FRSHED_M 0x0000000f /* Frame scheduling mask */
537#define AR5K_QCU_MISC_FRSHED_ASAP 0 /* ASAP */ 587#define AR5K_QCU_MISC_FRSHED_ASAP 0 /* ASAP */
538#define AR5K_QCU_MISC_FRSHED_CBR 1 /* Constant Bit Rate */ 588#define AR5K_QCU_MISC_FRSHED_CBR 1 /* Constant Bit Rate */
539#define AR5K_QCU_MISC_FRSHED_DBA_GT 2 /* DMA Beacon alert gated (?) */ 589#define AR5K_QCU_MISC_FRSHED_DBA_GT 2 /* DMA Beacon alert gated (?) */
540#define AR5K_QCU_MISC_FRSHED_TIM_GT 3 /* Time gated (?) */ 590#define AR5K_QCU_MISC_FRSHED_TIM_GT 3 /* Time gated (?) */
541#define AR5K_QCU_MISC_FRSHED_BCN_SENT_GT 4 /* Beacon sent gated (?) */ 591#define AR5K_QCU_MISC_FRSHED_BCN_SENT_GT 4 /* Beacon sent gated (?) */
542#define AR5K_QCU_MISC_ONESHOT_ENABLE 0x00000010 /* Oneshot enable */ 592#define AR5K_QCU_MISC_ONESHOT_ENABLE 0x00000010 /* Oneshot enable */
543#define AR5K_QCU_MISC_CBREXP 0x00000020 /* CBR expired (normal queue) */ 593#define AR5K_QCU_MISC_CBREXP 0x00000020 /* CBR expired (normal queue) */
544#define AR5K_QCU_MISC_CBREXP_BCN 0x00000040 /* CBR expired (beacon queue) */ 594#define AR5K_QCU_MISC_CBREXP_BCN 0x00000040 /* CBR expired (beacon queue) */
545#define AR5K_QCU_MISC_BCN_ENABLE 0x00000080 /* Beacons enabled */ 595#define AR5K_QCU_MISC_BCN_ENABLE 0x00000080 /* Enable Beacon use */
546#define AR5K_QCU_MISC_CBR_THRES_ENABLE 0x00000100 /* CBR threshold enabled (?) */ 596#define AR5K_QCU_MISC_CBR_THRES_ENABLE 0x00000100 /* CBR threshold enabled */
547#define AR5K_QCU_MISC_TXE 0x00000200 /* TXE reset when RDYTIME enabled (?) */ 597#define AR5K_QCU_MISC_RDY_VEOL_POLICY 0x00000200 /* TXE reset when RDYTIME enabled */
548#define AR5K_QCU_MISC_CBR 0x00000400 /* CBR threshold reset (?) */ 598#define AR5K_QCU_MISC_CBR_RESET_CNT 0x00000400 /* CBR threshold (counter) reset */
549#define AR5K_QCU_MISC_DCU_EARLY 0x00000800 /* DCU reset (?) */ 599#define AR5K_QCU_MISC_DCU_EARLY 0x00000800 /* DCU early termination */
600#define AR5K_QCU_MISC_DCU_CMP_EN 0x00001000 /* Enable frame compression */
550#define AR5K_QUEUE_MISC(_q) AR5K_QUEUE_REG(AR5K_QCU_MISC_BASE, _q) 601#define AR5K_QUEUE_MISC(_q) AR5K_QUEUE_REG(AR5K_QCU_MISC_BASE, _q)
551 602
552 603
@@ -555,7 +606,7 @@
555 */ 606 */
556#define AR5K_QCU_STS_BASE 0x0a00 /* Register Address - Queue0 STS */ 607#define AR5K_QCU_STS_BASE 0x0a00 /* Register Address - Queue0 STS */
557#define AR5K_QCU_STS_FRMPENDCNT 0x00000003 /* Frames pending counter */ 608#define AR5K_QCU_STS_FRMPENDCNT 0x00000003 /* Frames pending counter */
558#define AR5K_QCU_STS_CBREXPCNT 0x0000ff00 /* CBR expired counter (?) */ 609#define AR5K_QCU_STS_CBREXPCNT 0x0000ff00 /* CBR expired counter */
559#define AR5K_QUEUE_STATUS(_q) AR5K_QUEUE_REG(AR5K_QCU_STS_BASE, _q) 610#define AR5K_QUEUE_STATUS(_q) AR5K_QUEUE_REG(AR5K_QCU_STS_BASE, _q)
560 611
561/* 612/*
@@ -569,9 +620,11 @@
569 */ 620 */
570#define AR5K_QCU_CBB_SELECT 0x0b00 621#define AR5K_QCU_CBB_SELECT 0x0b00
571#define AR5K_QCU_CBB_ADDR 0x0b04 622#define AR5K_QCU_CBB_ADDR 0x0b04
623#define AR5K_QCU_CBB_ADDR_S 9
572 624
573/* 625/*
574 * QCU compression buffer configuration register [5212+] 626 * QCU compression buffer configuration register [5212+]
627 * (buffer size)
575 */ 628 */
576#define AR5K_QCU_CBCFG 0x0b08 629#define AR5K_QCU_CBCFG 0x0b08
577 630
@@ -652,80 +705,100 @@
652 * No lockout means there is no special handling. 705 * No lockout means there is no special handling.
653 */ 706 */
654#define AR5K_DCU_MISC_BASE 0x1100 /* Register Address -Queue0 DCU_MISC */ 707#define AR5K_DCU_MISC_BASE 0x1100 /* Register Address -Queue0 DCU_MISC */
655#define AR5K_DCU_MISC_BACKOFF 0x000007ff /* Mask for backoff setting (?) */ 708#define AR5K_DCU_MISC_BACKOFF 0x000007ff /* Mask for backoff threshold */
656#define AR5K_DCU_MISC_BACKOFF_FRAG 0x00000200 /* Enable backoff while bursting */ 709#define AR5K_DCU_MISC_BACKOFF_FRAG 0x00000200 /* Enable backoff while bursting */
657#define AR5K_DCU_MISC_HCFPOLL_ENABLE 0x00000800 /* CF - Poll (?) */ 710#define AR5K_DCU_MISC_HCFPOLL_ENABLE 0x00000800 /* CF - Poll enable */
658#define AR5K_DCU_MISC_BACKOFF_PERSIST 0x00001000 /* Persistent backoff (?) */ 711#define AR5K_DCU_MISC_BACKOFF_PERSIST 0x00001000 /* Persistent backoff */
659#define AR5K_DCU_MISC_FRMPRFTCH_ENABLE 0x00002000 /* Enable frame pre-fetch (?) */ 712#define AR5K_DCU_MISC_FRMPRFTCH_ENABLE 0x00002000 /* Enable frame pre-fetch */
660#define AR5K_DCU_MISC_VIRTCOL 0x0000c000 /* Mask for Virtual Collision (?) */ 713#define AR5K_DCU_MISC_VIRTCOL 0x0000c000 /* Mask for Virtual Collision (?) */
661#define AR5K_DCU_MISC_VIRTCOL_NORMAL 0 714#define AR5K_DCU_MISC_VIRTCOL_NORMAL 0
662#define AR5K_DCU_MISC_VIRTCOL_MODIFIED 1 715#define AR5K_DCU_MISC_VIRTCOL_MODIFIED 1
663#define AR5K_DCU_MISC_VIRTCOL_IGNORE 2 716#define AR5K_DCU_MISC_VIRTCOL_IGNORE 2
664#define AR5K_DCU_MISC_BCN_ENABLE 0x00010000 /* Beacon enable (?) */ 717#define AR5K_DCU_MISC_BCN_ENABLE 0x00010000 /* Enable Beacon use */
665#define AR5K_DCU_MISC_ARBLOCK_CTL 0x00060000 /* Arbiter lockout control mask */ 718#define AR5K_DCU_MISC_ARBLOCK_CTL 0x00060000 /* Arbiter lockout control mask */
666#define AR5K_DCU_MISC_ARBLOCK_CTL_S 17 719#define AR5K_DCU_MISC_ARBLOCK_CTL_S 17
667#define AR5K_DCU_MISC_ARBLOCK_CTL_NONE 0 /* No arbiter lockout */ 720#define AR5K_DCU_MISC_ARBLOCK_CTL_NONE 0 /* No arbiter lockout */
668#define AR5K_DCU_MISC_ARBLOCK_CTL_INTFRM 1 /* Intra-frame lockout */ 721#define AR5K_DCU_MISC_ARBLOCK_CTL_INTFRM 1 /* Intra-frame lockout */
669#define AR5K_DCU_MISC_ARBLOCK_CTL_GLOBAL 2 /* Global lockout */ 722#define AR5K_DCU_MISC_ARBLOCK_CTL_GLOBAL 2 /* Global lockout */
670#define AR5K_DCU_MISC_ARBLOCK_IGNORE 0x00080000 723#define AR5K_DCU_MISC_ARBLOCK_IGNORE 0x00080000 /* Ignore Arbiter lockout */
671#define AR5K_DCU_MISC_SEQ_NUM_INCR_DIS 0x00100000 /* Disable sequence number increment (?) */ 724#define AR5K_DCU_MISC_SEQ_NUM_INCR_DIS 0x00100000 /* Disable sequence number increment */
672#define AR5K_DCU_MISC_POST_FR_BKOFF_DIS 0x00200000 /* Disable post-frame backoff (?) */ 725#define AR5K_DCU_MISC_POST_FR_BKOFF_DIS 0x00200000 /* Disable post-frame backoff */
673#define AR5K_DCU_MISC_VIRT_COLL_POLICY 0x00400000 /* Virtual Collision policy (?) */ 726#define AR5K_DCU_MISC_VIRT_COLL_POLICY 0x00400000 /* Virtual Collision cw policy */
674#define AR5K_DCU_MISC_BLOWN_IFS_POLICY 0x00800000 727#define AR5K_DCU_MISC_BLOWN_IFS_POLICY 0x00800000 /* Blown IFS policy (?) */
675#define AR5K_DCU_MISC_SEQNUM_CTL 0x01000000 /* Sequence number control (?) */ 728#define AR5K_DCU_MISC_SEQNUM_CTL 0x01000000 /* Sequence number control (?) */
676#define AR5K_QUEUE_DFS_MISC(_q) AR5K_QUEUE_REG(AR5K_DCU_MISC_BASE, _q) 729#define AR5K_QUEUE_DFS_MISC(_q) AR5K_QUEUE_REG(AR5K_DCU_MISC_BASE, _q)
677 730
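/*
 * [Editorial sketch] Selecting intra-frame arbiter lockout for a queue by
 * rewriting the ARBLOCK_CTL field of a DCU MISC value previously read from
 * AR5K_QUEUE_DFS_MISC(queue), e.g.:
 *
 *	misc &= ~AR5K_DCU_MISC_ARBLOCK_CTL;
 *	misc |= AR5K_DCU_MISC_ARBLOCK_CTL_INTFRM << AR5K_DCU_MISC_ARBLOCK_CTL_S;
 */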
678/* 731/*
679 * DCU frame sequence number registers 732 * DCU frame sequence number registers
680 */ 733 */
681#define AR5K_DCU_SEQNUM_BASE 0x1140 734#define AR5K_DCU_SEQNUM_BASE 0x1140
682#define AR5K_DCU_SEQNUM_M 0x00000fff 735#define AR5K_DCU_SEQNUM_M 0x00000fff
683#define AR5K_QUEUE_DFS_SEQNUM(_q) AR5K_QUEUE_REG(AR5K_DCU_SEQNUM_BASE, _q) 736#define AR5K_QUEUE_DFS_SEQNUM(_q) AR5K_QUEUE_REG(AR5K_DCU_SEQNUM_BASE, _q)
684 737
685/* 738/*
686 * DCU global IFS SIFS registers 739 * DCU global IFS SIFS register
687 */ 740 */
688#define AR5K_DCU_GBL_IFS_SIFS 0x1030 741#define AR5K_DCU_GBL_IFS_SIFS 0x1030
689#define AR5K_DCU_GBL_IFS_SIFS_M 0x0000ffff 742#define AR5K_DCU_GBL_IFS_SIFS_M 0x0000ffff
690 743
691/* 744/*
692 * DCU global IFS slot interval registers 745 * DCU global IFS slot interval register
693 */ 746 */
694#define AR5K_DCU_GBL_IFS_SLOT 0x1070 747#define AR5K_DCU_GBL_IFS_SLOT 0x1070
695#define AR5K_DCU_GBL_IFS_SLOT_M 0x0000ffff 748#define AR5K_DCU_GBL_IFS_SLOT_M 0x0000ffff
696 749
697/* 750/*
698 * DCU global IFS EIFS registers 751 * DCU global IFS EIFS register
699 */ 752 */
700#define AR5K_DCU_GBL_IFS_EIFS 0x10b0 753#define AR5K_DCU_GBL_IFS_EIFS 0x10b0
701#define AR5K_DCU_GBL_IFS_EIFS_M 0x0000ffff 754#define AR5K_DCU_GBL_IFS_EIFS_M 0x0000ffff
702 755
703/* 756/*
704 * DCU global IFS misc registers 757 * DCU global IFS misc register
758 *
759 * LFSR stands for Linear Feedback Shift Register
760 * and it's used for generating pseudo-random
761 * number sequences.
762 *
763 * (If I understand correctly, random numbers are
764 * used for idle sensing -multiplied with cwmin/max etc-)
705 */ 765 */
706#define AR5K_DCU_GBL_IFS_MISC 0x10f0 /* Register Address */ 766#define AR5K_DCU_GBL_IFS_MISC 0x10f0 /* Register Address */
707#define AR5K_DCU_GBL_IFS_MISC_LFSR_SLICE 0x00000007 767#define AR5K_DCU_GBL_IFS_MISC_LFSR_SLICE 0x00000007 /* LFSR Slice Select */
708#define AR5K_DCU_GBL_IFS_MISC_TURBO_MODE 0x00000008 /* Turbo mode (?) */ 768#define AR5K_DCU_GBL_IFS_MISC_TURBO_MODE 0x00000008 /* Turbo mode */
709#define AR5K_DCU_GBL_IFS_MISC_SIFS_DUR_USEC 0x000003f0 /* SIFS Duration mask (?) */ 769#define AR5K_DCU_GBL_IFS_MISC_SIFS_DUR_USEC 0x000003f0 /* SIFS Duration mask */
710#define AR5K_DCU_GBL_IFS_MISC_USEC_DUR 0x000ffc00 770#define AR5K_DCU_GBL_IFS_MISC_USEC_DUR 0x000ffc00 /* USEC Duration mask */
711#define AR5K_DCU_GBL_IFS_MISC_DCU_ARB_DELAY 0x00300000 771#define AR5K_DCU_GBL_IFS_MISC_DCU_ARB_DELAY 0x00300000 /* DCU Arbiter delay mask */
772#define AR5K_DCU_GBL_IFS_MISC_SIFS_CNT_RST 0x00400000 /* SIFS cnt reset policy (?) */
773#define AR5K_DCU_GBL_IFS_MISC_AIFS_CNT_RST 0x00800000 /* AIFS cnt reset policy (?) */
774#define AR5K_DCU_GBL_IFS_MISC_RND_LFSR_SL_DIS 0x01000000 /* Disable random LFSR slice */
712 775
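/*
 * [Editorial sketch] A generic 16bit LFSR step, only to illustrate the
 * shift-and-feedback idea mentioned above; the tap polynomial here is
 * arbitrary and is not meant to match the hardware generator.
 */
static inline u16 lfsr16_step(u16 s)
{
	u16 fb = ((s >> 0) ^ (s >> 2) ^ (s >> 3) ^ (s >> 5)) & 1;
	return (s >> 1) | (u16)(fb << 15);
}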
713/* 776/*
714 * DCU frame prefetch control register 777 * DCU frame prefetch control register
715 */ 778 */
716#define AR5K_DCU_FP 0x1230 779#define AR5K_DCU_FP 0x1230 /* Register Address */
780#define AR5K_DCU_FP_NOBURST_DCU_EN 0x00000001 /* Enable non-burst prefetch on DCU (?) */
781#define AR5K_DCU_FP_NOBURST_EN 0x00000010 /* Enable non-burst prefetch (?) */
782#define AR5K_DCU_FP_BURST_DCU_EN 0x00000020 /* Enable burst prefetch on DCU (?) */
717 783
718/* 784/*
719 * DCU transmit pause control/status register 785 * DCU transmit pause control/status register
720 */ 786 */
721#define AR5K_DCU_TXP 0x1270 /* Register Address */ 787#define AR5K_DCU_TXP 0x1270 /* Register Address */
722#define AR5K_DCU_TXP_M 0x000003ff /* Tx pause mask (?) */ 788#define AR5K_DCU_TXP_M 0x000003ff /* Tx pause mask */
723#define AR5K_DCU_TXP_STATUS 0x00010000 /* Tx pause status (?) */ 789#define AR5K_DCU_TXP_STATUS 0x00010000 /* Tx pause status */
790
791/*
792 * DCU transmit filter table 0 (32 entries)
793 */
794#define AR5K_DCU_TX_FILTER_0_BASE 0x1038
795#define AR5K_DCU_TX_FILTER_0(_n) (AR5K_DCU_TX_FILTER_0_BASE + (_n * 64))
724 796
725/* 797/*
726 * DCU transmit filter register 798 * DCU transmit filter table 1 (16 entries)
727 */ 799 */
728#define AR5K_DCU_TX_FILTER 0x1038 800#define AR5K_DCU_TX_FILTER_1_BASE 0x103c
801#define AR5K_DCU_TX_FILTER_1(_n) (AR5K_DCU_TX_FILTER_1_BASE + ((_n - 32) * 64))
729 802
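/*
 * [Editorial sketch] How the two filter tables map to addresses: entries
 * 0-31 step by 64 bytes from 0x1038, entries 32 and up step by 64 bytes
 * from 0x103c, e.g.:
 *
 *	AR5K_DCU_TX_FILTER_0(5)  -> 0x1038 + 5 * 64 = 0x1178
 *	AR5K_DCU_TX_FILTER_1(33) -> 0x103c + 1 * 64 = 0x107c
 */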
730/* 803/*
731 * DCU clear transmit filter register 804 * DCU clear transmit filter register
@@ -739,9 +812,6 @@
739 812
740/* 813/*
741 * Reset control register 814 * Reset control register
742 *
743 * 4 and 8 are not used in 5211/5212 and
744 * 2 means "baseband reset" on 5211/5212.
745 */ 815 */
746#define AR5K_RESET_CTL 0x4000 /* Register Address */ 816#define AR5K_RESET_CTL 0x4000 /* Register Address */
747#define AR5K_RESET_CTL_PCU 0x00000001 /* Protocol Control Unit reset */ 817#define AR5K_RESET_CTL_PCU 0x00000001 /* Protocol Control Unit reset */
@@ -765,6 +835,7 @@
765#define AR5K_SLEEP_CTL_SLE_SLP 0x00010000 /* Force chip sleep */ 835#define AR5K_SLEEP_CTL_SLE_SLP 0x00010000 /* Force chip sleep */
766#define AR5K_SLEEP_CTL_SLE_ALLOW 0x00020000 836#define AR5K_SLEEP_CTL_SLE_ALLOW 0x00020000
767#define AR5K_SLEEP_CTL_SLE_UNITS 0x00000008 /* [5211+] */ 837#define AR5K_SLEEP_CTL_SLE_UNITS 0x00000008 /* [5211+] */
838/* more bits */
768 839
769/* 840/*
770 * Interrupt pending register 841 * Interrupt pending register
@@ -776,13 +847,14 @@
776 * Sleep force register 847 * Sleep force register
777 */ 848 */
778#define AR5K_SFR 0x400c 849#define AR5K_SFR 0x400c
779#define AR5K_SFR_M 0x00000001 850#define AR5K_SFR_EN 0x00000001
780 851
781/* 852/*
782 * PCI configuration register 853 * PCI configuration register
783 */ 854 */
784#define AR5K_PCICFG 0x4010 /* Register Address */ 855#define AR5K_PCICFG 0x4010 /* Register Address */
785#define AR5K_PCICFG_EEAE 0x00000001 /* Eeprom access enable [5210] */ 856#define AR5K_PCICFG_EEAE 0x00000001 /* Eeprom access enable [5210] */
857#define AR5K_PCICFG_SLEEP_CLOCK_EN 0x00000002 /* Enable sleep clock (?) */
786#define AR5K_PCICFG_CLKRUNEN 0x00000004 /* CLKRUN enable [5211+] */ 858#define AR5K_PCICFG_CLKRUNEN 0x00000004 /* CLKRUN enable [5211+] */
787#define AR5K_PCICFG_EESIZE 0x00000018 /* Mask for EEPROM size [5211+] */ 859#define AR5K_PCICFG_EESIZE 0x00000018 /* Mask for EEPROM size [5211+] */
788#define AR5K_PCICFG_EESIZE_S 3 860#define AR5K_PCICFG_EESIZE_S 3
@@ -798,19 +870,21 @@
798#define AR5K_PCICFG_CBEFIX_DIS 0x00000400 /* Disable CBE fix (?) */ 870#define AR5K_PCICFG_CBEFIX_DIS 0x00000400 /* Disable CBE fix (?) */
799#define AR5K_PCICFG_SL_INTEN 0x00000800 /* Enable interrupts when asleep (?) */ 871#define AR5K_PCICFG_SL_INTEN 0x00000800 /* Enable interrupts when asleep (?) */
800#define AR5K_PCICFG_LED_BCTL 0x00001000 /* Led blink (?) [5210] */ 872#define AR5K_PCICFG_LED_BCTL 0x00001000 /* Led blink (?) [5210] */
801#define AR5K_PCICFG_SL_INPEN 0x00002800 /* Sleep even with pending interrupts (?) */ 873#define AR5K_PCICFG_UNK 0x00001000 /* Passed on some parts during attach (?) */
874#define AR5K_PCICFG_SL_INPEN 0x00002000 /* Sleep even with pending interrupts (?) */
802#define AR5K_PCICFG_SPWR_DN 0x00010000 /* Mask for power status */ 875#define AR5K_PCICFG_SPWR_DN 0x00010000 /* Mask for power status */
803#define AR5K_PCICFG_LEDMODE 0x000e0000 /* Ledmode [5211+] */ 876#define AR5K_PCICFG_LEDMODE 0x000e0000 /* Ledmode [5211+] */
804#define AR5K_PCICFG_LEDMODE_PROP 0x00000000 /* Blink on standard traffic [5211+] */ 877#define AR5K_PCICFG_LEDMODE_PROP 0x00000000 /* Blink on standard traffic [5211+] */
805#define AR5K_PCICFG_LEDMODE_PROM 0x00020000 /* Default mode (blink on any traffic) [5211+] */ 878#define AR5K_PCICFG_LEDMODE_PROM 0x00020000 /* Default mode (blink on any traffic) [5211+] */
806#define AR5K_PCICFG_LEDMODE_PWR 0x00040000 /* Some other blinking mode (?) [5211+] */ 879#define AR5K_PCICFG_LEDMODE_PWR 0x00040000 /* Some other blinking mode (?) [5211+] */
807#define AR5K_PCICFG_LEDMODE_RAND 0x00060000 /* Random blinking (?) [5211+] */ 880#define AR5K_PCICFG_LEDMODE_RAND 0x00060000 /* Random blinking (?) [5211+] */
808#define AR5K_PCICFG_LEDBLINK 0x00700000 881#define AR5K_PCICFG_LEDBLINK 0x00700000 /* Led blink rate */
809#define AR5K_PCICFG_LEDBLINK_S 20 882#define AR5K_PCICFG_LEDBLINK_S 20
810#define AR5K_PCICFG_LEDSLOW 0x00800000 /* Slow led blink rate (?) [5211+] */ 883#define AR5K_PCICFG_LEDSLOW 0x00800000 /* Slowest led blink rate [5211+] */
811#define AR5K_PCICFG_LEDSTATE \ 884#define AR5K_PCICFG_LEDSTATE \
812 (AR5K_PCICFG_LED | AR5K_PCICFG_LEDMODE | \ 885 (AR5K_PCICFG_LED | AR5K_PCICFG_LEDMODE | \
813 AR5K_PCICFG_LEDBLINK | AR5K_PCICFG_LEDSLOW) 886 AR5K_PCICFG_LEDBLINK | AR5K_PCICFG_LEDSLOW)
887#define AR5K_PCICFG_SLEEP_CLOCK_RATE 0x03000000 /* Sleep clock rate (field) */
814 888
815/* 889/*
816 * "General Purpose Input/Output" (GPIO) control register 890 * "General Purpose Input/Output" (GPIO) control register
@@ -947,7 +1021,7 @@
947#define AR5K_EEPROM_VERSION_4_4 0x4004 1021#define AR5K_EEPROM_VERSION_4_4 0x4004
948#define AR5K_EEPROM_VERSION_4_5 0x4005 1022#define AR5K_EEPROM_VERSION_4_5 0x4005
949#define AR5K_EEPROM_VERSION_4_6 0x4006 /* has ee_scaled_cck_delta */ 1023#define AR5K_EEPROM_VERSION_4_6 0x4006 /* has ee_scaled_cck_delta */
950#define AR5K_EEPROM_VERSION_4_7 0x3007 1024#define AR5K_EEPROM_VERSION_4_7 0x4007
951 1025
952#define AR5K_EEPROM_MODE_11A 0 1026#define AR5K_EEPROM_MODE_11A 0
953#define AR5K_EEPROM_MODE_11B 1 1027#define AR5K_EEPROM_MODE_11B 1
@@ -1023,10 +1097,14 @@
1023#define AR5K_EEPROM_STAT_WRDONE 0x00000008 /* EEPROM write successful */ 1097#define AR5K_EEPROM_STAT_WRDONE 0x00000008 /* EEPROM write successful */
1024 1098
1025/* 1099/*
1026 * EEPROM config register (?) 1100 * EEPROM config register
1027 */ 1101 */
1028#define AR5K_EEPROM_CFG 0x6010 1102#define AR5K_EEPROM_CFG 0x6010 /* Register Address */
1029 1103#define AR5K_EEPROM_CFG_SIZE_OVR 0x00000001
1104#define AR5K_EEPROM_CFG_WR_WAIT_DIS 0x00000004 /* Disable write wait */
1105#define AR5K_EEPROM_CFG_CLK_RATE 0x00000018 /* Clock rate */
1106#define AR5K_EEPROM_CFG_PROT_KEY 0x00ffff00 /* Protection key */
1107#define AR5K_EEPROM_CFG_LIND_EN 0x01000000 /* Enable length indicator (?) */
1030 1108
1031 1109
1032/* 1110/*
@@ -1050,7 +1128,7 @@
1050#define AR5K_STA_ID1 0x8004 /* Register Address */ 1128#define AR5K_STA_ID1 0x8004 /* Register Address */
1051#define AR5K_STA_ID1_AP 0x00010000 /* Set AP mode */ 1129#define AR5K_STA_ID1_AP 0x00010000 /* Set AP mode */
1052#define AR5K_STA_ID1_ADHOC 0x00020000 /* Set Ad-Hoc mode */ 1130#define AR5K_STA_ID1_ADHOC 0x00020000 /* Set Ad-Hoc mode */
1053#define AR5K_STA_ID1_PWR_SV 0x00040000 /* Power save reporting (?) */ 1131#define AR5K_STA_ID1_PWR_SV 0x00040000 /* Power save reporting */
1054#define AR5K_STA_ID1_NO_KEYSRCH 0x00080000 /* No key search */ 1132#define AR5K_STA_ID1_NO_KEYSRCH 0x00080000 /* No key search */
1055#define AR5K_STA_ID1_NO_PSPOLL 0x00100000 /* No power save polling [5210] */ 1133#define AR5K_STA_ID1_NO_PSPOLL 0x00100000 /* No power save polling [5210] */
1056#define AR5K_STA_ID1_PCF_5211 0x00100000 /* Enable PCF on [5211+] */ 1134#define AR5K_STA_ID1_PCF_5211 0x00100000 /* Enable PCF on [5211+] */
@@ -1059,9 +1137,13 @@
1059 AR5K_STA_ID1_PCF_5210 : AR5K_STA_ID1_PCF_5211) 1137 AR5K_STA_ID1_PCF_5210 : AR5K_STA_ID1_PCF_5211)
1060#define AR5K_STA_ID1_DEFAULT_ANTENNA 0x00200000 /* Use default antenna */ 1138#define AR5K_STA_ID1_DEFAULT_ANTENNA 0x00200000 /* Use default antenna */
1061#define AR5K_STA_ID1_DESC_ANTENNA 0x00400000 /* Update antenna from descriptor */ 1139#define AR5K_STA_ID1_DESC_ANTENNA 0x00400000 /* Update antenna from descriptor */
1062#define AR5K_STA_ID1_RTS_DEF_ANTENNA 0x00800000 /* Use default antenna for RTS (?) */ 1140#define AR5K_STA_ID1_RTS_DEF_ANTENNA 0x00800000 /* Use default antenna for RTS */
1063#define AR5K_STA_ID1_ACKCTS_6MB 0x01000000 /* Use 6Mbit/s for ACK/CTS (?) */ 1141#define AR5K_STA_ID1_ACKCTS_6MB 0x01000000 /* Use 6Mbit/s for ACK/CTS */
1064#define AR5K_STA_ID1_BASE_RATE_11B 0x02000000 /* Use 11b base rate (for ACK/CTS ?) [5211+] */ 1142#define AR5K_STA_ID1_BASE_RATE_11B 0x02000000 /* Use 11b base rate (for ACK/CTS ?) [5211+] */
1143#define AR5K_STA_ID1_SELF_GEN_SECTORE 0x04000000 /* Self generate sectore (?) */
1144#define AR5K_STA_ID1_CRYPT_MIC_EN 0x08000000 /* Enable MIC */
1145#define AR5K_STA_ID1_KEYSRCH_MODE 0x10000000 /* Keysearch mode (?) */
1146#define AR5K_STA_ID1_PRESERVE_SEQ_NUM 0x20000000 /* Preserve sequence number */
1065 1147
1066/* 1148/*
1067 * First BSSID register (MAC address, lower 32bits) 1149 * First BSSID register (MAC address, lower 32bits)
@@ -1117,7 +1199,7 @@
1117 * 1199 *
1118 * Retry limit register for 5210 (no QCU/DCU so it's done in PCU) 1200 * Retry limit register for 5210 (no QCU/DCU so it's done in PCU)
1119 */ 1201 */
1120#define AR5K_NODCU_RETRY_LMT 0x801c /*Register Address */ 1202#define AR5K_NODCU_RETRY_LMT 0x801c /* Register Address */
1121#define AR5K_NODCU_RETRY_LMT_SH_RETRY 0x0000000f /* Short retry limit mask */ 1203#define AR5K_NODCU_RETRY_LMT_SH_RETRY 0x0000000f /* Short retry limit mask */
1122#define AR5K_NODCU_RETRY_LMT_SH_RETRY_S 0 1204#define AR5K_NODCU_RETRY_LMT_SH_RETRY_S 0
1123#define AR5K_NODCU_RETRY_LMT_LG_RETRY 0x000000f0 /* Long retry mask */ 1205#define AR5K_NODCU_RETRY_LMT_LG_RETRY 0x000000f0 /* Long retry mask */
@@ -1136,9 +1218,9 @@
1136#define AR5K_USEC_5211 0x801c /* Register Address [5211+] */ 1218#define AR5K_USEC_5211 0x801c /* Register Address [5211+] */
1137#define AR5K_USEC (ah->ah_version == AR5K_AR5210 ? \ 1219#define AR5K_USEC (ah->ah_version == AR5K_AR5210 ? \
1138 AR5K_USEC_5210 : AR5K_USEC_5211) 1220 AR5K_USEC_5210 : AR5K_USEC_5211)
1139#define AR5K_USEC_1 0x0000007f 1221#define AR5K_USEC_1 0x0000007f /* clock cycles for 1us */
1140#define AR5K_USEC_1_S 0 1222#define AR5K_USEC_1_S 0
1141#define AR5K_USEC_32 0x00003f80 1223#define AR5K_USEC_32 0x00003f80 /* clock cycles for 1us while on 32Mhz clock */
1142#define AR5K_USEC_32_S 7 1224#define AR5K_USEC_32_S 7
1143#define AR5K_USEC_TX_LATENCY_5211 0x007fc000 1225#define AR5K_USEC_TX_LATENCY_5211 0x007fc000
1144#define AR5K_USEC_TX_LATENCY_5211_S 14 1226#define AR5K_USEC_TX_LATENCY_5211_S 14
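/*
 * [Editorial sketch] Registers that moved between MAC generations get a
 * version-conditional alias like AR5K_USEC above, so callers stay
 * generation-agnostic, e.g.:
 *
 *	ath5k_hw_reg_write(ah, usec, AR5K_USEC);
 *
 * (ath5k_hw_reg_write() and the usec value are assumed for illustration.)
 */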
@@ -1152,16 +1234,16 @@
1152/* 1234/*
1153 * PCU beacon control register 1235 * PCU beacon control register
1154 */ 1236 */
1155#define AR5K_BEACON_5210 0x8024 1237#define AR5K_BEACON_5210 0x8024 /* Register Address [5210] */
1156#define AR5K_BEACON_5211 0x8020 1238#define AR5K_BEACON_5211 0x8020 /* Register Address [5211+] */
1157#define AR5K_BEACON (ah->ah_version == AR5K_AR5210 ? \ 1239#define AR5K_BEACON (ah->ah_version == AR5K_AR5210 ? \
1158 AR5K_BEACON_5210 : AR5K_BEACON_5211) 1240 AR5K_BEACON_5210 : AR5K_BEACON_5211)
1159#define AR5K_BEACON_PERIOD 0x0000ffff 1241#define AR5K_BEACON_PERIOD 0x0000ffff /* Mask for beacon period */
1160#define AR5K_BEACON_PERIOD_S 0 1242#define AR5K_BEACON_PERIOD_S 0
1161#define AR5K_BEACON_TIM 0x007f0000 1243#define AR5K_BEACON_TIM 0x007f0000 /* Mask for TIM offset */
1162#define AR5K_BEACON_TIM_S 16 1244#define AR5K_BEACON_TIM_S 16
1163#define AR5K_BEACON_ENABLE 0x00800000 1245#define AR5K_BEACON_ENABLE 0x00800000 /* Enable beacons */
1164#define AR5K_BEACON_RESET_TSF 0x01000000 1246#define AR5K_BEACON_RESET_TSF 0x01000000 /* Force TSF reset */
1165 1247
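/*
 * [Editorial sketch] Building a PCU beacon control value from the field
 * masks above (period in TU, TIM offset into the beacon frame); the
 * interval and tim_offset variables are assumed, e.g.:
 *
 *	bcn = (interval << AR5K_BEACON_PERIOD_S) & AR5K_BEACON_PERIOD;
 *	bcn |= (tim_offset << AR5K_BEACON_TIM_S) & AR5K_BEACON_TIM;
 *	bcn |= AR5K_BEACON_ENABLE;
 */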
1166/* 1248/*
1167 * CFP period register 1249 * CFP period register
@@ -1234,7 +1316,6 @@
1234 1316
1235/* 1317/*
1236 * Receive filter register 1318 * Receive filter register
1237 * TODO: Get these out of ar5xxx.h on ath5k
1238 */ 1319 */
1239#define AR5K_RX_FILTER_5210 0x804c /* Register Address [5210] */ 1320#define AR5K_RX_FILTER_5210 0x804c /* Register Address [5210] */
1240#define AR5K_RX_FILTER_5211 0x803c /* Register Address [5211+] */ 1321#define AR5K_RX_FILTER_5211 0x803c /* Register Address [5211+] */
@@ -1307,11 +1388,11 @@
1307#define AR5K_DIAG_SW_5211 0x8048 /* Register Address [5211+] */ 1388#define AR5K_DIAG_SW_5211 0x8048 /* Register Address [5211+] */
1308#define AR5K_DIAG_SW (ah->ah_version == AR5K_AR5210 ? \ 1389#define AR5K_DIAG_SW (ah->ah_version == AR5K_AR5210 ? \
1309 AR5K_DIAG_SW_5210 : AR5K_DIAG_SW_5211) 1390 AR5K_DIAG_SW_5210 : AR5K_DIAG_SW_5211)
1310#define AR5K_DIAG_SW_DIS_WEP_ACK 0x00000001 1391#define AR5K_DIAG_SW_DIS_WEP_ACK 0x00000001 /* Disable ACKs if WEP key is invalid */
1311#define AR5K_DIAG_SW_DIS_ACK 0x00000002 /* Disable ACKs (?) */ 1392#define AR5K_DIAG_SW_DIS_ACK 0x00000002 /* Disable ACKs */
1312#define AR5K_DIAG_SW_DIS_CTS 0x00000004 /* Disable CTSs (?) */ 1393#define AR5K_DIAG_SW_DIS_CTS 0x00000004 /* Disable CTSs */
1313#define AR5K_DIAG_SW_DIS_ENC 0x00000008 /* Disable encryption (?) */ 1394#define AR5K_DIAG_SW_DIS_ENC 0x00000008 /* Disable encryption */
1314#define AR5K_DIAG_SW_DIS_DEC 0x00000010 /* Disable decryption (?) */ 1395#define AR5K_DIAG_SW_DIS_DEC 0x00000010 /* Disable decryption */
1315#define AR5K_DIAG_SW_DIS_TX 0x00000020 /* Disable transmit [5210] */ 1396#define AR5K_DIAG_SW_DIS_TX 0x00000020 /* Disable transmit [5210] */
1316#define AR5K_DIAG_SW_DIS_RX_5210 0x00000040 /* Disable receive */ 1397#define AR5K_DIAG_SW_DIS_RX_5210 0x00000040 /* Disable receive */
1317#define AR5K_DIAG_SW_DIS_RX_5211 0x00000020 1398#define AR5K_DIAG_SW_DIS_RX_5211 0x00000020
@@ -1329,13 +1410,13 @@
1329#define AR5K_DIAG_SW_CHAN_INFO_5211 0x00000100 1410#define AR5K_DIAG_SW_CHAN_INFO_5211 0x00000100
1330#define AR5K_DIAG_SW_CHAN_INFO (ah->ah_version == AR5K_AR5210 ? \ 1411#define AR5K_DIAG_SW_CHAN_INFO (ah->ah_version == AR5K_AR5210 ? \
1331 AR5K_DIAG_SW_CHAN_INFO_5210 : AR5K_DIAG_SW_CHAN_INFO_5211) 1412 AR5K_DIAG_SW_CHAN_INFO_5210 : AR5K_DIAG_SW_CHAN_INFO_5211)
1332#define AR5K_DIAG_SW_EN_SCRAM_SEED_5211 0x00000200 /* Scrambler seed (?) */ 1413#define AR5K_DIAG_SW_EN_SCRAM_SEED_5211 0x00000200 /* Enable scrambler seed */
1333#define AR5K_DIAG_SW_EN_SCRAM_SEED_5210 0x00000400 1414#define AR5K_DIAG_SW_EN_SCRAM_SEED_5210 0x00000400
1334#define AR5K_DIAG_SW_EN_SCRAM_SEED (ah->ah_version == AR5K_AR5210 ? \ 1415#define AR5K_DIAG_SW_EN_SCRAM_SEED (ah->ah_version == AR5K_AR5210 ? \
1335 AR5K_DIAG_SW_EN_SCRAM_SEED_5210 : AR5K_DIAG_SW_EN_SCRAM_SEED_5211) 1416 AR5K_DIAG_SW_EN_SCRAM_SEED_5210 : AR5K_DIAG_SW_EN_SCRAM_SEED_5211)
1336#define AR5K_DIAG_SW_ECO_ENABLE 0x00000400 /* [5211+] */ 1417#define AR5K_DIAG_SW_ECO_ENABLE 0x00000400 /* [5211+] */
1337#define AR5K_DIAG_SW_SCVRAM_SEED 0x0003f800 /* [5210] */ 1418#define AR5K_DIAG_SW_SCVRAM_SEED 0x0003f800 /* [5210] */
1338#define AR5K_DIAG_SW_SCRAM_SEED_M 0x0001fc00 /* Scrambler seed mask (?) */ 1419#define AR5K_DIAG_SW_SCRAM_SEED_M 0x0001fc00 /* Scrambler seed mask */
1339#define AR5K_DIAG_SW_SCRAM_SEED_S 10 1420#define AR5K_DIAG_SW_SCRAM_SEED_S 10
1340#define AR5K_DIAG_SW_DIS_SEQ_INC 0x00040000 /* Disable seqnum increment (?)[5210] */ 1421#define AR5K_DIAG_SW_DIS_SEQ_INC 0x00040000 /* Disable seqnum increment (?)[5210] */
1341#define AR5K_DIAG_SW_FRAME_NV0_5210 0x00080000 1422#define AR5K_DIAG_SW_FRAME_NV0_5210 0x00080000
@@ -1344,6 +1425,7 @@
1344 AR5K_DIAG_SW_FRAME_NV0_5210 : AR5K_DIAG_SW_FRAME_NV0_5211) 1425 AR5K_DIAG_SW_FRAME_NV0_5210 : AR5K_DIAG_SW_FRAME_NV0_5211)
1345#define AR5K_DIAG_SW_OBSPT_M 0x000c0000 1426#define AR5K_DIAG_SW_OBSPT_M 0x000c0000
1346#define AR5K_DIAG_SW_OBSPT_S 18 1427#define AR5K_DIAG_SW_OBSPT_S 18
1428/* more bits */
1347 1429
1348/* 1430/*
1349 * TSF (clock) register (lower 32 bits) 1431 * TSF (clock) register (lower 32 bits)
@@ -1369,15 +1451,34 @@
1369/* 1451/*
1370 * ADDAC test register [5211+] 1452 * ADDAC test register [5211+]
1371 */ 1453 */
1372#define AR5K_ADDAC_TEST 0x8054 1454#define AR5K_ADDAC_TEST 0x8054 /* Register Address */
1373#define AR5K_ADDAC_TEST_TXCONT 0x00000001 1455#define AR5K_ADDAC_TEST_TXCONT 0x00000001 /* Test continuous tx */
1456#define AR5K_ADDAC_TEST_TST_MODE 0x00000002 /* Test mode */
1457#define AR5K_ADDAC_TEST_LOOP_EN 0x00000004 /* Enable loop */
1458#define AR5K_ADDAC_TEST_LOOP_LEN 0x00000008 /* Loop length (field) */
1459#define AR5K_ADDAC_TEST_USE_U8 0x00004000 /* Use upper 8 bits */
1460#define AR5K_ADDAC_TEST_MSB 0x00008000 /* State of MSB */
1461#define AR5K_ADDAC_TEST_TRIG_SEL 0x00010000 /* Trigger select */
1462#define AR5K_ADDAC_TEST_TRIG_PTY 0x00020000 /* Trigger polarity */
1463#define AR5K_ADDAC_TEST_RXCONT 0x00040000 /* Continuous capture */
1464#define AR5K_ADDAC_TEST_CAPTURE 0x00080000 /* Begin capture */
1465#define AR5K_ADDAC_TEST_TST_ARM 0x00100000 /* Test ARM (Adaptive Radio Mode ?) */
1374 1466
1375/* 1467/*
1376 * Default antenna register [5211+] 1468 * Default antenna register [5211+]
1377 */ 1469 */
1378#define AR5K_DEFAULT_ANTENNA 0x8058 1470#define AR5K_DEFAULT_ANTENNA 0x8058
1379 1471
1472/*
1473 * Frame control QoS mask register (?) [5211+]
1474 * (FC_QOS_MASK)
1475 */
1476#define AR5K_FRAME_CTL_QOSM 0x805c
1380 1477
1478/*
1479 * Seq mask register (?) [5211+]
1480 */
1481#define AR5K_SEQ_MASK 0x8060
1381 1482
1382/* 1483/*
1383 * Retry count register [5210] 1484 * Retry count register [5210]
@@ -1449,124 +1550,242 @@
1449/* 1550/*
1450 * XR (eXtended Range) mode register 1551 * XR (eXtended Range) mode register
1451 */ 1552 */
1452#define AR5K_XRMODE 0x80c0 1553#define AR5K_XRMODE 0x80c0 /* Register Address */
1453#define AR5K_XRMODE_POLL_TYPE_M 0x0000003f 1554#define AR5K_XRMODE_POLL_TYPE_M 0x0000003f /* Mask for Poll type (?) */
1454#define AR5K_XRMODE_POLL_TYPE_S 0 1555#define AR5K_XRMODE_POLL_TYPE_S 0
1455#define AR5K_XRMODE_POLL_SUBTYPE_M 0x0000003c 1556#define AR5K_XRMODE_POLL_SUBTYPE_M 0x0000003c /* Mask for Poll subtype (?) */
1456#define AR5K_XRMODE_POLL_SUBTYPE_S 2 1557#define AR5K_XRMODE_POLL_SUBTYPE_S 2
1457#define AR5K_XRMODE_POLL_WAIT_ALL 0x00000080 1558#define AR5K_XRMODE_POLL_WAIT_ALL 0x00000080 /* Wait for poll */
1458#define AR5K_XRMODE_SIFS_DELAY 0x000fff00 1559#define AR5K_XRMODE_SIFS_DELAY 0x000fff00 /* Mask for SIFS delay */
1459#define AR5K_XRMODE_FRAME_HOLD_M 0xfff00000 1560#define AR5K_XRMODE_FRAME_HOLD_M 0xfff00000 /* Mask for frame hold (?) */
1460#define AR5K_XRMODE_FRAME_HOLD_S 20 1561#define AR5K_XRMODE_FRAME_HOLD_S 20
1461 1562
1462/* 1563/*
1463 * XR delay register 1564 * XR delay register
1464 */ 1565 */
1465#define AR5K_XRDELAY 0x80c4 1566#define AR5K_XRDELAY 0x80c4 /* Register Address */
1466#define AR5K_XRDELAY_SLOT_DELAY_M 0x0000ffff 1567#define AR5K_XRDELAY_SLOT_DELAY_M 0x0000ffff /* Mask for slot delay */
1467#define AR5K_XRDELAY_SLOT_DELAY_S 0 1568#define AR5K_XRDELAY_SLOT_DELAY_S 0
1468#define AR5K_XRDELAY_CHIRP_DELAY_M 0xffff0000 1569#define AR5K_XRDELAY_CHIRP_DELAY_M 0xffff0000 /* Mask for CHIRP data delay */
1469#define AR5K_XRDELAY_CHIRP_DELAY_S 16 1570#define AR5K_XRDELAY_CHIRP_DELAY_S 16
1470 1571
1471/* 1572/*
1472 * XR timeout register 1573 * XR timeout register
1473 */ 1574 */
1474#define AR5K_XRTIMEOUT 0x80c8 1575#define AR5K_XRTIMEOUT 0x80c8 /* Register Address */
1475#define AR5K_XRTIMEOUT_CHIRP_M 0x0000ffff 1576#define AR5K_XRTIMEOUT_CHIRP_M 0x0000ffff /* Mask for CHIRP timeout */
1476#define AR5K_XRTIMEOUT_CHIRP_S 0 1577#define AR5K_XRTIMEOUT_CHIRP_S 0
1477#define AR5K_XRTIMEOUT_POLL_M 0xffff0000 1578#define AR5K_XRTIMEOUT_POLL_M 0xffff0000 /* Mask for Poll timeout */
1478#define AR5K_XRTIMEOUT_POLL_S 16 1579#define AR5K_XRTIMEOUT_POLL_S 16
1479 1580
1480/* 1581/*
1481 * XR chirp register 1582 * XR chirp register
1482 */ 1583 */
1483#define AR5K_XRCHIRP 0x80cc 1584#define AR5K_XRCHIRP 0x80cc /* Register Address */
1484#define AR5K_XRCHIRP_SEND 0x00000001 1585#define AR5K_XRCHIRP_SEND 0x00000001 /* Send CHIRP */
1485#define AR5K_XRCHIRP_GAP 0xffff0000 1586#define AR5K_XRCHIRP_GAP 0xffff0000 /* Mask for CHIRP gap (?) */
1486 1587
1487/* 1588/*
1488 * XR stomp register 1589 * XR stomp register
1489 */ 1590 */
1490#define AR5K_XRSTOMP 0x80d0 1591#define AR5K_XRSTOMP 0x80d0 /* Register Address */
1491#define AR5K_XRSTOMP_TX 0x00000001 1592#define AR5K_XRSTOMP_TX 0x00000001 /* Stomp Tx (?) */
1492#define AR5K_XRSTOMP_RX_ABORT 0x00000002 1593#define AR5K_XRSTOMP_RX 0x00000002 /* Stomp Rx (?) */
1493#define AR5K_XRSTOMP_RSSI_THRES 0x0000ff00 1594#define AR5K_XRSTOMP_TX_RSSI 0x00000004 /* Stomp Tx RSSI (?) */
1595#define AR5K_XRSTOMP_TX_BSSID 0x00000008 /* Stomp Tx BSSID (?) */
1596#define AR5K_XRSTOMP_DATA 0x00000010 /* Stomp data (?)*/
1597#define AR5K_XRSTOMP_RSSI_THRES 0x0000ff00 /* Mask for XR RSSI threshold */
1494 1598
1495/* 1599/*
1496 * First enhanced sleep register 1600 * First enhanced sleep register
1497 */ 1601 */
1498#define AR5K_SLEEP0 0x80d4 1602#define AR5K_SLEEP0 0x80d4 /* Register Address */
1499#define AR5K_SLEEP0_NEXT_DTIM 0x0007ffff 1603#define AR5K_SLEEP0_NEXT_DTIM 0x0007ffff /* Mask for next DTIM (?) */
1500#define AR5K_SLEEP0_NEXT_DTIM_S 0 1604#define AR5K_SLEEP0_NEXT_DTIM_S 0
1501#define AR5K_SLEEP0_ASSUME_DTIM 0x00080000 1605#define AR5K_SLEEP0_ASSUME_DTIM 0x00080000 /* Assume DTIM */
1502#define AR5K_SLEEP0_ENH_SLEEP_EN 0x00100000 1606#define AR5K_SLEEP0_ENH_SLEEP_EN 0x00100000 /* Enable enhanced sleep control */
1503#define AR5K_SLEEP0_CABTO 0xff000000 1607#define AR5K_SLEEP0_CABTO 0xff000000 /* Mask for CAB Time Out */
1504#define AR5K_SLEEP0_CABTO_S 24 1608#define AR5K_SLEEP0_CABTO_S 24
1505 1609
1506/* 1610/*
1507 * Second enhanced sleep register 1611 * Second enhanced sleep register
1508 */ 1612 */
1509#define AR5K_SLEEP1 0x80d8 1613#define AR5K_SLEEP1 0x80d8 /* Register Address */
1510#define AR5K_SLEEP1_NEXT_TIM 0x0007ffff 1614#define AR5K_SLEEP1_NEXT_TIM 0x0007ffff /* Mask for next TIM (?) */
1511#define AR5K_SLEEP1_NEXT_TIM_S 0 1615#define AR5K_SLEEP1_NEXT_TIM_S 0
1512#define AR5K_SLEEP1_BEACON_TO 0xff000000 1616#define AR5K_SLEEP1_BEACON_TO 0xff000000 /* Mask for Beacon Time Out */
1513#define AR5K_SLEEP1_BEACON_TO_S 24 1617#define AR5K_SLEEP1_BEACON_TO_S 24
1514 1618
1515/* 1619/*
1516 * Third enhanced sleep register 1620 * Third enhanced sleep register
1517 */ 1621 */
1518#define AR5K_SLEEP2 0x80dc 1622#define AR5K_SLEEP2 0x80dc /* Register Address */
1519#define AR5K_SLEEP2_TIM_PER 0x0000ffff 1623#define AR5K_SLEEP2_TIM_PER 0x0000ffff /* Mask for TIM period (?) */
1520#define AR5K_SLEEP2_TIM_PER_S 0 1624#define AR5K_SLEEP2_TIM_PER_S 0
1521#define AR5K_SLEEP2_DTIM_PER 0xffff0000 1625#define AR5K_SLEEP2_DTIM_PER 0xffff0000 /* Mask for DTIM period (?) */
1522#define AR5K_SLEEP2_DTIM_PER_S 16 1626#define AR5K_SLEEP2_DTIM_PER_S 16
1523 1627
1524/* 1628/*
1525 * BSSID mask registers 1629 * BSSID mask registers
1526 */ 1630 */
1527#define AR5K_BSS_IDM0 0x80e0 1631#define AR5K_BSS_IDM0 0x80e0 /* Upper bits */
1528#define AR5K_BSS_IDM1 0x80e4 1632#define AR5K_BSS_IDM1 0x80e4 /* Lower bits */
1529 1633
1530/* 1634/*
1531 * TX power control (TPC) register 1635 * TX power control (TPC) register
1636 *
1637 * XXX: PCDAC steps (0.5dbm) or DBM ?
1638 *
1639 * XXX: Mask changes for newer chips to 7f
1640 * like tx power table ?
1532 */ 1641 */
1533#define AR5K_TXPC 0x80e8 1642#define AR5K_TXPC 0x80e8 /* Register Address */
1534#define AR5K_TXPC_ACK_M 0x0000003f 1643#define AR5K_TXPC_ACK_M 0x0000003f /* Mask for ACK tx power */
1535#define AR5K_TXPC_ACK_S 0 1644#define AR5K_TXPC_ACK_S 0
1536#define AR5K_TXPC_CTS_M 0x00003f00 1645#define AR5K_TXPC_CTS_M 0x00003f00 /* Mask for CTS tx power */
1537#define AR5K_TXPC_CTS_S 8 1646#define AR5K_TXPC_CTS_S 8
1538#define AR5K_TXPC_CHIRP_M 0x003f0000 1647#define AR5K_TXPC_CHIRP_M 0x003f0000 /* Mask for CHIRP tx power */
1539#define AR5K_TXPC_CHIRP_S 22 1648#define AR5K_TXPC_CHIRP_S 22
1540 1649
1541/* 1650/*
1542 * Profile count registers 1651 * Profile count registers
1543 */ 1652 */
1544#define AR5K_PROFCNT_TX 0x80ec 1653#define AR5K_PROFCNT_TX 0x80ec /* Tx count */
1545#define AR5K_PROFCNT_RX 0x80f0 1654#define AR5K_PROFCNT_RX 0x80f0 /* Rx count */
1546#define AR5K_PROFCNT_RXCLR 0x80f4 1655#define AR5K_PROFCNT_RXCLR 0x80f4 /* Clear Rx count */
1547#define AR5K_PROFCNT_CYCLE 0x80f8 1656#define AR5K_PROFCNT_CYCLE 0x80f8 /* Cycle count (?) */
1657
1658/*
1659 * Quiet (period) control registers (?)
1660 */
1661#define AR5K_QUIET_CTL1 0x80fc /* Register Address */
1662#define AR5K_QUIET_CTL1_NEXT_QT 0x0000ffff /* Mask for next quiet (period?) (?) */
1663#define AR5K_QUIET_CTL1_QT_EN 0x00010000 /* Enable quiet (period?) */
1664#define AR5K_QUIET_CTL2 0x8100 /* Register Address */
1665#define AR5K_QUIET_CTL2_QT_PER 0x0000ffff /* Mask for quiet period (?) */
1666#define AR5K_QUIET_CTL2_QT_DUR 0xffff0000 /* Mask for quiet duration (?) */
1548 1667
1549/* 1668/*
1550 * TSF parameter register 1669 * TSF parameter register
1551 */ 1670 */
1552#define AR5K_TSF_PARM 0x8104 1671#define AR5K_TSF_PARM 0x8104 /* Register Address */
1553#define AR5K_TSF_PARM_INC_M 0x000000ff 1672#define AR5K_TSF_PARM_INC_M 0x000000ff /* Mask for TSF increment */
1554#define AR5K_TSF_PARM_INC_S 0 1673#define AR5K_TSF_PARM_INC_S 0
1555 1674
1556/* 1675/*
1676 * QoS register (?)
1677 */
1678#define AR5K_QOS 0x8108 /* Register Address */
1679#define AR5K_QOS_NOACK_2BIT_VALUES 0x00000000 /* (field) */
1680#define AR5K_QOS_NOACK_BIT_OFFSET 0x00000020 /* (field) */
1681#define AR5K_QOS_NOACK_BYTE_OFFSET 0x00000080 /* (field) */
1682
1683/*
1557 * PHY error filter register 1684 * PHY error filter register
1558 */ 1685 */
1559#define AR5K_PHY_ERR_FIL 0x810c 1686#define AR5K_PHY_ERR_FIL 0x810c
1560#define AR5K_PHY_ERR_FIL_RADAR 0x00000020 1687#define AR5K_PHY_ERR_FIL_RADAR 0x00000020 /* Radar signal */
1561#define AR5K_PHY_ERR_FIL_OFDM 0x00020000 1688#define AR5K_PHY_ERR_FIL_OFDM 0x00020000 /* OFDM false detect (ANI) */
1562#define AR5K_PHY_ERR_FIL_CCK 0x02000000 1689#define AR5K_PHY_ERR_FIL_CCK 0x02000000 /* CCK false detect (ANI) */
1690
1691/*
1692 * XR latency register
1693 */
1694#define AR5K_XRLAT_TX 0x8110
1563 1695
1564/* 1696/*
1565 * Rate duration register 1697 * ACK SIFS register
1698 */
1699#define AR5K_ACKSIFS 0x8114 /* Register Address */
1700#define AR5K_ACKSIFS_INC 0x00000000 /* ACK SIFS Increment (field) */
1701
1702/*
1703 * MIC QoS control register (?)
1704 */
1705#define AR5K_MIC_QOS_CTL 0x8118 /* Register Address */
1706#define AR5K_MIC_QOS_CTL_0 0x00000001 /* MIC QoS control 0 (?) */
1707#define AR5K_MIC_QOS_CTL_1 0x00000004 /* MIC QoS control 1 (?) */
1708#define AR5K_MIC_QOS_CTL_2 0x00000010 /* MIC QoS control 2 (?) */
1709#define AR5K_MIC_QOS_CTL_3 0x00000040 /* MIC QoS control 3 (?) */
1710#define AR5K_MIC_QOS_CTL_4 0x00000100 /* MIC QoS control 4 (?) */
1711#define AR5K_MIC_QOS_CTL_5 0x00000400 /* MIC QoS control 5 (?) */
1712#define AR5K_MIC_QOS_CTL_6 0x00001000 /* MIC QoS control 6 (?) */
1713#define AR5K_MIC_QOS_CTL_7 0x00004000 /* MIC QoS control 7 (?) */
1714#define AR5K_MIC_QOS_CTL_MQ_EN 0x00010000 /* Enable MIC QoS */
1715
1716/*
1717 * MIC QoS select register (?)
1718 */
1719#define AR5K_MIC_QOS_SEL 0x811c
1720#define AR5K_MIC_QOS_SEL_0 0x00000001
1721#define AR5K_MIC_QOS_SEL_1 0x00000010
1722#define AR5K_MIC_QOS_SEL_2 0x00000100
1723#define AR5K_MIC_QOS_SEL_3 0x00001000
1724#define AR5K_MIC_QOS_SEL_4 0x00010000
1725#define AR5K_MIC_QOS_SEL_5 0x00100000
1726#define AR5K_MIC_QOS_SEL_6 0x01000000
1727#define AR5K_MIC_QOS_SEL_7 0x10000000
1728
1729/*
1730 * Misc mode control register (?)
1731 */
1732#define AR5K_MISC_MODE 0x8120 /* Register Address */
1733#define AR5K_MISC_MODE_FBSSID_MATCH 0x00000001 /* Force BSSID match */
1734#define AR5K_MISC_MODE_ACKSIFS_MEM 0x00000002 /* ACK SIFS memory (?) */
1735/* more bits */
1736
1737/*
1738 * OFDM Filter counter
1739 */
1740#define AR5K_OFDM_FIL_CNT 0x8124
1741
1742/*
1743 * CCK Filter counter
1744 */
1745#define AR5K_CCK_FIL_CNT 0x8128
1746
1747/*
1748 * PHY Error Counters (?)
1749 */
1750#define AR5K_PHYERR_CNT1 0x812c
1751#define AR5K_PHYERR_CNT1_MASK 0x8130
1752
1753#define AR5K_PHYERR_CNT2 0x8134
1754#define AR5K_PHYERR_CNT2_MASK 0x8138
1755
1756/*
1757 * TSF Threshold register (?)
1758 */
1759#define AR5K_TSF_THRES 0x813c
1760
1761/*
1762 * Rate -> ACK SIFS mapping table (32 entries)
1763 */
1764#define AR5K_RATE_ACKSIFS_BASE 0x8680 /* Register Address */
1765#define AR5K_RATE_ACKSIFS(_n) (AR5K_RATE_ACKSIFS_BASE + ((_n) << 2))
1766#define AR5K_RATE_ACKSIFS_NORMAL 0x00000001 /* Normal SIFS (field) */
1767#define AR5K_RATE_ACKSIFS_TURBO 0x00000400 /* Turbo SIFS (field) */
1768
1769/*
1770 * Rate -> duration mapping table (32 entries)
1566 */ 1771 */
1567#define AR5K_RATE_DUR_BASE 0x8700 1772#define AR5K_RATE_DUR_BASE 0x8700
1568#define AR5K_RATE_DUR(_n) (AR5K_RATE_DUR_BASE + ((_n) << 2)) 1773#define AR5K_RATE_DUR(_n) (AR5K_RATE_DUR_BASE + ((_n) << 2))
1569 1774
1775/*
1776 * Rate -> db mapping table
1777 * (8 entries, each one has 4 8bit fields)
1778 */
1779#define AR5K_RATE2DB_BASE 0x87c0
1780#define AR5K_RATE2DB(_n) (AR5K_RATE2DB_BASE + ((_n) << 2))
1781
1782/*
1783 * db -> Rate mapping table
1784 * (8 entries, each one has 4 8bit fields)
1785 */
1786#define AR5K_DB2RATE_BASE 0x87e0
1787#define AR5K_DB2RATE(_n) (AR5K_DB2RATE_BASE + ((_n) << 2))
1788
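/*
 * [Editorial sketch] The mapping tables are indexed by rate code and each
 * entry is one 32bit word, so the macros step the address by 4, e.g.:
 *
 *	AR5K_RATE_DUR(0x0b) -> 0x8700 + (0x0b << 2) = 0x872c
 */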
1570/*===5212 end===*/ 1789/*===5212 end===*/
1571 1790
1572/* 1791/*
@@ -1613,12 +1832,34 @@
1613/*===PHY REGISTERS===*/ 1832/*===PHY REGISTERS===*/
1614 1833
1615/* 1834/*
1616 * PHY register 1835 * PHY registers start
1617 */ 1836 */
1618#define AR5K_PHY_BASE 0x9800 1837#define AR5K_PHY_BASE 0x9800
1619#define AR5K_PHY(_n) (AR5K_PHY_BASE + ((_n) << 2)) 1838#define AR5K_PHY(_n) (AR5K_PHY_BASE + ((_n) << 2))
1620#define AR5K_PHY_SHIFT_2GHZ 0x00004007 1839
1621#define AR5K_PHY_SHIFT_5GHZ 0x00000007 1840/*
1841 * TST_2 (Misc config parameters)
1842 */
1843#define AR5K_PHY_TST2 0x9800 /* Register Address */
1844#define AR5K_PHY_TST2_TRIG_SEL 0x00000001 /* Trigger select (?) (field ?) */
1845#define AR5K_PHY_TST2_TRIG 0x00000010 /* Trigger (?) (field ?) */
1846#define AR5K_PHY_TST2_CBUS_MODE 0x00000100 /* Cardbus mode (?) */
1847/* bit reserved */
 1848#define AR5K_PHY_TST2_CLK32 0x00000400 /* CLK_OUT is CLK32 (32kHz external) */
1849#define AR5K_PHY_TST2_CHANCOR_DUMP_EN 0x00000800 /* Enable Chancor dump (?) */
1850#define AR5K_PHY_TST2_EVEN_CHANCOR_DUMP 0x00001000 /* Even Chancor dump (?) */
1851#define AR5K_PHY_TST2_RFSILENT_EN 0x00002000 /* Enable RFSILENT */
1852#define AR5K_PHY_TST2_ALT_RFDATA 0x00004000 /* Alternate RFDATA (5-2GHz switch) */
1853#define AR5K_PHY_TST2_MINI_OBS_EN 0x00008000 /* Enable mini OBS (?) */
1854#define AR5K_PHY_TST2_RX2_IS_RX5_INV 0x00010000 /* 2GHz rx path is the 5GHz path inverted (?) */
1855#define AR5K_PHY_TST2_SLOW_CLK160 0x00020000 /* Slow CLK160 (?) */
1856#define AR5K_PHY_TST2_AGC_OBS_SEL_3 0x00040000 /* AGC OBS Select 3 (?) */
1857#define AR5K_PHY_TST2_BBB_OBS_SEL 0x00080000 /* BB OBS Select (field ?) */
1858#define AR5K_PHY_TST2_ADC_OBS_SEL 0x00800000 /* ADC OBS Select (field ?) */
1859#define AR5K_PHY_TST2_RX_CLR_SEL 0x08000000 /* RX Clear Select (?) */
1860#define AR5K_PHY_TST2_FORCE_AGC_CLR 0x10000000 /* Force AGC clear (?) */
1861#define AR5K_PHY_SHIFT_2GHZ 0x00004007 /* Used to access 2GHz radios */
1862#define AR5K_PHY_SHIFT_5GHZ 0x00000007 /* Used to access 5GHz radios (default) */
1622 1863
1623/* 1864/*
1624 * PHY frame control register [5110] /turbo mode register [5111+] 1865 * PHY frame control register [5110] /turbo mode register [5111+]
@@ -1630,18 +1871,21 @@
1630 * a "turbo mode register" for 5110. We treat this one as 1871 * a "turbo mode register" for 5110. We treat this one as
1631 * a frame control register for 5110 below. 1872 * a frame control register for 5110 below.
1632 */ 1873 */
1633#define AR5K_PHY_TURBO 0x9804 1874#define AR5K_PHY_TURBO 0x9804 /* Register Address */
1634#define AR5K_PHY_TURBO_MODE 0x00000001 1875#define AR5K_PHY_TURBO_MODE 0x00000001 /* Enable turbo mode */
1635#define AR5K_PHY_TURBO_SHORT 0x00000002 1876#define AR5K_PHY_TURBO_SHORT 0x00000002 /* Short mode (20MHz channels) (?) */
1636 1877
1637/* 1878/*
1638 * PHY agility command register 1879 * PHY agility command register
1880 * (aka TST_1)
1639 */ 1881 */
1640#define AR5K_PHY_AGC 0x9808 1882#define AR5K_PHY_AGC 0x9808 /* Register Address */
1641#define AR5K_PHY_AGC_DISABLE 0x08000000 1883#define AR5K_PHY_TST1 0x9808
1884#define AR5K_PHY_AGC_DISABLE 0x08000000 /* Disable AGC to A2 (?)*/
1885#define AR5K_PHY_TST1_TXHOLD 0x00003800 /* Set tx hold (?) */
1642 1886
1643/* 1887/*
1644 * PHY timing register [5112+] 1888 * PHY timing register 3 [5112+]
1645 */ 1889 */
1646#define AR5K_PHY_TIMING_3 0x9814 1890#define AR5K_PHY_TIMING_3 0x9814
1647#define AR5K_PHY_TIMING_3_DSC_MAN 0xfffe0000 1891#define AR5K_PHY_TIMING_3_DSC_MAN 0xfffe0000
@@ -1657,26 +1901,81 @@
1657/* 1901/*
1658 * PHY activation register 1902 * PHY activation register
1659 */ 1903 */
1660#define AR5K_PHY_ACT 0x981c 1904#define AR5K_PHY_ACT 0x981c /* Register Address */
1661#define AR5K_PHY_ACT_ENABLE 0x00000001 1905#define AR5K_PHY_ACT_ENABLE 0x00000001 /* Activate PHY */
1662#define AR5K_PHY_ACT_DISABLE 0x00000002 1906#define AR5K_PHY_ACT_DISABLE 0x00000002 /* Deactivate PHY */
1907
1908/*
1909 * PHY RF control registers
 1910 * (these appear to be delay times;
 1911 * the calibration values exist
 1912 * in EEPROM)
1913 */
1914#define AR5K_PHY_RF_CTL2 0x9824 /* Register Address */
1915#define AR5K_PHY_RF_CTL2_TXF2TXD_START 0x0000000f /* Mask for TX frame to TX d(esc?) start */
1916
1917#define AR5K_PHY_RF_CTL3 0x9828 /* Register Address */
1918#define AR5K_PHY_RF_CTL3_TXE2XLNA_ON 0x0000000f /* Mask for TX end to XLNA on */
1919
1920#define AR5K_PHY_RF_CTL4 0x9834 /* Register Address */
1921#define AR5K_PHY_RF_CTL4_TXF2XPA_A_ON 0x00000001 /* TX frame to XPA A on (field) */
1922#define AR5K_PHY_RF_CTL4_TXF2XPA_B_ON 0x00000100 /* TX frame to XPA B on (field) */
1923#define AR5K_PHY_RF_CTL4_TXE2XPA_A_OFF 0x00010000 /* TX end to XPA A off (field) */
1924#define AR5K_PHY_RF_CTL4_TXE2XPA_B_OFF 0x01000000 /* TX end to XPA B off (field) */
1925
1926/*
1927 * Pre-Amplifier control register
1928 * (XPA -> external pre-amplifier)
1929 */
1930#define AR5K_PHY_PA_CTL 0x9838 /* Register Address */
1931#define AR5K_PHY_PA_CTL_XPA_A_HI 0x00000001 /* XPA A high (?) */
1932#define AR5K_PHY_PA_CTL_XPA_B_HI 0x00000002 /* XPA B high (?) */
1933#define AR5K_PHY_PA_CTL_XPA_A_EN 0x00000004 /* Enable XPA A */
1934#define AR5K_PHY_PA_CTL_XPA_B_EN 0x00000008 /* Enable XPA B */
1935
1936/*
1937 * PHY settling register
1938 */
1939#define AR5K_PHY_SETTLING 0x9844 /* Register Address */
1940#define AR5K_PHY_SETTLING_AGC 0x0000007f /* Mask for AGC settling time */
 1941#define AR5K_PHY_SETTLING_SWITCH 0x00003f80 /* Mask for Switch settling time */
1942
1943/*
1944 * PHY Gain registers
1945 */
1946#define AR5K_PHY_GAIN 0x9848 /* Register Address */
1947#define AR5K_PHY_GAIN_TXRX_ATTEN 0x0003f000 /* Mask for TX-RX Attenuation */
1948
1949#define AR5K_PHY_GAIN_OFFSET 0x984c /* Register Address */
1950#define AR5K_PHY_GAIN_OFFSET_RXTX_FLAG 0x00020000 /* RX-TX flag (?) */
1951
1952/*
1953 * Desired size register
 1954 * (for more info read the ANI patent)
1955 */
1956#define AR5K_PHY_DESIRED_SIZE 0x9850 /* Register Address */
1957#define AR5K_PHY_DESIRED_SIZE_ADC 0x000000ff /* Mask for ADC desired size */
1958#define AR5K_PHY_DESIRED_SIZE_PGA 0x0000ff00 /* Mask for PGA desired size */
1959#define AR5K_PHY_DESIRED_SIZE_TOT 0x0ff00000 /* Mask for Total desired size (?) */
1663 1960
1664/* 1961/*
1665 * PHY signal register 1962 * PHY signal register
 1963 * (for more info read the ANI patent)
1666 */ 1964 */
1667#define AR5K_PHY_SIG 0x9858 1965#define AR5K_PHY_SIG 0x9858 /* Register Address */
1668#define AR5K_PHY_SIG_FIRSTEP 0x0003f000 1966#define AR5K_PHY_SIG_FIRSTEP 0x0003f000 /* Mask for FIRSTEP */
1669#define AR5K_PHY_SIG_FIRSTEP_S 12 1967#define AR5K_PHY_SIG_FIRSTEP_S 12
1670#define AR5K_PHY_SIG_FIRPWR 0x03fc0000 1968#define AR5K_PHY_SIG_FIRPWR 0x03fc0000 /* Mask for FIRPWR */
1671#define AR5K_PHY_SIG_FIRPWR_S 18 1969#define AR5K_PHY_SIG_FIRPWR_S 18
1672 1970
1673/* 1971/*
1674 * PHY coarse agility control register 1972 * PHY coarse agility control register
 1973 * (for more info read the ANI patent)
1675 */ 1974 */
1676#define AR5K_PHY_AGCCOARSE 0x985c 1975#define AR5K_PHY_AGCCOARSE 0x985c /* Register Address */
1677#define AR5K_PHY_AGCCOARSE_LO 0x00007f80 1976#define AR5K_PHY_AGCCOARSE_LO 0x00007f80 /* Mask for AGC Coarse low */
1678#define AR5K_PHY_AGCCOARSE_LO_S 7 1977#define AR5K_PHY_AGCCOARSE_LO_S 7
1679#define AR5K_PHY_AGCCOARSE_HI 0x003f8000 1978#define AR5K_PHY_AGCCOARSE_HI 0x003f8000 /* Mask for AGC Coarse high */
1680#define AR5K_PHY_AGCCOARSE_HI_S 15 1979#define AR5K_PHY_AGCCOARSE_HI_S 15
1681 1980
1682/* 1981/*
@@ -1689,12 +1988,13 @@
1689/* 1988/*
1690 * PHY noise floor status register 1989 * PHY noise floor status register
1691 */ 1990 */
1692#define AR5K_PHY_NF 0x9864 1991#define AR5K_PHY_NF 0x9864 /* Register address */
1693#define AR5K_PHY_NF_M 0x000001ff 1992#define AR5K_PHY_NF_M 0x000001ff /* Noise floor mask */
1694#define AR5K_PHY_NF_ACTIVE 0x00000100 1993#define AR5K_PHY_NF_ACTIVE 0x00000100 /* Noise floor calibration still active */
1695#define AR5K_PHY_NF_RVAL(_n) (((_n) >> 19) & AR5K_PHY_NF_M) 1994#define AR5K_PHY_NF_RVAL(_n) (((_n) >> 19) & AR5K_PHY_NF_M)
1696#define AR5K_PHY_NF_AVAL(_n) (-((_n) ^ AR5K_PHY_NF_M) + 1) 1995#define AR5K_PHY_NF_AVAL(_n) (-((_n) ^ AR5K_PHY_NF_M) + 1)
1697#define AR5K_PHY_NF_SVAL(_n) (((_n) & AR5K_PHY_NF_M) | (1 << 9)) 1996#define AR5K_PHY_NF_SVAL(_n) (((_n) & AR5K_PHY_NF_M) | (1 << 9))
1997#define AR5K_PHY_NF_THRESH62 0x00001000 /* Thresh62 -check ANI patent- (field) */
1698 1998
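The helpers above pack and unpack the 9-bit noise floor field: AR5K_PHY_NF_RVAL() extracts the raw field from a register read, AR5K_PHY_NF_AVAL() converts it to a signed value and AR5K_PHY_NF_SVAL() builds a value for writing back. A small usage sketch, assuming a hypothetical read_reg() accessor and an invented register value (a real driver would go through its own ioread32() wrapper):

#include <stdint.h>
#include <stdio.h>

#define AR5K_PHY_NF_M		0x000001ff
#define AR5K_PHY_NF_RVAL(_n)	(((_n) >> 19) & AR5K_PHY_NF_M)
#define AR5K_PHY_NF_AVAL(_n)	(-((_n) ^ AR5K_PHY_NF_M) + 1)

/* Hypothetical accessor returning a made-up AR5K_PHY_NF (0x9864) value. */
static uint32_t read_reg(uint32_t addr)
{
	(void)addr;
	return (uint32_t)0x1a6 << 19;
}

int main(void)
{
	uint32_t raw = read_reg(0x9864);
	int field    = AR5K_PHY_NF_RVAL(raw);	/* 9-bit noise floor field */
	int nf       = AR5K_PHY_NF_AVAL(field);	/* signed noise floor value */

	printf("field 0x%03x -> %d\n", (unsigned int)field, nf);	/* 0x1a6 -> -88 */
	return 0;
}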
1699/* 1999/*
1700 * PHY ADC saturation register [5110] 2000 * PHY ADC saturation register [5110]
@@ -1706,6 +2006,30 @@
1706#define AR5K_PHY_ADCSAT_THR_S 5 2006#define AR5K_PHY_ADCSAT_THR_S 5
1707 2007
1708/* 2008/*
2009 * PHY Weak ofdm signal detection threshold registers (ANI) [5212+]
2010 */
2011
2012/* High thresholds */
2013#define AR5K_PHY_WEAK_OFDM_HIGH_THR 0x9868
2014#define AR5K_PHY_WEAK_OFDM_HIGH_THR_M2_COUNT 0x0000001f
2015#define AR5K_PHY_WEAK_OFDM_HIGH_THR_M2_COUNT_S 0
2016#define AR5K_PHY_WEAK_OFDM_HIGH_THR_M1 0x00fe0000
2017#define AR5K_PHY_WEAK_OFDM_HIGH_THR_M1_S 17
2018#define AR5K_PHY_WEAK_OFDM_HIGH_THR_M2 0x7f000000
2019#define AR5K_PHY_WEAK_OFDM_HIGH_THR_M2_S 24
2020
2021/* Low thresholds */
2022#define AR5K_PHY_WEAK_OFDM_LOW_THR 0x986c
2023#define AR5K_PHY_WEAK_OFDM_LOW_THR_SELFCOR_EN 0x00000001
2024#define AR5K_PHY_WEAK_OFDM_LOW_THR_M2_COUNT 0x00003f00
2025#define AR5K_PHY_WEAK_OFDM_LOW_THR_M2_COUNT_S 8
2026#define AR5K_PHY_WEAK_OFDM_LOW_THR_M1 0x001fc000
2027#define AR5K_PHY_WEAK_OFDM_LOW_THR_M1_S 14
2028#define AR5K_PHY_WEAK_OFDM_LOW_THR_M2 0x0fe00000
2029#define AR5K_PHY_WEAK_OFDM_LOW_THR_M2_S 21
2030
2031
2032/*
1709 * PHY sleep registers [5112+] 2033 * PHY sleep registers [5112+]
1710 */ 2034 */
1711#define AR5K_PHY_SCR 0x9870 2035#define AR5K_PHY_SCR 0x9870
@@ -1730,6 +2054,8 @@
1730 AR5K_PHY_PLL_44MHZ_5211 : AR5K_PHY_PLL_44MHZ_5212) 2054 AR5K_PHY_PLL_44MHZ_5211 : AR5K_PHY_PLL_44MHZ_5212)
1731#define AR5K_PHY_PLL_RF5111 0x00000000 2055#define AR5K_PHY_PLL_RF5111 0x00000000
1732#define AR5K_PHY_PLL_RF5112 0x00000040 2056#define AR5K_PHY_PLL_RF5112 0x00000040
2057#define AR5K_PHY_PLL_HALF_RATE 0x00000100
2058#define AR5K_PHY_PLL_QUARTER_RATE 0x00000200
1733 2059
1734/* 2060/*
1735 * RF Buffer register 2061 * RF Buffer register
@@ -1792,23 +2118,74 @@
1792#define AR5K_PHY_RFSTG_DISABLE 0x00000021 2118#define AR5K_PHY_RFSTG_DISABLE 0x00000021
1793 2119
1794/* 2120/*
2121 * PHY Antenna control register
2122 */
2123#define AR5K_PHY_ANT_CTL 0x9910 /* Register Address */
2124#define AR5K_PHY_ANT_CTL_TXRX_EN 0x00000001 /* Enable TX/RX (?) */
2125#define AR5K_PHY_ANT_CTL_SECTORED_ANT 0x00000004 /* Sectored Antenna */
2126#define AR5K_PHY_ANT_CTL_HITUNE5 0x00000008 /* Hitune5 (?) */
2127#define AR5K_PHY_ANT_CTL_SWTABLE_IDLE 0x00000010 /* Switch table idle (?) */
2128
2129/*
1795 * PHY receiver delay register [5111+] 2130 * PHY receiver delay register [5111+]
1796 */ 2131 */
1797#define AR5K_PHY_RX_DELAY 0x9914 2132#define AR5K_PHY_RX_DELAY 0x9914 /* Register Address */
1798#define AR5K_PHY_RX_DELAY_M 0x00003fff 2133#define AR5K_PHY_RX_DELAY_M 0x00003fff /* Mask for RX activate to receive delay (/100ns) */
2134
2135/*
2136 * PHY max rx length register (?) [5111]
2137 */
2138#define AR5K_PHY_MAX_RX_LEN 0x991c
1799 2139
1800/* 2140/*
1801 * PHY timing I(nphase) Q(adrature) control register [5111+] 2141 * PHY timing register 4
2142 * I(nphase)/Q(adrature) calibration register [5111+]
1802 */ 2143 */
1803#define AR5K_PHY_IQ 0x9920 /* Register address */ 2144#define AR5K_PHY_IQ 0x9920 /* Register Address */
1804#define AR5K_PHY_IQ_CORR_Q_Q_COFF 0x0000001f /* Mask for q correction info */ 2145#define AR5K_PHY_IQ_CORR_Q_Q_COFF 0x0000001f /* Mask for q correction info */
1805#define AR5K_PHY_IQ_CORR_Q_I_COFF 0x000007e0 /* Mask for i correction info */ 2146#define AR5K_PHY_IQ_CORR_Q_I_COFF 0x000007e0 /* Mask for i correction info */
1806#define AR5K_PHY_IQ_CORR_Q_I_COFF_S 5 2147#define AR5K_PHY_IQ_CORR_Q_I_COFF_S 5
1807#define AR5K_PHY_IQ_CORR_ENABLE 0x00000800 /* Enable i/q correction */ 2148#define AR5K_PHY_IQ_CORR_ENABLE 0x00000800 /* Enable i/q correction */
1808#define AR5K_PHY_IQ_CAL_NUM_LOG_MAX 0x0000f000 2149#define AR5K_PHY_IQ_CAL_NUM_LOG_MAX 0x0000f000 /* Mask for max number of samples in log scale */
1809#define AR5K_PHY_IQ_CAL_NUM_LOG_MAX_S 12 2150#define AR5K_PHY_IQ_CAL_NUM_LOG_MAX_S 12
1810#define AR5K_PHY_IQ_RUN 0x00010000 /* Run i/q calibration */ 2151#define AR5K_PHY_IQ_RUN 0x00010000 /* Run i/q calibration */
2152#define AR5K_PHY_IQ_USE_PT_DF 0x00020000 /* Use pilot track df (?) */
2153#define AR5K_PHY_IQ_EARLY_TRIG_THR 0x00200000 /* Early trigger threshold (?) (field) */
2154#define AR5K_PHY_IQ_PILOT_MASK_EN 0x10000000 /* Enable pilot mask (?) */
2155#define AR5K_PHY_IQ_CHAN_MASK_EN 0x20000000 /* Enable channel mask (?) */
2156#define AR5K_PHY_IQ_SPUR_FILT_EN 0x40000000 /* Enable spur filter */
2157#define AR5K_PHY_IQ_SPUR_RSSI_EN 0x80000000 /* Enable spur rssi */
1811 2158
2159/*
2160 * PHY timing register 5
2161 * OFDM Self-correlator Cyclic RSSI threshold params
2162 * (Check out bb_cycpwr_thr1 on ANI patent)
2163 */
2164#define AR5K_PHY_OFDM_SELFCORR 0x9924 /* Register Address */
2165#define AR5K_PHY_OFDM_SELFCORR_CYPWR_THR1_EN 0x00000001 /* Enable cyclic RSSI thr 1 */
2166#define AR5K_PHY_OFDM_SELFCORR_CYPWR_THR1 0x000000fe /* Mask for Cyclic RSSI threshold 1 */
2167#define AR5K_PHY_OFDM_SELFCORR_CYPWR_THR3 0x00000100 /* Cyclic RSSI threshold 3 (field) (?) */
2168#define AR5K_PHY_OFDM_SELFCORR_RSSI_1ATHR_EN 0x00008000 /* Enable 1A RSSI threshold (?) */
2169#define AR5K_PHY_OFDM_SELFCORR_RSSI_1ATHR 0x00010000 /* 1A RSSI threshold (field) (?) */
2170#define AR5K_PHY_OFDM_SELFCORR_LSCTHR_HIRSSI 0x00800000 /* Long sc threshold hi rssi (?) */
2171
2172/*
2173 * PHY-only warm reset register
2174 */
2175#define AR5K_PHY_WARM_RESET 0x9928
2176
2177/*
2178 * PHY-only control register
2179 */
2180#define AR5K_PHY_CTL 0x992c /* Register Address */
2181#define AR5K_PHY_CTL_RX_DRAIN_RATE 0x00000001 /* RX drain rate (?) */
2182#define AR5K_PHY_CTL_LATE_TX_SIG_SYM 0x00000002 /* Late tx signal symbol (?) */
2183#define AR5K_PHY_CTL_GEN_SCRAMBLER 0x00000004 /* Generate scrambler */
2184#define AR5K_PHY_CTL_TX_ANT_SEL 0x00000008 /* TX antenna select */
2185#define AR5K_PHY_CTL_TX_ANT_STATIC 0x00000010 /* Static TX antenna */
2186#define AR5K_PHY_CTL_RX_ANT_SEL 0x00000020 /* RX antenna select */
2187#define AR5K_PHY_CTL_RX_ANT_STATIC 0x00000040 /* Static RX antenna */
2188#define AR5K_PHY_CTL_LOW_FREQ_SLE_EN 0x00000080 /* Enable low freq sleep */
1812 2189
1813/* 2190/*
1814 * PHY PAPD probe register [5111+ (?)] 2191 * PHY PAPD probe register [5111+ (?)]
@@ -1816,9 +2193,13 @@
1816 * Because it's always 0 in 5211 initialization code 2193 * Because it's always 0 in 5211 initialization code
1817 */ 2194 */
1818#define AR5K_PHY_PAPD_PROBE 0x9930 2195#define AR5K_PHY_PAPD_PROBE 0x9930
2196#define AR5K_PHY_PAPD_PROBE_SH_HI_PAR 0x00000001
2197#define AR5K_PHY_PAPD_PROBE_PCDAC_BIAS 0x00000002
2198#define AR5K_PHY_PAPD_PROBE_COMP_GAIN 0x00000040
1819#define AR5K_PHY_PAPD_PROBE_TXPOWER 0x00007e00 2199#define AR5K_PHY_PAPD_PROBE_TXPOWER 0x00007e00
1820#define AR5K_PHY_PAPD_PROBE_TXPOWER_S 9 2200#define AR5K_PHY_PAPD_PROBE_TXPOWER_S 9
1821#define AR5K_PHY_PAPD_PROBE_TX_NEXT 0x00008000 2201#define AR5K_PHY_PAPD_PROBE_TX_NEXT 0x00008000
2202#define AR5K_PHY_PAPD_PROBE_PREDIST_EN 0x00010000
1822#define AR5K_PHY_PAPD_PROBE_TYPE 0x01800000 /* [5112+] */ 2203#define AR5K_PHY_PAPD_PROBE_TYPE 0x01800000 /* [5112+] */
1823#define AR5K_PHY_PAPD_PROBE_TYPE_S 23 2204#define AR5K_PHY_PAPD_PROBE_TYPE_S 23
1824#define AR5K_PHY_PAPD_PROBE_TYPE_OFDM 0 2205#define AR5K_PHY_PAPD_PROBE_TYPE_OFDM 0
@@ -1848,15 +2229,16 @@
1848#define AR5K_PHY_FRAME_CTL (ah->ah_version == AR5K_AR5210 ? \ 2229#define AR5K_PHY_FRAME_CTL (ah->ah_version == AR5K_AR5210 ? \
1849 AR5K_PHY_FRAME_CTL_5210 : AR5K_PHY_FRAME_CTL_5211) 2230 AR5K_PHY_FRAME_CTL_5210 : AR5K_PHY_FRAME_CTL_5211)
1850/*---[5111+]---*/ 2231/*---[5111+]---*/
1851#define AR5K_PHY_FRAME_CTL_TX_CLIP 0x00000038 2232#define AR5K_PHY_FRAME_CTL_TX_CLIP 0x00000038 /* Mask for tx clip (?) */
1852#define AR5K_PHY_FRAME_CTL_TX_CLIP_S 3 2233#define AR5K_PHY_FRAME_CTL_TX_CLIP_S 3
2234#define AR5K_PHY_FRAME_CTL_PREP_CHINFO 0x00010000 /* Prepend chan info */
1853/*---[5110/5111]---*/ 2235/*---[5110/5111]---*/
1854#define AR5K_PHY_FRAME_CTL_TIMING_ERR 0x01000000 2236#define AR5K_PHY_FRAME_CTL_TIMING_ERR 0x01000000 /* PHY timing error */
1855#define AR5K_PHY_FRAME_CTL_PARITY_ERR 0x02000000 2237#define AR5K_PHY_FRAME_CTL_PARITY_ERR 0x02000000 /* Parity error */
1856#define AR5K_PHY_FRAME_CTL_ILLRATE_ERR 0x04000000 /* illegal rate */ 2238#define AR5K_PHY_FRAME_CTL_ILLRATE_ERR 0x04000000 /* Illegal rate */
1857#define AR5K_PHY_FRAME_CTL_ILLLEN_ERR 0x08000000 /* illegal length */ 2239#define AR5K_PHY_FRAME_CTL_ILLLEN_ERR 0x08000000 /* Illegal length */
1858#define AR5K_PHY_FRAME_CTL_SERVICE_ERR 0x20000000 2240#define AR5K_PHY_FRAME_CTL_SERVICE_ERR 0x20000000
1859#define AR5K_PHY_FRAME_CTL_TXURN_ERR 0x40000000 /* tx underrun */ 2241#define AR5K_PHY_FRAME_CTL_TXURN_ERR 0x40000000 /* TX underrun */
1860#define AR5K_PHY_FRAME_CTL_INI AR5K_PHY_FRAME_CTL_SERVICE_ERR | \ 2242#define AR5K_PHY_FRAME_CTL_INI AR5K_PHY_FRAME_CTL_SERVICE_ERR | \
1861 AR5K_PHY_FRAME_CTL_TXURN_ERR | \ 2243 AR5K_PHY_FRAME_CTL_TXURN_ERR | \
1862 AR5K_PHY_FRAME_CTL_ILLLEN_ERR | \ 2244 AR5K_PHY_FRAME_CTL_ILLLEN_ERR | \
@@ -1915,6 +2297,11 @@ after DFS is enabled */
1915#define AR5K_PHY_ANT_SWITCH_TABLE_1 0x9964 2297#define AR5K_PHY_ANT_SWITCH_TABLE_1 0x9964
1916 2298
1917/* 2299/*
2300 * PHY Noise floor threshold
2301 */
2302#define AR5K_PHY_NFTHRES 0x9968
2303
2304/*
1918 * PHY clock sleep registers [5112+] 2305 * PHY clock sleep registers [5112+]
1919 */ 2306 */
1920#define AR5K_PHY_SCLOCK 0x99f0 2307#define AR5K_PHY_SCLOCK 0x99f0
@@ -1922,56 +2309,116 @@ after DFS is enabled */
1922#define AR5K_PHY_SDELAY 0x99f4 2309#define AR5K_PHY_SDELAY 0x99f4
1923#define AR5K_PHY_SDELAY_32MHZ 0x000000ff 2310#define AR5K_PHY_SDELAY_32MHZ 0x000000ff
1924#define AR5K_PHY_SPENDING 0x99f8 2311#define AR5K_PHY_SPENDING 0x99f8
2312#define AR5K_PHY_SPENDING_14 0x00000014
2313#define AR5K_PHY_SPENDING_18 0x00000018
1925#define AR5K_PHY_SPENDING_RF5111 0x00000018 2314#define AR5K_PHY_SPENDING_RF5111 0x00000018
1926#define AR5K_PHY_SPENDING_RF5112 0x00000014 /* <- i 've only seen this on 2425 dumps ! */ 2315#define AR5K_PHY_SPENDING_RF5112 0x00000014
1927#define AR5K_PHY_SPENDING_RF5112A 0x0000000e /* but since i only have 5112A-based chips */ 2316/* #define AR5K_PHY_SPENDING_RF5112A 0x0000000e */
1928#define AR5K_PHY_SPENDING_RF5424 0x00000012 /* to test it might be also for old 5112. */ 2317/* #define AR5K_PHY_SPENDING_RF5424 0x00000012 */
2318#define AR5K_PHY_SPENDING_RF5413 0x00000014
2319#define AR5K_PHY_SPENDING_RF2413 0x00000014
2320#define AR5K_PHY_SPENDING_RF2425 0x00000018
1929 2321
1930/* 2322/*
1931 * Misc PHY/radio registers [5110 - 5111] 2323 * Misc PHY/radio registers [5110 - 5111]
1932 */ 2324 */
1933#define AR5K_BB_GAIN_BASE 0x9b00 /* BaseBand Amplifier Gain table base address */ 2325#define AR5K_BB_GAIN_BASE 0x9b00 /* BaseBand Amplifier Gain table base address */
1934#define AR5K_BB_GAIN(_n) (AR5K_BB_GAIN_BASE + ((_n) << 2)) 2326#define AR5K_BB_GAIN(_n) (AR5K_BB_GAIN_BASE + ((_n) << 2))
1935#define AR5K_RF_GAIN_BASE 0x9a00 /* RF Amplifier Gain table base address */ 2327#define AR5K_RF_GAIN_BASE 0x9a00 /* RF Amplifier Gain table base address */
1936#define AR5K_RF_GAIN(_n) (AR5K_RF_GAIN_BASE + ((_n) << 2)) 2328#define AR5K_RF_GAIN(_n) (AR5K_RF_GAIN_BASE + ((_n) << 2))
1937 2329
1938/* 2330/*
1939 * PHY timing IQ calibration result register [5111+] 2331 * PHY timing IQ calibration result register [5111+]
1940 */ 2332 */
1941#define AR5K_PHY_IQRES_CAL_PWR_I 0x9c10 /* I (Inphase) power value */ 2333#define AR5K_PHY_IQRES_CAL_PWR_I 0x9c10 /* I (Inphase) power value */
1942#define AR5K_PHY_IQRES_CAL_PWR_Q 0x9c14 /* Q (Quadrature) power value */ 2334#define AR5K_PHY_IQRES_CAL_PWR_Q 0x9c14 /* Q (Quadrature) power value */
1943#define AR5K_PHY_IQRES_CAL_CORR 0x9c18 /* I/Q Correlation */ 2335#define AR5K_PHY_IQRES_CAL_CORR 0x9c18 /* I/Q Correlation */
1944 2336
1945/* 2337/*
1946 * PHY current RSSI register [5111+] 2338 * PHY current RSSI register [5111+]
1947 */ 2339 */
1948#define AR5K_PHY_CURRENT_RSSI 0x9c1c 2340#define AR5K_PHY_CURRENT_RSSI 0x9c1c
2341
2342/*
2343 * PHY RF Bus grant register (?)
2344 */
2345#define AR5K_PHY_RFBUS_GRANT 0x9c20
2346
2347/*
2348 * PHY ADC test register
2349 */
2350#define AR5K_PHY_ADC_TEST 0x9c24
2351#define AR5K_PHY_ADC_TEST_I 0x00000001
2352#define AR5K_PHY_ADC_TEST_Q 0x00000200
2353
2354/*
2355 * PHY DAC test register
2356 */
2357#define AR5K_PHY_DAC_TEST 0x9c28
2358#define AR5K_PHY_DAC_TEST_I 0x00000001
2359#define AR5K_PHY_DAC_TEST_Q 0x00000200
2360
2361/*
2362 * PHY PTAT register (?)
2363 */
2364#define AR5K_PHY_PTAT 0x9c2c
2365
2366/*
2367 * PHY Illegal TX rate register [5112+]
2368 */
2369#define AR5K_PHY_BAD_TX_RATE 0x9c30
2370
2371/*
2372 * PHY SPUR Power register [5112+]
2373 */
2374#define AR5K_PHY_SPUR_PWR 0x9c34 /* Register Address */
2375#define AR5K_PHY_SPUR_PWR_I 0x00000001 /* SPUR Power estimate for I (field) */
2376#define AR5K_PHY_SPUR_PWR_Q 0x00000100 /* SPUR Power estimate for Q (field) */
2377#define AR5K_PHY_SPUR_PWR_FILT 0x00010000 /* Power with SPUR removed (field) */
2378
2379/*
2380 * PHY Channel status register [5112+] (?)
2381 */
2382#define AR5K_PHY_CHAN_STATUS 0x9c38
2383#define AR5K_PHY_CHAN_STATUS_BT_ACT 0x00000001
2384#define AR5K_PHY_CHAN_STATUS_RX_CLR_RAW 0x00000002
2385#define AR5K_PHY_CHAN_STATUS_RX_CLR_MAC 0x00000004
2386#define AR5K_PHY_CHAN_STATUS_RX_CLR_PAP 0x00000008
2387
2388/*
2389 * PHY PAPD I (power?) table (?)
2390 * (92! entries)
2391 */
2392#define AR5K_PHY_PAPD_I_BASE 0xa000
2393#define AR5K_PHY_PAPD_I(_n) (AR5K_PHY_PAPD_I_BASE + ((_n) << 2))
1949 2394
1950/* 2395/*
1951 * PHY PCDAC TX power table 2396 * PHY PCDAC TX power table
1952 */ 2397 */
1953#define AR5K_PHY_PCDAC_TXPOWER_BASE_5211 0xa180 2398#define AR5K_PHY_PCDAC_TXPOWER_BASE_5211 0xa180
1954#define AR5K_PHY_PCDAC_TXPOWER_BASE_5413 0xa280 2399#define AR5K_PHY_PCDAC_TXPOWER_BASE_2413 0xa280
1955#define AR5K_PHY_PCDAC_TXPOWER_BASE (ah->ah_radio >= AR5K_RF5413 ? \ 2400#define AR5K_PHY_PCDAC_TXPOWER_BASE (ah->ah_radio >= AR5K_RF2413 ? \
1956 AR5K_PHY_PCDAC_TXPOWER_BASE_5413 :\ 2401 AR5K_PHY_PCDAC_TXPOWER_BASE_2413 :\
1957 AR5K_PHY_PCDAC_TXPOWER_BASE_5211) 2402 AR5K_PHY_PCDAC_TXPOWER_BASE_5211)
1958#define AR5K_PHY_PCDAC_TXPOWER(_n) (AR5K_PHY_PCDAC_TXPOWER_BASE + ((_n) << 2)) 2403#define AR5K_PHY_PCDAC_TXPOWER(_n) (AR5K_PHY_PCDAC_TXPOWER_BASE + ((_n) << 2))
1959 2404
1960/* 2405/*
1961 * PHY mode register [5111+] 2406 * PHY mode register [5111+]
1962 */ 2407 */
1963#define AR5K_PHY_MODE 0x0a200 /* Register address */ 2408#define AR5K_PHY_MODE 0x0a200 /* Register Address */
1964#define AR5K_PHY_MODE_MOD 0x00000001 /* PHY Modulation mask*/ 2409#define AR5K_PHY_MODE_MOD 0x00000001 /* PHY Modulation bit */
1965#define AR5K_PHY_MODE_MOD_OFDM 0 2410#define AR5K_PHY_MODE_MOD_OFDM 0
1966#define AR5K_PHY_MODE_MOD_CCK 1 2411#define AR5K_PHY_MODE_MOD_CCK 1
1967#define AR5K_PHY_MODE_FREQ 0x00000002 /* Freq mode mask */ 2412#define AR5K_PHY_MODE_FREQ 0x00000002 /* Freq mode bit */
1968#define AR5K_PHY_MODE_FREQ_5GHZ 0 2413#define AR5K_PHY_MODE_FREQ_5GHZ 0
1969#define AR5K_PHY_MODE_FREQ_2GHZ 2 2414#define AR5K_PHY_MODE_FREQ_2GHZ 2
1970#define AR5K_PHY_MODE_MOD_DYN 0x00000004 /* Dynamic OFDM/CCK mode mask [5112+] */ 2415#define AR5K_PHY_MODE_MOD_DYN 0x00000004 /* Enable Dynamic OFDM/CCK mode [5112+] */
1971#define AR5K_PHY_MODE_RAD 0x00000008 /* [5212+] */ 2416#define AR5K_PHY_MODE_RAD 0x00000008 /* [5212+] */
1972#define AR5K_PHY_MODE_RAD_RF5111 0 2417#define AR5K_PHY_MODE_RAD_RF5111 0
1973#define AR5K_PHY_MODE_RAD_RF5112 8 2418#define AR5K_PHY_MODE_RAD_RF5112 8
1974#define AR5K_PHY_MODE_XR 0x00000010 /* [5112+] */ 2419#define AR5K_PHY_MODE_XR 0x00000010 /* Enable XR mode [5112+] */
2420#define AR5K_PHY_MODE_HALF_RATE 0x00000020 /* Enable Half rate (test) */
 2421#define AR5K_PHY_MODE_QUARTER_RATE 0x00000040 /* Enable Quarter rate (test) */
1975 2422
1976/* 2423/*
1977 * PHY CCK transmit control register [5111+ (?)] 2424 * PHY CCK transmit control register [5111+ (?)]
@@ -1979,6 +2426,15 @@ after DFS is enabled */
1979#define AR5K_PHY_CCKTXCTL 0xa204 2426#define AR5K_PHY_CCKTXCTL 0xa204
1980#define AR5K_PHY_CCKTXCTL_WORLD 0x00000000 2427#define AR5K_PHY_CCKTXCTL_WORLD 0x00000000
1981#define AR5K_PHY_CCKTXCTL_JAPAN 0x00000010 2428#define AR5K_PHY_CCKTXCTL_JAPAN 0x00000010
2429#define AR5K_PHY_CCKTXCTL_SCRAMBLER_DIS 0x00000001
 2430#define AR5K_PHY_CCKTXCTL_DAC_SCALE 0x00000004
2431
2432/*
2433 * PHY CCK Cross-correlator Barker RSSI threshold register [5212+]
2434 */
2435#define AR5K_PHY_CCK_CROSSCORR 0xa208
2436#define AR5K_PHY_CCK_CROSSCORR_WEAK_SIG_THR 0x0000000f
2437#define AR5K_PHY_CCK_CROSSCORR_WEAK_SIG_THR_S 0
1982 2438
1983/* 2439/*
1984 * PHY 2GHz gain register [5111+] 2440 * PHY 2GHz gain register [5111+]
diff --git a/drivers/net/wireless/ath9k/Kconfig b/drivers/net/wireless/ath9k/Kconfig
new file mode 100644
index 000000000000..9e19dcceb3a2
--- /dev/null
+++ b/drivers/net/wireless/ath9k/Kconfig
@@ -0,0 +1,8 @@
1config ATH9K
2 tristate "Atheros 802.11n wireless cards support"
3 depends on PCI && MAC80211 && WLAN_80211
4 ---help---
5 This module adds support for wireless adapters based on
6 Atheros IEEE 802.11n AR5008 and AR9001 family of chipsets.
7
8 If you choose to build a module, it'll be called ath9k.
diff --git a/drivers/net/wireless/ath9k/Makefile b/drivers/net/wireless/ath9k/Makefile
new file mode 100644
index 000000000000..a6411517e5f8
--- /dev/null
+++ b/drivers/net/wireless/ath9k/Makefile
@@ -0,0 +1,11 @@
1ath9k-y += hw.o \
2 phy.o \
3 regd.o \
4 beacon.o \
5 main.o \
6 recv.o \
7 xmit.o \
8 rc.o \
9 core.o
10
11obj-$(CONFIG_ATH9K) += ath9k.o
diff --git a/drivers/net/wireless/ath9k/ath9k.h b/drivers/net/wireless/ath9k/ath9k.h
new file mode 100644
index 000000000000..d1b0fbae5a32
--- /dev/null
+++ b/drivers/net/wireless/ath9k/ath9k.h
@@ -0,0 +1,1021 @@
1/*
2 * Copyright (c) 2008 Atheros Communications Inc.
3 *
4 * Permission to use, copy, modify, and/or distribute this software for any
5 * purpose with or without fee is hereby granted, provided that the above
6 * copyright notice and this permission notice appear in all copies.
7 *
8 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
9 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
10 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
11 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
12 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
13 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
14 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
15 */
16
17#ifndef ATH9K_H
18#define ATH9K_H
19
20#include <linux/io.h>
21
22#define ATHEROS_VENDOR_ID 0x168c
23
24#define AR5416_DEVID_PCI 0x0023
25#define AR5416_DEVID_PCIE 0x0024
26#define AR9160_DEVID_PCI 0x0027
27#define AR9280_DEVID_PCI 0x0029
28#define AR9280_DEVID_PCIE 0x002a
29
30#define AR5416_AR9100_DEVID 0x000b
31
32#define AR_SUBVENDOR_ID_NOG 0x0e11
33#define AR_SUBVENDOR_ID_NEW_A 0x7065
34
35#define ATH9K_TXERR_XRETRY 0x01
36#define ATH9K_TXERR_FILT 0x02
37#define ATH9K_TXERR_FIFO 0x04
38#define ATH9K_TXERR_XTXOP 0x08
39#define ATH9K_TXERR_TIMER_EXPIRED 0x10
40
41#define ATH9K_TX_BA 0x01
42#define ATH9K_TX_PWRMGMT 0x02
43#define ATH9K_TX_DESC_CFG_ERR 0x04
44#define ATH9K_TX_DATA_UNDERRUN 0x08
45#define ATH9K_TX_DELIM_UNDERRUN 0x10
46#define ATH9K_TX_SW_ABORTED 0x40
47#define ATH9K_TX_SW_FILTERED 0x80
48
49#define NBBY 8
50
51struct ath_tx_status {
52 u32 ts_tstamp;
53 u16 ts_seqnum;
54 u8 ts_status;
55 u8 ts_ratecode;
56 u8 ts_rateindex;
57 int8_t ts_rssi;
58 u8 ts_shortretry;
59 u8 ts_longretry;
60 u8 ts_virtcol;
61 u8 ts_antenna;
62 u8 ts_flags;
63 int8_t ts_rssi_ctl0;
64 int8_t ts_rssi_ctl1;
65 int8_t ts_rssi_ctl2;
66 int8_t ts_rssi_ext0;
67 int8_t ts_rssi_ext1;
68 int8_t ts_rssi_ext2;
69 u8 pad[3];
70 u32 ba_low;
71 u32 ba_high;
72 u32 evm0;
73 u32 evm1;
74 u32 evm2;
75};
76
77struct ath_rx_status {
78 u32 rs_tstamp;
79 u16 rs_datalen;
80 u8 rs_status;
81 u8 rs_phyerr;
82 int8_t rs_rssi;
83 u8 rs_keyix;
84 u8 rs_rate;
85 u8 rs_antenna;
86 u8 rs_more;
87 int8_t rs_rssi_ctl0;
88 int8_t rs_rssi_ctl1;
89 int8_t rs_rssi_ctl2;
90 int8_t rs_rssi_ext0;
91 int8_t rs_rssi_ext1;
92 int8_t rs_rssi_ext2;
93 u8 rs_isaggr;
94 u8 rs_moreaggr;
95 u8 rs_num_delims;
96 u8 rs_flags;
97 u32 evm0;
98 u32 evm1;
99 u32 evm2;
100};
101
102#define ATH9K_RXERR_CRC 0x01
103#define ATH9K_RXERR_PHY 0x02
104#define ATH9K_RXERR_FIFO 0x04
105#define ATH9K_RXERR_DECRYPT 0x08
106#define ATH9K_RXERR_MIC 0x10
107
108#define ATH9K_RX_MORE 0x01
109#define ATH9K_RX_MORE_AGGR 0x02
110#define ATH9K_RX_GI 0x04
111#define ATH9K_RX_2040 0x08
112#define ATH9K_RX_DELIM_CRC_PRE 0x10
113#define ATH9K_RX_DELIM_CRC_POST 0x20
114#define ATH9K_RX_DECRYPT_BUSY 0x40
115
116#define ATH9K_RXKEYIX_INVALID ((u8)-1)
117#define ATH9K_TXKEYIX_INVALID ((u32)-1)
118
119struct ath_desc {
120 u32 ds_link;
121 u32 ds_data;
122 u32 ds_ctl0;
123 u32 ds_ctl1;
124 u32 ds_hw[20];
125 union {
126 struct ath_tx_status tx;
127 struct ath_rx_status rx;
128 void *stats;
129 } ds_us;
130 void *ds_vdata;
131} __packed;
132
133#define ds_txstat ds_us.tx
134#define ds_rxstat ds_us.rx
135#define ds_stat ds_us.stats
136
137#define ATH9K_TXDESC_CLRDMASK 0x0001
138#define ATH9K_TXDESC_NOACK 0x0002
139#define ATH9K_TXDESC_RTSENA 0x0004
140#define ATH9K_TXDESC_CTSENA 0x0008
141#define ATH9K_TXDESC_INTREQ 0x0010
142#define ATH9K_TXDESC_VEOL 0x0020
143#define ATH9K_TXDESC_EXT_ONLY 0x0040
144#define ATH9K_TXDESC_EXT_AND_CTL 0x0080
145#define ATH9K_TXDESC_VMF 0x0100
146#define ATH9K_TXDESC_FRAG_IS_ON 0x0200
147
148#define ATH9K_RXDESC_INTREQ 0x0020
149
150enum wireless_mode {
151 ATH9K_MODE_11A = 0,
152 ATH9K_MODE_11B = 2,
153 ATH9K_MODE_11G = 3,
154 ATH9K_MODE_11NA_HT20 = 6,
155 ATH9K_MODE_11NG_HT20 = 7,
156 ATH9K_MODE_11NA_HT40PLUS = 8,
157 ATH9K_MODE_11NA_HT40MINUS = 9,
158 ATH9K_MODE_11NG_HT40PLUS = 10,
159 ATH9K_MODE_11NG_HT40MINUS = 11,
160 ATH9K_MODE_MAX
161};
162
163enum ath9k_hw_caps {
164 ATH9K_HW_CAP_CHAN_SPREAD = BIT(0),
165 ATH9K_HW_CAP_MIC_AESCCM = BIT(1),
166 ATH9K_HW_CAP_MIC_CKIP = BIT(2),
167 ATH9K_HW_CAP_MIC_TKIP = BIT(3),
168 ATH9K_HW_CAP_CIPHER_AESCCM = BIT(4),
169 ATH9K_HW_CAP_CIPHER_CKIP = BIT(5),
170 ATH9K_HW_CAP_CIPHER_TKIP = BIT(6),
171 ATH9K_HW_CAP_VEOL = BIT(7),
172 ATH9K_HW_CAP_BSSIDMASK = BIT(8),
173 ATH9K_HW_CAP_MCAST_KEYSEARCH = BIT(9),
174 ATH9K_HW_CAP_CHAN_HALFRATE = BIT(10),
175 ATH9K_HW_CAP_CHAN_QUARTERRATE = BIT(11),
176 ATH9K_HW_CAP_HT = BIT(12),
177 ATH9K_HW_CAP_GTT = BIT(13),
178 ATH9K_HW_CAP_FASTCC = BIT(14),
179 ATH9K_HW_CAP_RFSILENT = BIT(15),
180 ATH9K_HW_CAP_WOW = BIT(16),
181 ATH9K_HW_CAP_CST = BIT(17),
182 ATH9K_HW_CAP_ENHANCEDPM = BIT(18),
183 ATH9K_HW_CAP_AUTOSLEEP = BIT(19),
184 ATH9K_HW_CAP_4KB_SPLITTRANS = BIT(20),
185 ATH9K_HW_CAP_WOW_MATCHPATTERN_EXACT = BIT(21),
186};
187
188enum ath9k_capability_type {
189 ATH9K_CAP_CIPHER = 0,
190 ATH9K_CAP_TKIP_MIC,
191 ATH9K_CAP_TKIP_SPLIT,
192 ATH9K_CAP_PHYCOUNTERS,
193 ATH9K_CAP_DIVERSITY,
194 ATH9K_CAP_TXPOW,
195 ATH9K_CAP_PHYDIAG,
196 ATH9K_CAP_MCAST_KEYSRCH,
197 ATH9K_CAP_TSF_ADJUST,
198 ATH9K_CAP_WME_TKIPMIC,
199 ATH9K_CAP_RFSILENT,
200 ATH9K_CAP_ANT_CFG_2GHZ,
201 ATH9K_CAP_ANT_CFG_5GHZ
202};
203
204struct ath9k_hw_capabilities {
205 u32 hw_caps; /* ATH9K_HW_CAP_* from ath9k_hw_caps */
206 DECLARE_BITMAP(wireless_modes, ATH9K_MODE_MAX); /* ATH9K_MODE_* */
207 u16 total_queues;
208 u16 keycache_size;
209 u16 low_5ghz_chan, high_5ghz_chan;
210 u16 low_2ghz_chan, high_2ghz_chan;
211 u16 num_mr_retries;
212 u16 rts_aggr_limit;
213 u8 tx_chainmask;
214 u8 rx_chainmask;
215 u16 tx_triglevel_max;
216 u16 reg_cap;
217 u8 num_gpio_pins;
218 u8 num_antcfg_2ghz;
219 u8 num_antcfg_5ghz;
220};
221
222struct ath9k_ops_config {
223 int dma_beacon_response_time;
224 int sw_beacon_response_time;
225 int additional_swba_backoff;
226 int ack_6mb;
227 int cwm_ignore_extcca;
228 u8 pcie_powersave_enable;
229 u8 pcie_l1skp_enable;
230 u8 pcie_clock_req;
231 u32 pcie_waen;
232 int pcie_power_reset;
233 u8 pcie_restore;
234 u8 analog_shiftreg;
235 u8 ht_enable;
236 u32 ofdm_trig_low;
237 u32 ofdm_trig_high;
238 u32 cck_trig_high;
239 u32 cck_trig_low;
240 u32 enable_ani;
241 u8 noise_immunity_level;
242 u32 ofdm_weaksignal_det;
243 u32 cck_weaksignal_thr;
244 u8 spur_immunity_level;
245 u8 firstep_level;
246 int8_t rssi_thr_high;
247 int8_t rssi_thr_low;
248 u16 diversity_control;
249 u16 antenna_switch_swap;
250 int serialize_regmode;
251 int intr_mitigation;
252#define SPUR_DISABLE 0
253#define SPUR_ENABLE_IOCTL 1
254#define SPUR_ENABLE_EEPROM 2
255#define AR_EEPROM_MODAL_SPURS 5
256#define AR_SPUR_5413_1 1640
257#define AR_SPUR_5413_2 1200
258#define AR_NO_SPUR 0x8000
259#define AR_BASE_FREQ_2GHZ 2300
260#define AR_BASE_FREQ_5GHZ 4900
261#define AR_SPUR_FEEQ_BOUND_HT40 19
262#define AR_SPUR_FEEQ_BOUND_HT20 10
263 int spurmode;
264 u16 spurchans[AR_EEPROM_MODAL_SPURS][2];
265};
266
267enum ath9k_tx_queue {
268 ATH9K_TX_QUEUE_INACTIVE = 0,
269 ATH9K_TX_QUEUE_DATA,
270 ATH9K_TX_QUEUE_BEACON,
271 ATH9K_TX_QUEUE_CAB,
272 ATH9K_TX_QUEUE_UAPSD,
273 ATH9K_TX_QUEUE_PSPOLL
274};
275
276#define ATH9K_NUM_TX_QUEUES 10
277
278enum ath9k_tx_queue_subtype {
279 ATH9K_WME_AC_BK = 0,
280 ATH9K_WME_AC_BE,
281 ATH9K_WME_AC_VI,
282 ATH9K_WME_AC_VO,
283 ATH9K_WME_UPSD
284};
285
286enum ath9k_tx_queue_flags {
287 TXQ_FLAG_TXOKINT_ENABLE = 0x0001,
288 TXQ_FLAG_TXERRINT_ENABLE = 0x0001,
289 TXQ_FLAG_TXDESCINT_ENABLE = 0x0002,
290 TXQ_FLAG_TXEOLINT_ENABLE = 0x0004,
291 TXQ_FLAG_TXURNINT_ENABLE = 0x0008,
292 TXQ_FLAG_BACKOFF_DISABLE = 0x0010,
293 TXQ_FLAG_COMPRESSION_ENABLE = 0x0020,
294 TXQ_FLAG_RDYTIME_EXP_POLICY_ENABLE = 0x0040,
295 TXQ_FLAG_FRAG_BURST_BACKOFF_ENABLE = 0x0080,
296};
297
298#define ATH9K_TXQ_USEDEFAULT ((u32) -1)
299
300#define ATH9K_DECOMP_MASK_SIZE 128
301#define ATH9K_READY_TIME_LO_BOUND 50
302#define ATH9K_READY_TIME_HI_BOUND 96
303
304enum ath9k_pkt_type {
305 ATH9K_PKT_TYPE_NORMAL = 0,
306 ATH9K_PKT_TYPE_ATIM,
307 ATH9K_PKT_TYPE_PSPOLL,
308 ATH9K_PKT_TYPE_BEACON,
309 ATH9K_PKT_TYPE_PROBE_RESP,
310 ATH9K_PKT_TYPE_CHIRP,
311 ATH9K_PKT_TYPE_GRP_POLL,
312};
313
314struct ath9k_tx_queue_info {
315 u32 tqi_ver;
316 enum ath9k_tx_queue tqi_type;
317 enum ath9k_tx_queue_subtype tqi_subtype;
318 enum ath9k_tx_queue_flags tqi_qflags;
319 u32 tqi_priority;
320 u32 tqi_aifs;
321 u32 tqi_cwmin;
322 u32 tqi_cwmax;
323 u16 tqi_shretry;
324 u16 tqi_lgretry;
325 u32 tqi_cbrPeriod;
326 u32 tqi_cbrOverflowLimit;
327 u32 tqi_burstTime;
328 u32 tqi_readyTime;
329 u32 tqi_physCompBuf;
330 u32 tqi_intFlags;
331};
332
333enum ath9k_rx_filter {
334 ATH9K_RX_FILTER_UCAST = 0x00000001,
335 ATH9K_RX_FILTER_MCAST = 0x00000002,
336 ATH9K_RX_FILTER_BCAST = 0x00000004,
337 ATH9K_RX_FILTER_CONTROL = 0x00000008,
338 ATH9K_RX_FILTER_BEACON = 0x00000010,
339 ATH9K_RX_FILTER_PROM = 0x00000020,
340 ATH9K_RX_FILTER_PROBEREQ = 0x00000080,
341 ATH9K_RX_FILTER_PSPOLL = 0x00004000,
342 ATH9K_RX_FILTER_PHYERR = 0x00000100,
343 ATH9K_RX_FILTER_PHYRADAR = 0x00002000,
344};
345
346enum ath9k_int {
347 ATH9K_INT_RX = 0x00000001,
348 ATH9K_INT_RXDESC = 0x00000002,
349 ATH9K_INT_RXNOFRM = 0x00000008,
350 ATH9K_INT_RXEOL = 0x00000010,
351 ATH9K_INT_RXORN = 0x00000020,
352 ATH9K_INT_TX = 0x00000040,
353 ATH9K_INT_TXDESC = 0x00000080,
354 ATH9K_INT_TIM_TIMER = 0x00000100,
355 ATH9K_INT_TXURN = 0x00000800,
356 ATH9K_INT_MIB = 0x00001000,
357 ATH9K_INT_RXPHY = 0x00004000,
358 ATH9K_INT_RXKCM = 0x00008000,
359 ATH9K_INT_SWBA = 0x00010000,
360 ATH9K_INT_BMISS = 0x00040000,
361 ATH9K_INT_BNR = 0x00100000,
362 ATH9K_INT_TIM = 0x00200000,
363 ATH9K_INT_DTIM = 0x00400000,
364 ATH9K_INT_DTIMSYNC = 0x00800000,
365 ATH9K_INT_GPIO = 0x01000000,
366 ATH9K_INT_CABEND = 0x02000000,
367 ATH9K_INT_CST = 0x10000000,
368 ATH9K_INT_GTT = 0x20000000,
369 ATH9K_INT_FATAL = 0x40000000,
370 ATH9K_INT_GLOBAL = 0x80000000,
371 ATH9K_INT_BMISC = ATH9K_INT_TIM |
372 ATH9K_INT_DTIM |
373 ATH9K_INT_DTIMSYNC |
374 ATH9K_INT_CABEND,
375 ATH9K_INT_COMMON = ATH9K_INT_RXNOFRM |
376 ATH9K_INT_RXDESC |
377 ATH9K_INT_RXEOL |
378 ATH9K_INT_RXORN |
379 ATH9K_INT_TXURN |
380 ATH9K_INT_TXDESC |
381 ATH9K_INT_MIB |
382 ATH9K_INT_RXPHY |
383 ATH9K_INT_RXKCM |
384 ATH9K_INT_SWBA |
385 ATH9K_INT_BMISS |
386 ATH9K_INT_GPIO,
387 ATH9K_INT_NOCARD = 0xffffffff
388};
389
390struct ath9k_rate_table {
391 int rateCount;
392 u8 rateCodeToIndex[256];
393 struct {
394 u8 valid;
395 u8 phy;
396 u32 rateKbps;
397 u8 rateCode;
398 u8 shortPreamble;
399 u8 dot11Rate;
400 u8 controlRate;
401 u16 lpAckDuration;
402 u16 spAckDuration;
403 } info[32];
404};
405
406#define ATH9K_RATESERIES_RTS_CTS 0x0001
407#define ATH9K_RATESERIES_2040 0x0002
408#define ATH9K_RATESERIES_HALFGI 0x0004
409
410struct ath9k_11n_rate_series {
411 u32 Tries;
412 u32 Rate;
413 u32 PktDuration;
414 u32 ChSel;
415 u32 RateFlags;
416};
417
418#define CHANNEL_CW_INT 0x00002
419#define CHANNEL_CCK 0x00020
420#define CHANNEL_OFDM 0x00040
421#define CHANNEL_2GHZ 0x00080
422#define CHANNEL_5GHZ 0x00100
423#define CHANNEL_PASSIVE 0x00200
424#define CHANNEL_DYN 0x00400
425#define CHANNEL_HALF 0x04000
426#define CHANNEL_QUARTER 0x08000
427#define CHANNEL_HT20 0x10000
428#define CHANNEL_HT40PLUS 0x20000
429#define CHANNEL_HT40MINUS 0x40000
430
431#define CHANNEL_INTERFERENCE 0x01
432#define CHANNEL_DFS 0x02
433#define CHANNEL_4MS_LIMIT 0x04
434#define CHANNEL_DFS_CLEAR 0x08
435#define CHANNEL_DISALLOW_ADHOC 0x10
436#define CHANNEL_PER_11D_ADHOC 0x20
437
438#define CHANNEL_A (CHANNEL_5GHZ|CHANNEL_OFDM)
439#define CHANNEL_B (CHANNEL_2GHZ|CHANNEL_CCK)
440#define CHANNEL_G (CHANNEL_2GHZ|CHANNEL_OFDM)
441#define CHANNEL_G_HT20 (CHANNEL_2GHZ|CHANNEL_HT20)
442#define CHANNEL_A_HT20 (CHANNEL_5GHZ|CHANNEL_HT20)
443#define CHANNEL_G_HT40PLUS (CHANNEL_2GHZ|CHANNEL_HT40PLUS)
444#define CHANNEL_G_HT40MINUS (CHANNEL_2GHZ|CHANNEL_HT40MINUS)
445#define CHANNEL_A_HT40PLUS (CHANNEL_5GHZ|CHANNEL_HT40PLUS)
446#define CHANNEL_A_HT40MINUS (CHANNEL_5GHZ|CHANNEL_HT40MINUS)
447#define CHANNEL_ALL \
448 (CHANNEL_OFDM| \
449 CHANNEL_CCK| \
450 CHANNEL_2GHZ | \
451 CHANNEL_5GHZ | \
452 CHANNEL_HT20 | \
453 CHANNEL_HT40PLUS | \
454 CHANNEL_HT40MINUS)
455
456struct ath9k_channel {
457 u16 channel;
458 u32 channelFlags;
459 u8 privFlags;
460 int8_t maxRegTxPower;
461 int8_t maxTxPower;
462 int8_t minTxPower;
463 u32 chanmode;
464 int32_t CalValid;
465 bool oneTimeCalsDone;
466 int8_t iCoff;
467 int8_t qCoff;
468 int16_t rawNoiseFloor;
469 int8_t antennaMax;
470 u32 regDmnFlags;
471 u32 conformanceTestLimit[3]; /* 0:11a, 1: 11b, 2:11g */
472#ifdef ATH_NF_PER_CHAN
473 struct ath9k_nfcal_hist nfCalHist[NUM_NF_READINGS];
474#endif
475};
476
477#define IS_CHAN_A(_c) ((((_c)->channelFlags & CHANNEL_A) == CHANNEL_A) || \
478 (((_c)->channelFlags & CHANNEL_A_HT20) == CHANNEL_A_HT20) || \
479 (((_c)->channelFlags & CHANNEL_A_HT40PLUS) == CHANNEL_A_HT40PLUS) || \
480 (((_c)->channelFlags & CHANNEL_A_HT40MINUS) == CHANNEL_A_HT40MINUS))
481#define IS_CHAN_B(_c) (((_c)->channelFlags & CHANNEL_B) == CHANNEL_B)
482#define IS_CHAN_G(_c) ((((_c)->channelFlags & (CHANNEL_G)) == CHANNEL_G) || \
483 (((_c)->channelFlags & CHANNEL_G_HT20) == CHANNEL_G_HT20) || \
484 (((_c)->channelFlags & CHANNEL_G_HT40PLUS) == CHANNEL_G_HT40PLUS) || \
485 (((_c)->channelFlags & CHANNEL_G_HT40MINUS) == CHANNEL_G_HT40MINUS))
486#define IS_CHAN_CCK(_c) (((_c)->channelFlags & CHANNEL_CCK) != 0)
487#define IS_CHAN_OFDM(_c) (((_c)->channelFlags & CHANNEL_OFDM) != 0)
488#define IS_CHAN_5GHZ(_c) (((_c)->channelFlags & CHANNEL_5GHZ) != 0)
489#define IS_CHAN_2GHZ(_c) (((_c)->channelFlags & CHANNEL_2GHZ) != 0)
490#define IS_CHAN_PASSIVE(_c) (((_c)->channelFlags & CHANNEL_PASSIVE) != 0)
491#define IS_CHAN_HALF_RATE(_c) (((_c)->channelFlags & CHANNEL_HALF) != 0)
492#define IS_CHAN_QUARTER_RATE(_c) (((_c)->channelFlags & CHANNEL_QUARTER) != 0)
493
494/* These macros check chanmode and not channelFlags */
495#define IS_CHAN_HT20(_c) (((_c)->chanmode == CHANNEL_A_HT20) || \
496 ((_c)->chanmode == CHANNEL_G_HT20))
497#define IS_CHAN_HT40(_c) (((_c)->chanmode == CHANNEL_A_HT40PLUS) || \
498 ((_c)->chanmode == CHANNEL_A_HT40MINUS) || \
499 ((_c)->chanmode == CHANNEL_G_HT40PLUS) || \
500 ((_c)->chanmode == CHANNEL_G_HT40MINUS))
501#define IS_CHAN_HT(_c) (IS_CHAN_HT20((_c)) || IS_CHAN_HT40((_c)))
502
503#define IS_CHAN_IN_PUBLIC_SAFETY_BAND(_c) ((_c) > 4940 && (_c) < 4990)
504#define IS_CHAN_A_5MHZ_SPACED(_c) \
505 ((((_c)->channelFlags & CHANNEL_5GHZ) != 0) && \
506 (((_c)->channel % 20) != 0) && \
507 (((_c)->channel % 10) != 0))
508
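These checks are plain bitmask tests on channelFlags (band/modulation) and, for the HT variants, equality tests on chanmode, as the comment above notes. A trimmed, self-contained illustration using an invented 2.4GHz HT20 channel (the struct is reduced to the fields the macros actually touch):

#include <stdint.h>
#include <stdio.h>

#define CHANNEL_OFDM	0x00040
#define CHANNEL_2GHZ	0x00080
#define CHANNEL_5GHZ	0x00100
#define CHANNEL_HT20	0x10000

#define CHANNEL_G	(CHANNEL_2GHZ|CHANNEL_OFDM)
#define CHANNEL_G_HT20	(CHANNEL_2GHZ|CHANNEL_HT20)
#define CHANNEL_A_HT20	(CHANNEL_5GHZ|CHANNEL_HT20)

struct chan {			/* stand-in for the relevant ath9k_channel fields */
	uint16_t channel;
	uint32_t channelFlags;
	uint32_t chanmode;
};

#define IS_CHAN_2GHZ(_c)  (((_c)->channelFlags & CHANNEL_2GHZ) != 0)
#define IS_CHAN_HT20(_c)  (((_c)->chanmode == CHANNEL_A_HT20) || \
			   ((_c)->chanmode == CHANNEL_G_HT20))

int main(void)
{
	struct chan c = {
		.channel      = 2412,			/* invented example channel */
		.channelFlags = CHANNEL_G | CHANNEL_HT20,
		.chanmode     = CHANNEL_G_HT20,
	};

	printf("2GHz: %d  HT20: %d\n", IS_CHAN_2GHZ(&c), IS_CHAN_HT20(&c));
	return 0;
}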
509struct ath9k_keyval {
510 u8 kv_type;
511 u8 kv_pad;
512 u16 kv_len;
513 u8 kv_val[16];
514 u8 kv_mic[8];
515 u8 kv_txmic[8];
516};
517
518enum ath9k_key_type {
519 ATH9K_KEY_TYPE_CLEAR,
520 ATH9K_KEY_TYPE_WEP,
521 ATH9K_KEY_TYPE_AES,
522 ATH9K_KEY_TYPE_TKIP,
523};
524
525enum ath9k_cipher {
526 ATH9K_CIPHER_WEP = 0,
527 ATH9K_CIPHER_AES_OCB = 1,
528 ATH9K_CIPHER_AES_CCM = 2,
529 ATH9K_CIPHER_CKIP = 3,
530 ATH9K_CIPHER_TKIP = 4,
531 ATH9K_CIPHER_CLR = 5,
532 ATH9K_CIPHER_MIC = 127
533};
534
535#define AR_EEPROM_EEPCAP_COMPRESS_DIS 0x0001
536#define AR_EEPROM_EEPCAP_AES_DIS 0x0002
537#define AR_EEPROM_EEPCAP_FASTFRAME_DIS 0x0004
538#define AR_EEPROM_EEPCAP_BURST_DIS 0x0008
539#define AR_EEPROM_EEPCAP_MAXQCU 0x01F0
540#define AR_EEPROM_EEPCAP_MAXQCU_S 4
541#define AR_EEPROM_EEPCAP_HEAVY_CLIP_EN 0x0200
542#define AR_EEPROM_EEPCAP_KC_ENTRIES 0xF000
543#define AR_EEPROM_EEPCAP_KC_ENTRIES_S 12
544
545#define AR_EEPROM_EEREGCAP_EN_FCC_MIDBAND 0x0040
546#define AR_EEPROM_EEREGCAP_EN_KK_U1_EVEN 0x0080
547#define AR_EEPROM_EEREGCAP_EN_KK_U2 0x0100
548#define AR_EEPROM_EEREGCAP_EN_KK_MIDBAND 0x0200
549#define AR_EEPROM_EEREGCAP_EN_KK_U1_ODD 0x0400
550#define AR_EEPROM_EEREGCAP_EN_KK_NEW_11A 0x0800
551
552#define AR_EEPROM_EEREGCAP_EN_KK_U1_ODD_PRE4_0 0x4000
553#define AR_EEPROM_EEREGCAP_EN_KK_NEW_11A_PRE4_0 0x8000
554
555#define SD_NO_CTL 0xE0
556#define NO_CTL 0xff
557#define CTL_MODE_M 7
558#define CTL_11A 0
559#define CTL_11B 1
560#define CTL_11G 2
561#define CTL_2GHT20 5
562#define CTL_5GHT20 6
563#define CTL_2GHT40 7
564#define CTL_5GHT40 8
565
566#define AR_EEPROM_MAC(i) (0x1d+(i))
567#define EEP_SCALE 100
568#define EEP_DELTA 10
569
570#define AR_EEPROM_RFSILENT_GPIO_SEL 0x001c
571#define AR_EEPROM_RFSILENT_GPIO_SEL_S 2
572#define AR_EEPROM_RFSILENT_POLARITY 0x0002
573#define AR_EEPROM_RFSILENT_POLARITY_S 1
574
575#define CTRY_DEBUG 0x1ff
576#define CTRY_DEFAULT 0
577
578enum reg_ext_bitmap {
579 REG_EXT_JAPAN_MIDBAND = 1,
580 REG_EXT_FCC_DFS_HT40 = 2,
581 REG_EXT_JAPAN_NONDFS_HT40 = 3,
582 REG_EXT_JAPAN_DFS_HT40 = 4
583};
584
585struct ath9k_country_entry {
586 u16 countryCode;
587 u16 regDmnEnum;
588 u16 regDmn5G;
589 u16 regDmn2G;
590 u8 isMultidomain;
591 u8 iso[3];
592};
593
594#define REG_WRITE(_ah, _reg, _val) iowrite32(_val, _ah->ah_sh + _reg)
595#define REG_READ(_ah, _reg) ioread32(_ah->ah_sh + _reg)
596
597#define SM(_v, _f) (((_v) << _f##_S) & _f)
598#define MS(_v, _f) (((_v) & _f) >> _f##_S)
599#define REG_RMW(_a, _r, _set, _clr) \
600 REG_WRITE(_a, _r, (REG_READ(_a, _r) & ~(_clr)) | (_set))
601#define REG_RMW_FIELD(_a, _r, _f, _v) \
602 REG_WRITE(_a, _r, \
603 (REG_READ(_a, _r) & ~_f) | (((_v) << _f##_S) & _f))
604#define REG_SET_BIT(_a, _r, _f) \
605 REG_WRITE(_a, _r, REG_READ(_a, _r) | _f)
606#define REG_CLR_BIT(_a, _r, _f) \
607 REG_WRITE(_a, _r, REG_READ(_a, _r) & ~_f)
608
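SM(), MS() and REG_RMW_FIELD() rely on a naming convention: every field mask FOO comes with a matching FOO_S shift, and the _f##_S token pasting stitches the two together. A minimal pack/unpack sketch with a made-up field (not a real ath9k register):

#include <stdint.h>
#include <stdio.h>

/* Made-up example field occupying bits 12..15 of a register. */
#define EXAMPLE_FIELD	0x0000f000
#define EXAMPLE_FIELD_S	12

#define SM(_v, _f)	(((_v) << _f##_S) & _f)
#define MS(_v, _f)	(((_v) & _f) >> _f##_S)

int main(void)
{
	uint32_t reg = 0;

	reg |= SM(0x9, EXAMPLE_FIELD);		/* pack 0x9 into bits 12..15 */
	printf("reg = 0x%08x, field = 0x%x\n",
	       (unsigned int)reg, (unsigned int)MS(reg, EXAMPLE_FIELD));
	return 0;
}

REG_RMW_FIELD() applies the same mask-and-shift logic against a live register through REG_READ()/REG_WRITE().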
609#define ATH9K_COMP_BUF_MAX_SIZE 9216
610#define ATH9K_COMP_BUF_ALIGN_SIZE 512
611
612#define ATH9K_TXQ_USE_LOCKOUT_BKOFF_DIS 0x00000001
613
614#define INIT_AIFS 2
615#define INIT_CWMIN 15
616#define INIT_CWMIN_11B 31
617#define INIT_CWMAX 1023
618#define INIT_SH_RETRY 10
619#define INIT_LG_RETRY 10
620#define INIT_SSH_RETRY 32
621#define INIT_SLG_RETRY 32
622
623#define WLAN_CTRL_FRAME_SIZE (2+2+6+4)
624
625#define ATH_AMPDU_LIMIT_MAX (64 * 1024 - 1)
626#define ATH_AMPDU_LIMIT_DEFAULT ATH_AMPDU_LIMIT_MAX
627
628#define IEEE80211_WEP_IVLEN 3
629#define IEEE80211_WEP_KIDLEN 1
630#define IEEE80211_WEP_CRCLEN 4
631#define IEEE80211_MAX_MPDU_LEN (3840 + FCS_LEN + \
632 (IEEE80211_WEP_IVLEN + \
633 IEEE80211_WEP_KIDLEN + \
634 IEEE80211_WEP_CRCLEN))
635#define IEEE80211_MAX_LEN (2300 + FCS_LEN + \
636 (IEEE80211_WEP_IVLEN + \
637 IEEE80211_WEP_KIDLEN + \
638 IEEE80211_WEP_CRCLEN))
639
640#define MAX_REG_ADD_COUNT 129
641#define MAX_RATE_POWER 63
642
643enum ath9k_power_mode {
644 ATH9K_PM_AWAKE = 0,
645 ATH9K_PM_FULL_SLEEP,
646 ATH9K_PM_NETWORK_SLEEP,
647 ATH9K_PM_UNDEFINED
648};
649
650struct ath9k_mib_stats {
651 u32 ackrcv_bad;
652 u32 rts_bad;
653 u32 rts_good;
654 u32 fcs_bad;
655 u32 beacons;
656};
657
658enum ath9k_ant_setting {
659 ATH9K_ANT_VARIABLE = 0,
660 ATH9K_ANT_FIXED_A,
661 ATH9K_ANT_FIXED_B
662};
663
664enum ath9k_opmode {
665 ATH9K_M_STA = 1,
666 ATH9K_M_IBSS = 0,
667 ATH9K_M_HOSTAP = 6,
668 ATH9K_M_MONITOR = 8
669};
670
671#define ATH9K_SLOT_TIME_6 6
672#define ATH9K_SLOT_TIME_9 9
673#define ATH9K_SLOT_TIME_20 20
674
675enum ath9k_ht_macmode {
676 ATH9K_HT_MACMODE_20 = 0,
677 ATH9K_HT_MACMODE_2040 = 1,
678};
679
680enum ath9k_ht_extprotspacing {
681 ATH9K_HT_EXTPROTSPACING_20 = 0,
682 ATH9K_HT_EXTPROTSPACING_25 = 1,
683};
684
685struct ath9k_ht_cwm {
686 enum ath9k_ht_macmode ht_macmode;
687 enum ath9k_ht_extprotspacing ht_extprotspacing;
688};
689
690enum ath9k_ani_cmd {
691 ATH9K_ANI_PRESENT = 0x1,
692 ATH9K_ANI_NOISE_IMMUNITY_LEVEL = 0x2,
693 ATH9K_ANI_OFDM_WEAK_SIGNAL_DETECTION = 0x4,
694 ATH9K_ANI_CCK_WEAK_SIGNAL_THR = 0x8,
695 ATH9K_ANI_FIRSTEP_LEVEL = 0x10,
696 ATH9K_ANI_SPUR_IMMUNITY_LEVEL = 0x20,
697 ATH9K_ANI_MODE = 0x40,
698 ATH9K_ANI_PHYERR_RESET = 0x80,
699 ATH9K_ANI_ALL = 0xff
700};
701
702enum phytype {
703 PHY_DS,
704 PHY_FH,
705 PHY_OFDM,
706 PHY_HT,
707};
708#define PHY_CCK PHY_DS
709
710enum start_adhoc_option {
711 START_ADHOC_NO_11A,
712 START_ADHOC_PER_11D,
713 START_ADHOC_IN_11A,
714 START_ADHOC_IN_11B,
715};
716
717enum ath9k_tp_scale {
718 ATH9K_TP_SCALE_MAX = 0,
719 ATH9K_TP_SCALE_50,
720 ATH9K_TP_SCALE_25,
721 ATH9K_TP_SCALE_12,
722 ATH9K_TP_SCALE_MIN
723};
724
725enum ser_reg_mode {
726 SER_REG_MODE_OFF = 0,
727 SER_REG_MODE_ON = 1,
728 SER_REG_MODE_AUTO = 2,
729};
730
731#define AR_PHY_CCA_MAX_GOOD_VALUE -85
732#define AR_PHY_CCA_MAX_HIGH_VALUE -62
733#define AR_PHY_CCA_MIN_BAD_VALUE -121
734#define AR_PHY_CCA_FILTERWINDOW_LENGTH_INIT 3
735#define AR_PHY_CCA_FILTERWINDOW_LENGTH 5
736
737#define ATH9K_NF_CAL_HIST_MAX 5
738#define NUM_NF_READINGS 6
739
740struct ath9k_nfcal_hist {
741 int16_t nfCalBuffer[ATH9K_NF_CAL_HIST_MAX];
742 u8 currIndex;
743 int16_t privNF;
744 u8 invalidNFcount;
745};
746
747struct ath9k_beacon_state {
748 u32 bs_nexttbtt;
749 u32 bs_nextdtim;
750 u32 bs_intval;
751#define ATH9K_BEACON_PERIOD 0x0000ffff
752#define ATH9K_BEACON_ENA 0x00800000
753#define ATH9K_BEACON_RESET_TSF 0x01000000
754 u32 bs_dtimperiod;
755 u16 bs_cfpperiod;
756 u16 bs_cfpmaxduration;
757 u32 bs_cfpnext;
758 u16 bs_timoffset;
759 u16 bs_bmissthreshold;
760 u32 bs_sleepduration;
761};
762
763struct ath9k_node_stats {
764 u32 ns_avgbrssi;
765 u32 ns_avgrssi;
766 u32 ns_avgtxrssi;
767 u32 ns_avgtxrate;
768};
769
770#define ATH9K_RSSI_EP_MULTIPLIER (1<<7)
771
772enum ath9k_gpio_output_mux_type {
773 ATH9K_GPIO_OUTPUT_MUX_AS_OUTPUT,
774 ATH9K_GPIO_OUTPUT_MUX_AS_PCIE_ATTENTION_LED,
775 ATH9K_GPIO_OUTPUT_MUX_AS_PCIE_POWER_LED,
776 ATH9K_GPIO_OUTPUT_MUX_AS_MAC_NETWORK_LED,
777 ATH9K_GPIO_OUTPUT_MUX_AS_MAC_POWER_LED,
778 ATH9K_GPIO_OUTPUT_MUX_NUM_ENTRIES
779};
780
781enum {
782 ATH9K_RESET_POWER_ON,
783 ATH9K_RESET_WARM,
784 ATH9K_RESET_COLD,
785};
786
787#define AH_USE_EEPROM 0x1
788
789struct ath_hal {
790 u32 ah_magic;
791 u16 ah_devid;
792 u16 ah_subvendorid;
793 struct ath_softc *ah_sc;
794 void __iomem *ah_sh;
795 u16 ah_countryCode;
796 u32 ah_macVersion;
797 u16 ah_macRev;
798 u16 ah_phyRev;
799 u16 ah_analog5GhzRev;
800 u16 ah_analog2GhzRev;
801 u8 ah_decompMask[ATH9K_DECOMP_MASK_SIZE];
802 u32 ah_flags;
803 enum ath9k_opmode ah_opmode;
804 struct ath9k_ops_config ah_config;
805 struct ath9k_hw_capabilities ah_caps;
806 int16_t ah_powerLimit;
807 u16 ah_maxPowerLevel;
808 u32 ah_tpScale;
809 u16 ah_currentRD;
810 u16 ah_currentRDExt;
811 u16 ah_currentRDInUse;
812 u16 ah_currentRD5G;
813 u16 ah_currentRD2G;
814 char ah_iso[4];
815 enum start_adhoc_option ah_adHocMode;
816 bool ah_commonMode;
817 struct ath9k_channel ah_channels[150];
818 u32 ah_nchan;
819 struct ath9k_channel *ah_curchan;
820 u16 ah_rfsilent;
821 bool ah_rfkillEnabled;
822 bool ah_isPciExpress;
823 u16 ah_txTrigLevel;
824#ifndef ATH_NF_PER_CHAN
825 struct ath9k_nfcal_hist nfCalHist[NUM_NF_READINGS];
826#endif
827};
828
829struct chan_centers {
830 u16 synth_center;
831 u16 ctl_center;
832 u16 ext_center;
833};
834
835int ath_hal_getcapability(struct ath_hal *ah,
836 enum ath9k_capability_type type,
837 u32 capability,
838 u32 *result);
839const struct ath9k_rate_table *ath9k_hw_getratetable(struct ath_hal *ah,
840 u32 mode);
841void ath9k_hw_detach(struct ath_hal *ah);
842struct ath_hal *ath9k_hw_attach(u16 devid,
843 struct ath_softc *sc,
844 void __iomem *mem,
845 int *error);
846bool ath9k_regd_init_channels(struct ath_hal *ah,
847 u32 maxchans, u32 *nchans,
848 u8 *regclassids,
849 u32 maxregids, u32 *nregids,
850 u16 cc,
851 bool enableOutdoor,
852 bool enableExtendedChannels);
853u32 ath9k_hw_mhz2ieee(struct ath_hal *ah, u32 freq, u32 flags);
854enum ath9k_int ath9k_hw_set_interrupts(struct ath_hal *ah,
855 enum ath9k_int ints);
856bool ath9k_hw_reset(struct ath_hal *ah, enum ath9k_opmode opmode,
857 struct ath9k_channel *chan,
858 enum ath9k_ht_macmode macmode,
859 u8 txchainmask, u8 rxchainmask,
860 enum ath9k_ht_extprotspacing extprotspacing,
861 bool bChannelChange,
862 int *status);
863bool ath9k_hw_phy_disable(struct ath_hal *ah);
864void ath9k_hw_reset_calvalid(struct ath_hal *ah, struct ath9k_channel *chan,
865 bool *isCalDone);
866void ath9k_hw_ani_monitor(struct ath_hal *ah,
867 const struct ath9k_node_stats *stats,
868 struct ath9k_channel *chan);
869bool ath9k_hw_calibrate(struct ath_hal *ah,
870 struct ath9k_channel *chan,
871 u8 rxchainmask,
872 bool longcal,
873 bool *isCalDone);
874int16_t ath9k_hw_getchan_noise(struct ath_hal *ah,
875 struct ath9k_channel *chan);
876void ath9k_hw_write_associd(struct ath_hal *ah, const u8 *bssid,
877 u16 assocId);
878void ath9k_hw_setrxfilter(struct ath_hal *ah, u32 bits);
879void ath9k_hw_write_associd(struct ath_hal *ah, const u8 *bssid,
880 u16 assocId);
881bool ath9k_hw_stoptxdma(struct ath_hal *ah, u32 q);
882void ath9k_hw_reset_tsf(struct ath_hal *ah);
883bool ath9k_hw_keyisvalid(struct ath_hal *ah, u16 entry);
884bool ath9k_hw_keysetmac(struct ath_hal *ah, u16 entry,
885 const u8 *mac);
886bool ath9k_hw_set_keycache_entry(struct ath_hal *ah,
887 u16 entry,
888 const struct ath9k_keyval *k,
889 const u8 *mac,
890 int xorKey);
891bool ath9k_hw_set_tsfadjust(struct ath_hal *ah,
892 u32 setting);
893void ath9k_hw_configpcipowersave(struct ath_hal *ah, int restore);
894bool ath9k_hw_intrpend(struct ath_hal *ah);
895bool ath9k_hw_getisr(struct ath_hal *ah, enum ath9k_int *masked);
896bool ath9k_hw_updatetxtriglevel(struct ath_hal *ah,
897 bool bIncTrigLevel);
898void ath9k_hw_procmibevent(struct ath_hal *ah,
899 const struct ath9k_node_stats *stats);
900bool ath9k_hw_setrxabort(struct ath_hal *ah, bool set);
901void ath9k_hw_set11nmac2040(struct ath_hal *ah, enum ath9k_ht_macmode mode);
902bool ath9k_hw_phycounters(struct ath_hal *ah);
903bool ath9k_hw_keyreset(struct ath_hal *ah, u16 entry);
904bool ath9k_hw_getcapability(struct ath_hal *ah,
905 enum ath9k_capability_type type,
906 u32 capability,
907 u32 *result);
908bool ath9k_hw_setcapability(struct ath_hal *ah,
909 enum ath9k_capability_type type,
910 u32 capability,
911 u32 setting,
912 int *status);
913u32 ath9k_hw_getdefantenna(struct ath_hal *ah);
914void ath9k_hw_getmac(struct ath_hal *ah, u8 *mac);
915void ath9k_hw_getbssidmask(struct ath_hal *ah, u8 *mask);
916bool ath9k_hw_setbssidmask(struct ath_hal *ah,
917 const u8 *mask);
918bool ath9k_hw_setpower(struct ath_hal *ah,
919 enum ath9k_power_mode mode);
920enum ath9k_int ath9k_hw_intrget(struct ath_hal *ah);
921u64 ath9k_hw_gettsf64(struct ath_hal *ah);
922u32 ath9k_hw_getdefantenna(struct ath_hal *ah);
923bool ath9k_hw_setslottime(struct ath_hal *ah, u32 us);
924bool ath9k_hw_setantennaswitch(struct ath_hal *ah,
925 enum ath9k_ant_setting settings,
926 struct ath9k_channel *chan,
927 u8 *tx_chainmask,
928 u8 *rx_chainmask,
929 u8 *antenna_cfgd);
930void ath9k_hw_setantenna(struct ath_hal *ah, u32 antenna);
931int ath9k_hw_select_antconfig(struct ath_hal *ah,
932 u32 cfg);
933bool ath9k_hw_puttxbuf(struct ath_hal *ah, u32 q,
934 u32 txdp);
935bool ath9k_hw_txstart(struct ath_hal *ah, u32 q);
936u16 ath9k_hw_computetxtime(struct ath_hal *ah,
937 const struct ath9k_rate_table *rates,
938 u32 frameLen, u16 rateix,
939 bool shortPreamble);
940void ath9k_hw_set11n_ratescenario(struct ath_hal *ah, struct ath_desc *ds,
941 struct ath_desc *lastds,
942 u32 durUpdateEn, u32 rtsctsRate,
943 u32 rtsctsDuration,
944 struct ath9k_11n_rate_series series[],
945 u32 nseries, u32 flags);
946void ath9k_hw_set11n_burstduration(struct ath_hal *ah,
947 struct ath_desc *ds,
948 u32 burstDuration);
949void ath9k_hw_cleartxdesc(struct ath_hal *ah, struct ath_desc *ds);
950u32 ath9k_hw_reverse_bits(u32 val, u32 n);
951bool ath9k_hw_resettxqueue(struct ath_hal *ah, u32 q);
952u32 ath9k_regd_get_ctl(struct ath_hal *ah, struct ath9k_channel *chan);
953u32 ath9k_regd_get_antenna_allowed(struct ath_hal *ah,
954 struct ath9k_channel *chan);
955u32 ath9k_hw_mhz2ieee(struct ath_hal *ah, u32 freq, u32 flags);
956bool ath9k_hw_get_txq_props(struct ath_hal *ah, int q,
957 struct ath9k_tx_queue_info *qinfo);
958bool ath9k_hw_set_txq_props(struct ath_hal *ah, int q,
959 const struct ath9k_tx_queue_info *qinfo);
960struct ath9k_channel *ath9k_regd_check_channel(struct ath_hal *ah,
961 const struct ath9k_channel *c);
962void ath9k_hw_set11n_txdesc(struct ath_hal *ah, struct ath_desc *ds,
963 u32 pktLen, enum ath9k_pkt_type type,
964 u32 txPower, u32 keyIx,
965 enum ath9k_key_type keyType, u32 flags);
966bool ath9k_hw_filltxdesc(struct ath_hal *ah, struct ath_desc *ds,
967 u32 segLen, bool firstSeg,
968 bool lastSeg,
969 const struct ath_desc *ds0);
970u32 ath9k_hw_GetMibCycleCountsPct(struct ath_hal *ah,
971 u32 *rxc_pcnt,
972 u32 *rxf_pcnt,
973 u32 *txf_pcnt);
974void ath9k_hw_dmaRegDump(struct ath_hal *ah);
975void ath9k_hw_beaconinit(struct ath_hal *ah,
976 u32 next_beacon, u32 beacon_period);
977void ath9k_hw_set_sta_beacon_timers(struct ath_hal *ah,
978 const struct ath9k_beacon_state *bs);
979bool ath9k_hw_setuprxdesc(struct ath_hal *ah, struct ath_desc *ds,
980 u32 size, u32 flags);
981void ath9k_hw_putrxbuf(struct ath_hal *ah, u32 rxdp);
982void ath9k_hw_rxena(struct ath_hal *ah);
983void ath9k_hw_setopmode(struct ath_hal *ah);
984bool ath9k_hw_setmac(struct ath_hal *ah, const u8 *mac);
985void ath9k_hw_setmcastfilter(struct ath_hal *ah, u32 filter0,
986 u32 filter1);
987u32 ath9k_hw_getrxfilter(struct ath_hal *ah);
988void ath9k_hw_startpcureceive(struct ath_hal *ah);
989void ath9k_hw_stoppcurecv(struct ath_hal *ah);
990bool ath9k_hw_stopdmarecv(struct ath_hal *ah);
991int ath9k_hw_rxprocdesc(struct ath_hal *ah,
992 struct ath_desc *ds, u32 pa,
993 struct ath_desc *nds, u64 tsf);
994u32 ath9k_hw_gettxbuf(struct ath_hal *ah, u32 q);
995int ath9k_hw_txprocdesc(struct ath_hal *ah,
996 struct ath_desc *ds);
997void ath9k_hw_set11n_aggr_middle(struct ath_hal *ah, struct ath_desc *ds,
998 u32 numDelims);
999void ath9k_hw_set11n_aggr_first(struct ath_hal *ah, struct ath_desc *ds,
1000 u32 aggrLen);
1001void ath9k_hw_set11n_aggr_last(struct ath_hal *ah, struct ath_desc *ds);
1002bool ath9k_hw_releasetxqueue(struct ath_hal *ah, u32 q);
1003void ath9k_hw_gettxintrtxqs(struct ath_hal *ah, u32 *txqs);
1004void ath9k_hw_clr11n_aggr(struct ath_hal *ah, struct ath_desc *ds);
1005void ath9k_hw_set11n_virtualmorefrag(struct ath_hal *ah,
1006 struct ath_desc *ds, u32 vmf);
1007bool ath9k_hw_set_txpowerlimit(struct ath_hal *ah, u32 limit);
1008bool ath9k_regd_is_public_safety_sku(struct ath_hal *ah);
1009int ath9k_hw_setuptxqueue(struct ath_hal *ah, enum ath9k_tx_queue type,
1010 const struct ath9k_tx_queue_info *qinfo);
1011u32 ath9k_hw_numtxpending(struct ath_hal *ah, u32 q);
1012const char *ath9k_hw_probe(u16 vendorid, u16 devid);
1013bool ath9k_hw_disable(struct ath_hal *ah);
1014void ath9k_hw_rfdetach(struct ath_hal *ah);
1015void ath9k_hw_get_channel_centers(struct ath_hal *ah,
1016 struct ath9k_channel *chan,
1017 struct chan_centers *centers);
1018bool ath9k_get_channel_edges(struct ath_hal *ah,
1019 u16 flags, u16 *low,
1020 u16 *high);
1021#endif
diff --git a/drivers/net/wireless/ath9k/beacon.c b/drivers/net/wireless/ath9k/beacon.c
new file mode 100644
index 000000000000..caf569401a34
--- /dev/null
+++ b/drivers/net/wireless/ath9k/beacon.c
@@ -0,0 +1,979 @@
1/*
2 * Copyright (c) 2008 Atheros Communications Inc.
3 *
4 * Permission to use, copy, modify, and/or distribute this software for any
5 * purpose with or without fee is hereby granted, provided that the above
6 * copyright notice and this permission notice appear in all copies.
7 *
8 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
9 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
10 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
11 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
12 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
13 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
14 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
15 */
16
17 /* Implementation of beacon processing. */
18
19#include <asm/unaligned.h>
20#include "core.h"
21
22/*
23 * Configure parameters for the beacon queue
24 *
25 * This function will modify certain transmit queue properties depending on
26 * the operating mode of the station (AP or AdHoc). Parameters are the AIFS
27 * setting and the contention window (cwmin/cwmax).
28*/
29
30static int ath_beaconq_config(struct ath_softc *sc)
31{
32 struct ath_hal *ah = sc->sc_ah;
33 struct ath9k_tx_queue_info qi;
34
35 ath9k_hw_get_txq_props(ah, sc->sc_bhalq, &qi);
36 if (sc->sc_opmode == ATH9K_M_HOSTAP) {
37 /* Always burst out beacon and CAB traffic. */
38 qi.tqi_aifs = 1;
39 qi.tqi_cwmin = 0;
40 qi.tqi_cwmax = 0;
41 } else {
42 /* Adhoc mode; important thing is to use 2x cwmin. */
43 qi.tqi_aifs = sc->sc_beacon_qi.tqi_aifs;
44 qi.tqi_cwmin = 2*sc->sc_beacon_qi.tqi_cwmin;
45 qi.tqi_cwmax = sc->sc_beacon_qi.tqi_cwmax;
46 }
47
48 if (!ath9k_hw_set_txq_props(ah, sc->sc_bhalq, &qi)) {
49 DPRINTF(sc, ATH_DBG_FATAL,
50 "%s: unable to update h/w beacon queue parameters\n",
51 __func__);
52 return 0;
53 } else {
54 ath9k_hw_resettxqueue(ah, sc->sc_bhalq); /* push to h/w */
55 return 1;
56 }
57}
58
59/*
60 * Setup the beacon frame for transmit.
61 *
62 * Associates the beacon frame buffer with a transmit descriptor. Will set
63 * up all required antenna switch parameters, rate codes, and channel flags.
64 * Beacons are always sent out at the lowest rate, and are not retried.
65*/
66
67static void ath_beacon_setup(struct ath_softc *sc,
68 struct ath_vap *avp, struct ath_buf *bf)
69{
70 struct sk_buff *skb = (struct sk_buff *)bf->bf_mpdu;
71 struct ath_hal *ah = sc->sc_ah;
72 struct ath_desc *ds;
73 int flags, antenna;
74 const struct ath9k_rate_table *rt;
75 u8 rix, rate;
76 int ctsrate = 0;
77 int ctsduration = 0;
78 struct ath9k_11n_rate_series series[4];
79
80 DPRINTF(sc, ATH_DBG_BEACON, "%s: m %p len %u\n",
81 __func__, skb, skb->len);
82
83 /* setup descriptors */
84 ds = bf->bf_desc;
85
86 flags = ATH9K_TXDESC_NOACK;
87
88 if (sc->sc_opmode == ATH9K_M_IBSS &&
89 (ah->ah_caps.hw_caps & ATH9K_HW_CAP_VEOL)) {
90 ds->ds_link = bf->bf_daddr; /* self-linked */
91 flags |= ATH9K_TXDESC_VEOL;
92 /* Let hardware handle antenna switching. */
93 antenna = 0;
94 } else {
95 ds->ds_link = 0;
96 /*
97 * Switch antenna every beacon.
98 * Should only switch once per beacon period, not for every
99 * SWBA.
100 * XXX assumes two antennas
101 */
102 antenna = ((sc->ast_be_xmit / sc->sc_nbcnvaps) & 1 ? 2 : 1);
103 }
104
105 ds->ds_data = bf->bf_buf_addr;
106
107 /*
108 * Calculate rate code.
109 * XXX everything at min xmit rate
110 */
111 rix = 0;
112 rt = sc->sc_currates;
113 rate = rt->info[rix].rateCode;
114 if (sc->sc_flags & ATH_PREAMBLE_SHORT)
115 rate |= rt->info[rix].shortPreamble;
116
117 ath9k_hw_set11n_txdesc(ah, ds
118 , skb->len + FCS_LEN /* frame length */
119 , ATH9K_PKT_TYPE_BEACON /* Atheros packet type */
120 , avp->av_btxctl.txpower /* txpower XXX */
121 , ATH9K_TXKEYIX_INVALID /* no encryption */
122 , ATH9K_KEY_TYPE_CLEAR /* no encryption */
123 , flags /* no ack, veol for beacons */
124 );
125
126 /* NB: beacon's BufLen must be a multiple of 4 bytes */
127 ath9k_hw_filltxdesc(ah, ds
128 , roundup(skb->len, 4) /* buffer length */
129 , true /* first segment */
130 , true /* last segment */
131 , ds /* first descriptor */
132 );
133
134 memzero(series, sizeof(struct ath9k_11n_rate_series) * 4);
135 series[0].Tries = 1;
136 series[0].Rate = rate;
137 series[0].ChSel = sc->sc_tx_chainmask;
138 series[0].RateFlags = (ctsrate) ? ATH9K_RATESERIES_RTS_CTS : 0;
139 ath9k_hw_set11n_ratescenario(ah, ds, ds, 0,
140 ctsrate, ctsduration, series, 4, 0);
141}
142
143/* Move everything from the vap's mcast queue to the hardware cab queue.
144 * Caller must hold the mcastq lock and the cabq lock.
145 * XXX MORE_DATA bit?
146 */
147static void empty_mcastq_into_cabq(struct ath_hal *ah,
148 struct ath_txq *mcastq, struct ath_txq *cabq)
149{
150 struct ath_buf *bfmcast;
151
152 BUG_ON(list_empty(&mcastq->axq_q));
153
154 bfmcast = list_first_entry(&mcastq->axq_q, struct ath_buf, list);
155
156 /* link the descriptors */
157 if (!cabq->axq_link)
158 ath9k_hw_puttxbuf(ah, cabq->axq_qnum, bfmcast->bf_daddr);
159 else
160 *cabq->axq_link = bfmcast->bf_daddr;
161
162 /* append the private vap mcast list to the cabq */
163
164 cabq->axq_depth += mcastq->axq_depth;
165 cabq->axq_totalqueued += mcastq->axq_totalqueued;
166 cabq->axq_linkbuf = mcastq->axq_linkbuf;
167 cabq->axq_link = mcastq->axq_link;
168 list_splice_tail_init(&mcastq->axq_q, &cabq->axq_q);
169 mcastq->axq_depth = 0;
170 mcastq->axq_totalqueued = 0;
171 mcastq->axq_linkbuf = NULL;
172 mcastq->axq_link = NULL;
173}
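/*
 * A sketch of what the splice above achieves (using only names from this
 * file): the hardware follows tx descriptors through their ds_link words,
 * so pointing either the queue's tx descriptor pointer (via
 * ath9k_hw_puttxbuf, when the cabq is empty) or the last cabq descriptor's
 * link word (*axq_link) at the first mcast buffer's DMA address makes the
 * vap's private mcast chain a hardware continuation of the cab queue; the
 * software bookkeeping is then spliced to match.
 */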
174
175/* This is only run at DTIM. We move everything from the vap's mcast queue
176 * to the hardware cab queue. Caller must hold the mcastq lock. */
177static void trigger_mcastq(struct ath_hal *ah,
178 struct ath_txq *mcastq, struct ath_txq *cabq)
179{
180 spin_lock_bh(&cabq->axq_lock);
181
182 if (!list_empty(&mcastq->axq_q))
183 empty_mcastq_into_cabq(ah, mcastq, cabq);
184
185 /* cabq is gated by beacon so it is safe to start here */
186 if (!list_empty(&cabq->axq_q))
187 ath9k_hw_txstart(ah, cabq->axq_qnum);
188
189 spin_unlock_bh(&cabq->axq_lock);
190}
191
192/*
193 * Generate beacon frame and queue cab data for a vap.
194 *
195 * Updates the contents of the beacon frame. It is assumed that the buffer for
196 * the beacon frame has been allocated in the ATH object, and simply needs to
197 * be filled for this cycle. Also, any CAB (Content After Beacon) traffic will
198 * be added to the beacon frame at this point.
199*/
200static struct ath_buf *ath_beacon_generate(struct ath_softc *sc, int if_id)
201{
202 struct ath_hal *ah = sc->sc_ah;
203 struct ath_buf *bf;
204 struct ath_vap *avp;
205 struct sk_buff *skb;
206 int cabq_depth;
207 int mcastq_depth;
208 int is_beacon_dtim = 0;
209 unsigned int curlen;
210 struct ath_txq *cabq;
211 struct ath_txq *mcastq;
212 avp = sc->sc_vaps[if_id];
213 ASSERT(avp);
214
215 mcastq = &avp->av_mcastq;
216 cabq = sc->sc_cabq;
217
218
219 if (avp->av_bcbuf == NULL) {
220 DPRINTF(sc, ATH_DBG_BEACON, "%s: avp=%p av_bcbuf=%p\n",
221 __func__, avp, avp->av_bcbuf);
222 return NULL;
223 }
224 bf = avp->av_bcbuf;
225 skb = (struct sk_buff *) bf->bf_mpdu;
226
227 /*
228 * Update dynamic beacon contents. If this returns
229 * non-zero then we need to remap the memory because
230 * the beacon frame changed size (probably because
231 * of the TIM bitmap).
232 */
233 curlen = skb->len;
234
235 /* XXX: spin_lock_bh should not be used here, but sparse complains
236 * otherwise. We should fix sparse :) */
237 spin_lock_bh(&mcastq->axq_lock);
238 mcastq_depth = avp->av_mcastq.axq_depth;
239
240 if (ath_update_beacon(sc, if_id, &avp->av_boff, skb, mcastq_depth) ==
241 1) {
242 ath_skb_unmap_single(sc, skb, PCI_DMA_TODEVICE,
243 get_dma_mem_context(bf, bf_dmacontext));
244 bf->bf_buf_addr = ath_skb_map_single(sc, skb, PCI_DMA_TODEVICE,
245 get_dma_mem_context(bf, bf_dmacontext));
246 } else {
247 pci_dma_sync_single_for_cpu(sc->pdev,
248 bf->bf_buf_addr,
249 skb_tailroom(skb),
250 PCI_DMA_TODEVICE);
251 }
252
253 /*
254 * If CABQ traffic from the previous DTIM is still pending and the
255 * current beacon is also a DTIM:
256 * 1) if there is only one vap, let the CAB traffic continue.
257 * 2) if there is more than one vap and we are using staggered
258 * beacons, then drain the cabq by dropping all the frames in
259 * the cabq so that the current vap's CAB traffic can be scheduled.
260 */
261 spin_lock_bh(&cabq->axq_lock);
262 cabq_depth = cabq->axq_depth;
263 spin_unlock_bh(&cabq->axq_lock);
264
265 is_beacon_dtim = avp->av_boff.bo_tim[4] & 1;
266
267 if (mcastq_depth && is_beacon_dtim && cabq_depth) {
268 /*
269 * Unlock the cabq lock as ath_tx_draintxq acquires
270 * the lock again which is a common function and that
271 * acquires txq lock inside.
272 */
273 if (sc->sc_nvaps > 1) {
274 ath_tx_draintxq(sc, cabq, false);
275 DPRINTF(sc, ATH_DBG_BEACON,
276 "%s: flush previous cabq traffic\n", __func__);
277 }
278 }
279
280 /* Construct tx descriptor. */
281 ath_beacon_setup(sc, avp, bf);
282
283 /*
284 * Enable the CAB queue before the beacon queue to
285 * ensure CAB frames are triggered by this beacon.
286 */
287 if (is_beacon_dtim)
288 trigger_mcastq(ah, mcastq, cabq);
289
290 spin_unlock_bh(&mcastq->axq_lock);
291 return bf;
292}
293
294/*
295 * Start up beacon transmission for adhoc mode when beacons are sent entirely
296 * by the hardware using the self-linked descriptor + veol trick.
297*/
298
299static void ath_beacon_start_adhoc(struct ath_softc *sc, int if_id)
300{
301 struct ath_hal *ah = sc->sc_ah;
302 struct ath_buf *bf;
303 struct ath_vap *avp;
304 struct sk_buff *skb;
305
306 avp = sc->sc_vaps[if_id];
307 ASSERT(avp);
308
309 if (avp->av_bcbuf == NULL) {
310 DPRINTF(sc, ATH_DBG_BEACON, "%s: avp=%p av_bcbuf=%p\n",
311 __func__, avp, avp != NULL ? avp->av_bcbuf : NULL);
312 return;
313 }
314 bf = avp->av_bcbuf;
315 skb = (struct sk_buff *) bf->bf_mpdu;
316
317 /* Construct tx descriptor. */
318 ath_beacon_setup(sc, avp, bf);
319
320 /* NB: caller is known to have already stopped tx dma */
321 ath9k_hw_puttxbuf(ah, sc->sc_bhalq, bf->bf_daddr);
322 ath9k_hw_txstart(ah, sc->sc_bhalq);
323 DPRINTF(sc, ATH_DBG_BEACON, "%s: TXDP%u = %llx (%p)\n", __func__,
324 sc->sc_bhalq, ito64(bf->bf_daddr), bf->bf_desc);
325}
326
327/*
328 * Setup a h/w transmit queue for beacons.
329 *
330 * This function fills an information structure (struct ath9k_tx_queue_info)
331 * on the stack, sets some specific parameters (zero the contention window
332 * min/max, and set AIFS to 1). The info structure does not need to be
333 * persistent.
334*/
335
336int ath_beaconq_setup(struct ath_hal *ah)
337{
338 struct ath9k_tx_queue_info qi;
339
340 memzero(&qi, sizeof(qi));
341 qi.tqi_aifs = 1;
342 qi.tqi_cwmin = 0;
343 qi.tqi_cwmax = 0;
344 /* NB: don't enable any interrupts */
345 return ath9k_hw_setuptxqueue(ah, ATH9K_TX_QUEUE_BEACON, &qi);
346}
347
348
349/*
350 * Allocate and setup an initial beacon frame.
351 *
352 * Allocate a beacon state variable for a specific VAP instance created on
353 * the ATH interface. This routine also calculates the beacon "slot" for
354 * staggered beacons in the mBSSID case.
355*/
356
357int ath_beacon_alloc(struct ath_softc *sc, int if_id)
358{
359 struct ath_vap *avp;
360 struct ieee80211_hdr *wh;
361 struct ath_buf *bf;
362 struct sk_buff *skb;
363
364 avp = sc->sc_vaps[if_id];
365 ASSERT(avp);
366
367 /* Allocate a beacon descriptor if we haven't done so. */
368 if (!avp->av_bcbuf) {
369 /*
370 * Allocate beacon state for hostap/ibss. We know
371 * a buffer is available.
372 */
373
374 avp->av_bcbuf = list_first_entry(&sc->sc_bbuf,
375 struct ath_buf, list);
376 list_del(&avp->av_bcbuf->list);
377
378 if (sc->sc_opmode == ATH9K_M_HOSTAP ||
379 !(sc->sc_ah->ah_caps.hw_caps & ATH9K_HW_CAP_VEOL)) {
380 int slot;
381 /*
382 * Assign the vap to a beacon xmit slot. As
383 * above, this cannot fail to find one.
384 */
385 avp->av_bslot = 0;
386 for (slot = 0; slot < ATH_BCBUF; slot++)
387 if (sc->sc_bslot[slot] == ATH_IF_ID_ANY) {
388 /*
389 * XXX hack, space out slots to better
390 * deal with misses
391 */
392 if (slot+1 < ATH_BCBUF &&
393 sc->sc_bslot[slot+1] ==
394 ATH_IF_ID_ANY) {
395 avp->av_bslot = slot+1;
396 break;
397 }
398 avp->av_bslot = slot;
399 /* NB: keep looking for a double slot */
400 }
401 BUG_ON(sc->sc_bslot[avp->av_bslot] != ATH_IF_ID_ANY);
402 sc->sc_bslot[avp->av_bslot] = if_id;
403 sc->sc_nbcnvaps++;
404 }
405 }
406
407 /* Release the previous beacon frame, if it already exists. */
408 bf = avp->av_bcbuf;
409 if (bf->bf_mpdu != NULL) {
410 skb = (struct sk_buff *)bf->bf_mpdu;
411 ath_skb_unmap_single(sc, skb, PCI_DMA_TODEVICE,
412 get_dma_mem_context(bf, bf_dmacontext));
413 dev_kfree_skb_any(skb);
414 bf->bf_mpdu = NULL;
415 }
416
417 /*
418 * NB: the beacon data buffer must be 32-bit aligned;
419 * we assume the wbuf routines will return us something
420 * with this alignment (perhaps should assert).
421 * FIXME: Fill avp->av_boff.bo_tim,avp->av_btxctl.txpower and
422 * avp->av_btxctl.shortPreamble
423 */
424 skb = ieee80211_beacon_get(sc->hw, avp->av_if_data);
425 if (skb == NULL) {
426 DPRINTF(sc, ATH_DBG_BEACON, "%s: cannot get skb\n",
427 __func__);
428 return -ENOMEM;
429 }
430
431 /*
432 * Calculate a TSF adjustment factor required for
433 * staggered beacons. Note that we assume the format
434 * of the beacon frame leaves the tstamp field immediately
435 * following the header.
436 */
437 if (avp->av_bslot > 0) {
438 u64 tsfadjust;
439 __le64 val;
440 int intval;
441
442 /* FIXME: Use default value for now: Sujith */
443
444 intval = ATH_DEFAULT_BINTVAL;
445
446 /*
447 * The beacon interval is in TU's; the TSF in usecs.
448 * We figure out how many TU's to add to align the
449 * timestamp then convert to TSF units and handle
450 * byte swapping before writing it in the frame.
451 * The hardware will then add this each time a beacon
452 * frame is sent. Note that we align vaps 1..N
453 * and leave vap 0 untouched. This means vap 0
454 * has a timestamp in one beacon interval while the
455 * others get a timestamp aligned to the next interval.
456 */
457 tsfadjust = (intval * (ATH_BCBUF - avp->av_bslot)) / ATH_BCBUF;
458 val = cpu_to_le64(tsfadjust << 10); /* TU->TSF */
459
460 DPRINTF(sc, ATH_DBG_BEACON,
461 "%s: %s beacons, bslot %d intval %u tsfadjust %llu\n",
462 __func__, "stagger",
463 avp->av_bslot, intval, (unsigned long long)tsfadjust);
464
465 wh = (struct ieee80211_hdr *)skb->data;
466 memcpy(&wh[1], &val, sizeof(val));
467 }
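	/*
	 * Worked example of the adjustment above (illustrative values only,
	 * assuming intval = 100 TU and ATH_BCBUF = 4): for av_bslot = 1,
	 * tsfadjust = (100 * (4 - 1)) / 4 = 75 TU, and 75 << 10 = 76800 usec
	 * is what the hardware adds to the timestamp of every beacon sent
	 * from that slot, spacing the slots intval/ATH_BCBUF = 25 TU apart.
	 */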
468
469 bf->bf_buf_addr = ath_skb_map_single(sc, skb, PCI_DMA_TODEVICE,
470 get_dma_mem_context(bf, bf_dmacontext));
471 bf->bf_mpdu = skb;
472
473 return 0;
474}
475
476/*
477 * Reclaim beacon resources and return buffer to the pool.
478 *
479 * Checks the VAP to put the beacon frame buffer back to the ATH object
480 * queue, and de-allocates any wbuf frames that were sent as CAB traffic.
481*/
482
483void ath_beacon_return(struct ath_softc *sc, struct ath_vap *avp)
484{
485 if (avp->av_bcbuf != NULL) {
486 struct ath_buf *bf;
487
488 if (avp->av_bslot != -1) {
489 sc->sc_bslot[avp->av_bslot] = ATH_IF_ID_ANY;
490 sc->sc_nbcnvaps--;
491 }
492
493 bf = avp->av_bcbuf;
494 if (bf->bf_mpdu != NULL) {
495 struct sk_buff *skb = (struct sk_buff *)bf->bf_mpdu;
496 ath_skb_unmap_single(sc, skb, PCI_DMA_TODEVICE,
497 get_dma_mem_context(bf, bf_dmacontext));
498 dev_kfree_skb_any(skb);
499 bf->bf_mpdu = NULL;
500 }
501 list_add_tail(&bf->list, &sc->sc_bbuf);
502
503 avp->av_bcbuf = NULL;
504 }
505}
506
507/*
508 * Reclaim beacon resources and return buffer to the pool.
509 *
510 * This function will free any wbuf frames that are still attached to the
511 * beacon buffers in the ATH object. Note that this does not de-allocate
512 * any wbuf objects that are in the transmit queue and have not yet returned
513 * to the ATH object.
514*/
515
516void ath_beacon_free(struct ath_softc *sc)
517{
518 struct ath_buf *bf;
519
520 list_for_each_entry(bf, &sc->sc_bbuf, list) {
521 if (bf->bf_mpdu != NULL) {
522 struct sk_buff *skb = (struct sk_buff *) bf->bf_mpdu;
523 ath_skb_unmap_single(sc, skb, PCI_DMA_TODEVICE,
524 get_dma_mem_context(bf, bf_dmacontext));
525 dev_kfree_skb_any(skb);
526 bf->bf_mpdu = NULL;
527 }
528 }
529}
530
531/*
532 * Tasklet for Sending Beacons
533 *
534 * Transmit one or more beacon frames at SWBA. Dynamic updates to the frame
535 * contents are done as needed and the slot time is also adjusted based on
536 * current state.
537 *
538 * This tasklet is scheduled from the interrupt handler when an SWBA interrupt is raised.
539*/
540
541void ath9k_beacon_tasklet(unsigned long data)
542{
543#define TSF_TO_TU(_h,_l) \
544 ((((u32)(_h)) << 22) | (((u32)(_l)) >> 10))
545
546 struct ath_softc *sc = (struct ath_softc *)data;
547 struct ath_hal *ah = sc->sc_ah;
548 struct ath_buf *bf = NULL;
549 int slot, if_id;
550 u32 bfaddr;
551 u32 rx_clear = 0, rx_frame = 0, tx_frame = 0;
552 u32 show_cycles = 0;
553 u32 bc = 0; /* beacon count */
554 u64 tsf;
555 u32 tsftu;
556 u16 intval;
557
558 if (sc->sc_noreset) {
559 show_cycles = ath9k_hw_GetMibCycleCountsPct(ah,
560 &rx_clear,
561 &rx_frame,
562 &tx_frame);
563 }
564
565 /*
566 * Check if the previous beacon has gone out. If
567 * not don't try to post another, skip this period
568 * and wait for the next. Missed beacons indicate
569 * a problem and should not occur. If we miss too
570 * many consecutive beacons reset the device.
571 */
572 if (ath9k_hw_numtxpending(ah, sc->sc_bhalq) != 0) {
573 sc->sc_bmisscount++;
574 /* XXX: doth needs the chanchange IE countdown decremented.
575 * We should consider adding a mac80211 call to indicate
576 * a beacon miss so appropriate action could be taken
577 * (in that layer).
578 */
579 if (sc->sc_bmisscount < BSTUCK_THRESH) {
580 if (sc->sc_noreset) {
581 DPRINTF(sc, ATH_DBG_BEACON,
582 "%s: missed %u consecutive beacons\n",
583 __func__, sc->sc_bmisscount);
584 if (show_cycles) {
585 /*
586 * Display cycle counter stats
587 * from HW to aid in debugging
588 * stickiness.
589 */
590 DPRINTF(sc,
591 ATH_DBG_BEACON,
592 "%s: busy times: rx_clear=%d, "
593 "rx_frame=%d, tx_frame=%d\n",
594 __func__, rx_clear, rx_frame,
595 tx_frame);
596 } else {
597 DPRINTF(sc,
598 ATH_DBG_BEACON,
599 "%s: unable to obtain "
600 "busy times\n", __func__);
601 }
602 } else {
603 DPRINTF(sc, ATH_DBG_BEACON,
604 "%s: missed %u consecutive beacons\n",
605 __func__, sc->sc_bmisscount);
606 }
607 } else if (sc->sc_bmisscount >= BSTUCK_THRESH) {
608 if (sc->sc_noreset) {
609 if (sc->sc_bmisscount == BSTUCK_THRESH) {
610 DPRINTF(sc,
611 ATH_DBG_BEACON,
612 "%s: beacon is officially "
613 "stuck\n", __func__);
614 ath9k_hw_dmaRegDump(ah);
615 }
616 } else {
617 DPRINTF(sc, ATH_DBG_BEACON,
618 "%s: beacon is officially stuck\n",
619 __func__);
620 ath_bstuck_process(sc);
621 }
622 }
623
624 return;
625 }
626 if (sc->sc_bmisscount != 0) {
627 if (sc->sc_noreset) {
628 DPRINTF(sc,
629 ATH_DBG_BEACON,
630 "%s: resume beacon xmit after %u misses\n",
631 __func__, sc->sc_bmisscount);
632 } else {
633 DPRINTF(sc, ATH_DBG_BEACON,
634 "%s: resume beacon xmit after %u misses\n",
635 __func__, sc->sc_bmisscount);
636 }
637 sc->sc_bmisscount = 0;
638 }
639
640 /*
641 * Generate beacon frames. We are sending frames
642 * staggered, so calculate the slot for this frame based
643 * on the TSF to safeguard against missing an SWBA.
644 */
645
646 /* FIXME: Use default value for now - Sujith */
647 intval = ATH_DEFAULT_BINTVAL;
648
649 tsf = ath9k_hw_gettsf64(ah);
650 tsftu = TSF_TO_TU(tsf>>32, tsf);
651 slot = ((tsftu % intval) * ATH_BCBUF) / intval;
652 if_id = sc->sc_bslot[(slot + 1) % ATH_BCBUF];
653 DPRINTF(sc, ATH_DBG_BEACON,
654 "%s: slot %d [tsf %llu tsftu %u intval %u] if_id %d\n",
655 __func__, slot, (unsigned long long) tsf, tsftu,
656 intval, if_id);
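	/*
	 * Slot selection example (illustrative, assuming intval = 100 TU and
	 * ATH_BCBUF = 4): tsftu % 100 in [0, 25) maps to slot 0, [25, 50) to
	 * slot 1, [50, 75) to slot 2 and [75, 100) to slot 3; the beacon
	 * prepared at this SWBA is the one for the following slot, hence the
	 * (slot + 1) % ATH_BCBUF lookup into sc_bslot[].
	 */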
657 bfaddr = 0;
658 if (if_id != ATH_IF_ID_ANY) {
659 bf = ath_beacon_generate(sc, if_id);
660 if (bf != NULL) {
661 bfaddr = bf->bf_daddr;
662 bc = 1;
663 }
664 }
665 /*
666 * Handle slot time change when a non-ERP station joins/leaves
667 * an 11g network. The 802.11 layer notifies us via callback,
668 * we mark updateslot, then wait one beacon before effecting
669 * the change. This gives associated stations at least one
670 * beacon interval to note the state change.
671 *
672 * NB: The slot time change state machine is clocked according
673 * to whether we are bursting or staggering beacons. We
674 * recognize the request to update and record the current
675 * slot then don't transition until that slot is reached
676 * again. If we miss a beacon for that slot then we'll be
677 * slow to transition but we'll be sure at least one beacon
678 * interval has passed. When bursting slot is always left
679 * set to ATH_BCBUF so this check is a noop.
680 */
681 /* XXX locking */
682 if (sc->sc_updateslot == UPDATE) {
683 sc->sc_updateslot = COMMIT; /* commit next beacon */
684 sc->sc_slotupdate = slot;
685 } else if (sc->sc_updateslot == COMMIT && sc->sc_slotupdate == slot)
686 ath_setslottime(sc); /* commit change to hardware */
687
688 if (bfaddr != 0) {
689 /*
690 * Stop any current dma and put the new frame(s) on the queue.
691 * This should never fail since we check above that no frames
692 * are still pending on the queue.
693 */
694 if (!ath9k_hw_stoptxdma(ah, sc->sc_bhalq)) {
695 DPRINTF(sc, ATH_DBG_FATAL,
696 "%s: beacon queue %u did not stop?\n",
697 __func__, sc->sc_bhalq);
698 /* NB: the HAL still stops DMA, so proceed */
699 }
700
701 /* NB: cabq traffic should already be queued and primed */
702 ath9k_hw_puttxbuf(ah, sc->sc_bhalq, bfaddr);
703 ath9k_hw_txstart(ah, sc->sc_bhalq);
704
705 sc->ast_be_xmit += bc; /* XXX per-vap? */
706 }
707#undef TSF_TO_TU
708}
709
710/*
711 * Tasklet for Beacon Stuck processing
712 *
713 * Invoked when the beacon queue appears stuck.
714 * Basically calls the ath_internal_reset function to reset the chip.
715*/
716
717void ath_bstuck_process(struct ath_softc *sc)
718{
719 DPRINTF(sc, ATH_DBG_BEACON,
720 "%s: stuck beacon; resetting (bmiss count %u)\n",
721 __func__, sc->sc_bmisscount);
722 ath_internal_reset(sc);
723}
724
725/*
726 * Configure the beacon and sleep timers.
727 *
728 * When operating as an AP this resets the TSF and sets
729 * up the hardware to notify us when we need to issue beacons.
730 *
731 * When operating in station mode this sets up the beacon
732 * timers according to the timestamp of the last received
733 * beacon and the current TSF, configures PCF and DTIM
734 * handling, programs the sleep registers so the hardware
735 * will wakeup in time to receive beacons, and configures
736 * the beacon miss handling so we'll receive a BMISS
737 * interrupt when we stop seeing beacons from the AP
738 * we've associated with.
739 */
740
741void ath_beacon_config(struct ath_softc *sc, int if_id)
742{
743#define TSF_TO_TU(_h,_l) \
744 ((((u32)(_h)) << 22) | (((u32)(_l)) >> 10))
745 struct ath_hal *ah = sc->sc_ah;
746 u32 nexttbtt, intval;
747 struct ath_beacon_config conf;
748 enum ath9k_opmode av_opmode;
749
750 if (if_id != ATH_IF_ID_ANY)
751 av_opmode = sc->sc_vaps[if_id]->av_opmode;
752 else
753 av_opmode = sc->sc_opmode;
754
755 memzero(&conf, sizeof(struct ath_beacon_config));
756
757 /* FIXME: Use default values for now - Sujith */
758 /* Query beacon configuration first */
759 /*
760 * Protocol stack doesn't support dynamic beacon configuration,
761 * use default configurations.
762 */
763 conf.beacon_interval = ATH_DEFAULT_BINTVAL;
764 conf.listen_interval = 1;
765 conf.dtim_period = conf.beacon_interval;
766 conf.dtim_count = 1;
767 conf.bmiss_timeout = ATH_DEFAULT_BMISS_LIMIT * conf.beacon_interval;
768
769 /* extract tstamp from last beacon and convert to TU */
770 nexttbtt = TSF_TO_TU(get_unaligned_le32(conf.u.last_tstamp + 4),
771 get_unaligned_le32(conf.u.last_tstamp));
772 /* XXX conditionalize multi-bss support? */
773 if (sc->sc_opmode == ATH9K_M_HOSTAP) {
774 /*
775 * For multi-bss ap support beacons are either staggered
776 * evenly over N slots or burst together. For the former
777 * arrange for the SWBA to be delivered for each slot.
778 * Slots that are not occupied will generate nothing.
779 */
780 /* NB: the beacon interval is kept internally in TU's */
781 intval = conf.beacon_interval & ATH9K_BEACON_PERIOD;
782 intval /= ATH_BCBUF; /* for staggered beacons */
783 } else {
784 intval = conf.beacon_interval & ATH9K_BEACON_PERIOD;
785 }
786
787 if (nexttbtt == 0) /* e.g. for ap mode */
788 nexttbtt = intval;
789 else if (intval) /* NB: can be 0 for monitor mode */
790 nexttbtt = roundup(nexttbtt, intval);
791 DPRINTF(sc, ATH_DBG_BEACON, "%s: nexttbtt %u intval %u (%u)\n",
792 __func__, nexttbtt, intval, conf.beacon_interval);
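	/*
	 * Interval math example (illustrative, assuming
	 * conf.beacon_interval = 100 TU and ATH_BCBUF = 4): in HOSTAP mode
	 * the staggered interval becomes 100 / 4 = 25 TU, so an SWBA fires
	 * every 25 TU and each of the four beacon slots is served once per
	 * 100 TU beacon period.
	 */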
793 /* Check for ATH9K_M_HOSTAP and sc_nostabeacons for WDS client */
794 if (sc->sc_opmode == ATH9K_M_STA) {
795 struct ath9k_beacon_state bs;
796 u64 tsf;
797 u32 tsftu;
798 int dtimperiod, dtimcount, sleepduration;
799 int cfpperiod, cfpcount;
800
801 /*
802 * Setup dtim and cfp parameters according to
803 * last beacon we received (which may be none).
804 */
805 dtimperiod = conf.dtim_period;
806 if (dtimperiod <= 0) /* NB: 0 if not known */
807 dtimperiod = 1;
808 dtimcount = conf.dtim_count;
809 if (dtimcount >= dtimperiod) /* NB: sanity check */
810 dtimcount = 0; /* XXX? */
811 cfpperiod = 1; /* NB: no PCF support yet */
812 cfpcount = 0;
813
814 sleepduration = conf.listen_interval * intval;
815 if (sleepduration <= 0)
816 sleepduration = intval;
817
818#define FUDGE 2
819 /*
820 * Pull nexttbtt forward to reflect the current
821 * TSF and calculate dtim+cfp state for the result.
822 */
823 tsf = ath9k_hw_gettsf64(ah);
824 tsftu = TSF_TO_TU(tsf>>32, tsf) + FUDGE;
825 do {
826 nexttbtt += intval;
827 if (--dtimcount < 0) {
828 dtimcount = dtimperiod - 1;
829 if (--cfpcount < 0)
830 cfpcount = cfpperiod - 1;
831 }
832 } while (nexttbtt < tsftu);
833#undef FUDGE
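	/*
	 * For illustration (values assumed, not taken from this file): with
	 * intval = 100 TU, a stale nexttbtt of 500 and tsftu = 1236, the
	 * loop above advances nexttbtt to 1300, the first TBTT at or beyond
	 * the current time, while dtimcount/cfpcount wrap so the DTIM and
	 * CFP schedules stay aligned with the beacon periods that were
	 * skipped.
	 */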
834 memzero(&bs, sizeof(bs));
835 bs.bs_intval = intval;
836 bs.bs_nexttbtt = nexttbtt;
837 bs.bs_dtimperiod = dtimperiod*intval;
838 bs.bs_nextdtim = bs.bs_nexttbtt + dtimcount*intval;
839 bs.bs_cfpperiod = cfpperiod*bs.bs_dtimperiod;
840 bs.bs_cfpnext = bs.bs_nextdtim + cfpcount*bs.bs_dtimperiod;
841 bs.bs_cfpmaxduration = 0;
842 /*
843 * Calculate the number of consecutive beacons to miss
844 * before taking a BMISS interrupt. The configuration
845 * is specified in TU so we only need to calculate based
846 * on the beacon interval. Note that we clamp the
847 * result to at most 15 beacons.
848 */
849 if (sleepduration > intval) {
850 bs.bs_bmissthreshold =
851 conf.listen_interval *
852 ATH_DEFAULT_BMISS_LIMIT / 2;
853 } else {
854 bs.bs_bmissthreshold =
855 DIV_ROUND_UP(conf.bmiss_timeout, intval);
856 if (bs.bs_bmissthreshold > 15)
857 bs.bs_bmissthreshold = 15;
858 else if (bs.bs_bmissthreshold <= 0)
859 bs.bs_bmissthreshold = 1;
860 }
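	/*
	 * Example (illustrative, assuming conf.beacon_interval = 100 TU and
	 * ATH_DEFAULT_BMISS_LIMIT = 10): bmiss_timeout = 10 * 100 = 1000 TU,
	 * so in the sleepduration <= intval case the threshold is
	 * DIV_ROUND_UP(1000, 100) = 10 consecutive missed beacons, clamped
	 * to the 1..15 range, before a BMISS interrupt is taken.
	 */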
861
862 /*
863 * Calculate sleep duration. The configuration is
864 * given in ms. We ensure a multiple of the beacon
865 * period is used. Also, if the sleep duration is
866 * greater than the DTIM period then it makes sense
867 * to make it a multiple of that.
868 *
869 * XXX fixed at 100ms
870 */
871
872 bs.bs_sleepduration =
873 roundup(IEEE80211_MS_TO_TU(100), sleepduration);
874 if (bs.bs_sleepduration > bs.bs_dtimperiod)
875 bs.bs_sleepduration = bs.bs_dtimperiod;
876
877 DPRINTF(sc, ATH_DBG_BEACON,
878 "%s: tsf %llu "
879 "tsf:tu %u "
880 "intval %u "
881 "nexttbtt %u "
882 "dtim %u "
883 "nextdtim %u "
884 "bmiss %u "
885 "sleep %u "
886 "cfp:period %u "
887 "maxdur %u "
888 "next %u "
889 "timoffset %u\n"
890 , __func__
891 , (unsigned long long)tsf, tsftu
892 , bs.bs_intval
893 , bs.bs_nexttbtt
894 , bs.bs_dtimperiod
895 , bs.bs_nextdtim
896 , bs.bs_bmissthreshold
897 , bs.bs_sleepduration
898 , bs.bs_cfpperiod
899 , bs.bs_cfpmaxduration
900 , bs.bs_cfpnext
901 , bs.bs_timoffset
902 );
903
904 ath9k_hw_set_interrupts(ah, 0);
905 ath9k_hw_set_sta_beacon_timers(ah, &bs);
906 sc->sc_imask |= ATH9K_INT_BMISS;
907 ath9k_hw_set_interrupts(ah, sc->sc_imask);
908 } else {
909 u64 tsf;
910 u32 tsftu;
911 ath9k_hw_set_interrupts(ah, 0);
912 if (nexttbtt == intval)
913 intval |= ATH9K_BEACON_RESET_TSF;
914 if (sc->sc_opmode == ATH9K_M_IBSS) {
915 /*
916 * Pull nexttbtt forward to reflect the current
917 * TSF.
918 */
919#define FUDGE 2
920 if (!(intval & ATH9K_BEACON_RESET_TSF)) {
921 tsf = ath9k_hw_gettsf64(ah);
922 tsftu = TSF_TO_TU((u32)(tsf>>32),
923 (u32)tsf) + FUDGE;
924 do {
925 nexttbtt += intval;
926 } while (nexttbtt < tsftu);
927 }
928#undef FUDGE
929 DPRINTF(sc, ATH_DBG_BEACON,
930 "%s: IBSS nexttbtt %u intval %u (%u)\n",
931 __func__, nexttbtt,
932 intval & ~ATH9K_BEACON_RESET_TSF,
933 conf.beacon_interval);
934
935 /*
936 * In IBSS mode enable the beacon timers but only
937 * enable SWBA interrupts if we need to manually
938 * prepare beacon frames. Otherwise we use a
939 * self-linked tx descriptor and let the hardware
940 * deal with things.
941 */
942 intval |= ATH9K_BEACON_ENA;
943 if (!(ah->ah_caps.hw_caps & ATH9K_HW_CAP_VEOL))
944 sc->sc_imask |= ATH9K_INT_SWBA;
945 ath_beaconq_config(sc);
946 } else if (sc->sc_opmode == ATH9K_M_HOSTAP) {
947 /*
948 * In AP mode we enable the beacon timers and
949 * SWBA interrupts to prepare beacon frames.
950 */
951 intval |= ATH9K_BEACON_ENA;
952 sc->sc_imask |= ATH9K_INT_SWBA; /* beacon prepare */
953 ath_beaconq_config(sc);
954 }
955 ath9k_hw_beaconinit(ah, nexttbtt, intval);
956 sc->sc_bmisscount = 0;
957 ath9k_hw_set_interrupts(ah, sc->sc_imask);
958 /*
959 * When using a self-linked beacon descriptor in
960 * ibss mode load it once here.
961 */
962 if (sc->sc_opmode == ATH9K_M_IBSS &&
963 (ah->ah_caps.hw_caps & ATH9K_HW_CAP_VEOL))
964 ath_beacon_start_adhoc(sc, 0);
965 }
966#undef TSF_TO_TU
967}
968
969/* Function to resync the beacon timers using a newly received beacon, if necessary */
970
971void ath_beacon_sync(struct ath_softc *sc, int if_id)
972{
973 /*
974 * Resync beacon timers using the tsf of the
975 * beacon frame we just received.
976 */
977 ath_beacon_config(sc, if_id);
978 sc->sc_beacons = 1;
979}
diff --git a/drivers/net/wireless/ath9k/core.c b/drivers/net/wireless/ath9k/core.c
new file mode 100644
index 000000000000..f6c45288d0e7
--- /dev/null
+++ b/drivers/net/wireless/ath9k/core.c
@@ -0,0 +1,1923 @@
1/*
2 * Copyright (c) 2008, Atheros Communications Inc.
3 *
4 * Permission to use, copy, modify, and/or distribute this software for any
5 * purpose with or without fee is hereby granted, provided that the above
6 * copyright notice and this permission notice appear in all copies.
7 *
8 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
9 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
10 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
11 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
12 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
13 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
14 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
15 */
16
17 /* Implementation of the main "ATH" layer. */
18
19#include "core.h"
20#include "regd.h"
21
22static int ath_outdoor; /* enable outdoor use */
23
24static const u8 ath_bcast_mac[ETH_ALEN] =
25 { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
26
27static u32 ath_chainmask_sel_up_rssi_thres =
28 ATH_CHAINMASK_SEL_UP_RSSI_THRES;
29static u32 ath_chainmask_sel_down_rssi_thres =
30 ATH_CHAINMASK_SEL_DOWN_RSSI_THRES;
31static u32 ath_chainmask_sel_period =
32 ATH_CHAINMASK_SEL_TIMEOUT;
33
34/* return bus cachesize in 4B word units */
35
36static void bus_read_cachesize(struct ath_softc *sc, int *csz)
37{
38 u8 u8tmp;
39
40 pci_read_config_byte(sc->pdev, PCI_CACHE_LINE_SIZE, (u8 *)&u8tmp);
41 *csz = (int)u8tmp;
42
43 /*
44 * This check was put in to avoid "unpleasant" consequences if
45 * the bootrom has not fully initialized all PCI devices.
46 * Sometimes the cache line size register is not set.
47 */
48
49 if (*csz == 0)
50 *csz = DEFAULT_CACHELINE >> 2; /* Use the default size */
51}
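/*
 * Unit note: the PCI cache line size register is specified in 32-bit
 * (4-byte) words, so the value read above is used as-is; a register value
 * of 16, for example, corresponds to a 64-byte cache line. DEFAULT_CACHELINE
 * is presumably a byte count, hence the >> 2 when falling back to it.
 */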
52
53/*
54 * Set current operating mode
55 *
56 * This function initializes and fills the rate table in the ATH object based
57 * on the operating mode. The blink rates are also set up here, although
58 * they have been superseded by the ath_led module.
59*/
60
61static void ath_setcurmode(struct ath_softc *sc, enum wireless_mode mode)
62{
63 const struct ath9k_rate_table *rt;
64 int i;
65
66 memset(sc->sc_rixmap, 0xff, sizeof(sc->sc_rixmap));
67 rt = ath9k_hw_getratetable(sc->sc_ah, mode);
68 BUG_ON(!rt);
69
70 for (i = 0; i < rt->rateCount; i++)
71 sc->sc_rixmap[rt->info[i].rateCode] = (u8) i;
72
73 memzero(sc->sc_hwmap, sizeof(sc->sc_hwmap));
74 for (i = 0; i < 256; i++) {
75 u8 ix = rt->rateCodeToIndex[i];
76
77 if (ix == 0xff)
78 continue;
79
80 sc->sc_hwmap[i].ieeerate =
81 rt->info[ix].dot11Rate & IEEE80211_RATE_VAL;
82 sc->sc_hwmap[i].rateKbps = rt->info[ix].rateKbps;
83
84 if (rt->info[ix].shortPreamble ||
85 rt->info[ix].phy == PHY_OFDM) {
86 /* XXX: Handle this */
87 }
88
89 /* NB: this uses the last entry if the rate isn't found */
90 /* XXX beware of overflow */
91 }
92 sc->sc_currates = rt;
93 sc->sc_curmode = mode;
94 /*
95 * All protection frames are transmitted at 2Mb/s for
96 * 11g, otherwise at 1Mb/s.
97 * XXX select protection rate index from rate table.
98 */
99 sc->sc_protrix = (mode == ATH9K_MODE_11G ? 1 : 0);
100}
101
102/*
103 * Set up rate table (legacy rates)
104 */
105static void ath_setup_rates(struct ath_softc *sc, enum ieee80211_band band)
106{
107 struct ath_hal *ah = sc->sc_ah;
108 const struct ath9k_rate_table *rt = NULL;
109 struct ieee80211_supported_band *sband;
110 struct ieee80211_rate *rate;
111 int i, maxrates;
112
113 switch (band) {
114 case IEEE80211_BAND_2GHZ:
115 rt = ath9k_hw_getratetable(ah, ATH9K_MODE_11G);
116 break;
117 case IEEE80211_BAND_5GHZ:
118 rt = ath9k_hw_getratetable(ah, ATH9K_MODE_11A);
119 break;
120 default:
121 break;
122 }
123
124 if (rt == NULL)
125 return;
126
127 sband = &sc->sbands[band];
128 rate = sc->rates[band];
129
130 if (rt->rateCount > ATH_RATE_MAX)
131 maxrates = ATH_RATE_MAX;
132 else
133 maxrates = rt->rateCount;
134
135 for (i = 0; i < maxrates; i++) {
136 rate[i].bitrate = rt->info[i].rateKbps / 100;
137 rate[i].hw_value = rt->info[i].rateCode;
138 sband->n_bitrates++;
139 DPRINTF(sc, ATH_DBG_CONFIG,
140 "%s: Rate: %2dMbps, ratecode: %2d\n",
141 __func__,
142 rate[i].bitrate / 10,
143 rate[i].hw_value);
144 }
145}
146
147/*
148 * Set up channel list
149 */
150static int ath_setup_channels(struct ath_softc *sc)
151{
152 struct ath_hal *ah = sc->sc_ah;
153 int nchan, i, a = 0, b = 0;
154 u8 regclassids[ATH_REGCLASSIDS_MAX];
155 u32 nregclass = 0;
156 struct ieee80211_supported_band *band_2ghz;
157 struct ieee80211_supported_band *band_5ghz;
158 struct ieee80211_channel *chan_2ghz;
159 struct ieee80211_channel *chan_5ghz;
160 struct ath9k_channel *c;
161
162 /* Fill in ah->ah_channels */
163 if (!ath9k_regd_init_channels(ah,
164 ATH_CHAN_MAX,
165 (u32 *)&nchan,
166 regclassids,
167 ATH_REGCLASSIDS_MAX,
168 &nregclass,
169 CTRY_DEFAULT,
170 false,
171 1)) {
172 u32 rd = ah->ah_currentRD;
173
174 DPRINTF(sc, ATH_DBG_FATAL,
175 "%s: unable to collect channel list; "
176 "regdomain likely %u country code %u\n",
177 __func__, rd, CTRY_DEFAULT);
178 return -EINVAL;
179 }
180
181 band_2ghz = &sc->sbands[IEEE80211_BAND_2GHZ];
182 band_5ghz = &sc->sbands[IEEE80211_BAND_5GHZ];
183 chan_2ghz = sc->channels[IEEE80211_BAND_2GHZ];
184 chan_5ghz = sc->channels[IEEE80211_BAND_5GHZ];
185
186 for (i = 0; i < nchan; i++) {
187 c = &ah->ah_channels[i];
188 if (IS_CHAN_2GHZ(c)) {
189 chan_2ghz[a].band = IEEE80211_BAND_2GHZ;
190 chan_2ghz[a].center_freq = c->channel;
191 chan_2ghz[a].max_power = c->maxTxPower;
192
193 if (c->privFlags & CHANNEL_DISALLOW_ADHOC)
194 chan_2ghz[a].flags |=
195 IEEE80211_CHAN_NO_IBSS;
196 if (c->channelFlags & CHANNEL_PASSIVE)
197 chan_2ghz[a].flags |=
198 IEEE80211_CHAN_PASSIVE_SCAN;
199
200 band_2ghz->n_channels = ++a;
201
202 DPRINTF(sc, ATH_DBG_CONFIG,
203 "%s: 2GHz channel: %d, "
204 "channelFlags: 0x%x\n",
205 __func__,
206 c->channel,
207 c->channelFlags);
208 } else if (IS_CHAN_5GHZ(c)) {
209 chan_5ghz[b].band = IEEE80211_BAND_5GHZ;
210 chan_5ghz[b].center_freq = c->channel;
211 chan_5ghz[b].max_power = c->maxTxPower;
212
213 if (c->privFlags & CHANNEL_DISALLOW_ADHOC)
214 chan_5ghz[b].flags |=
215 IEEE80211_CHAN_NO_IBSS;
216 if (c->channelFlags & CHANNEL_PASSIVE)
217 chan_5ghz[b].flags |=
218 IEEE80211_CHAN_PASSIVE_SCAN;
219
220 band_5ghz->n_channels = ++b;
221
222 DPRINTF(sc, ATH_DBG_CONFIG,
223 "%s: 5GHz channel: %d, "
224 "channelFlags: 0x%x\n",
225 __func__,
226 c->channel,
227 c->channelFlags);
228 }
229 }
230
231 return 0;
232}
233
234/*
235 * Determine mode from channel flags
236 *
237 * This routine will provide the enumerated wireless_mode value based
238 * on the settings of the channel flags. If no valid set of flags
239 * exists, the lowest mode (11b) is selected.
240*/
241
242static enum wireless_mode ath_chan2mode(struct ath9k_channel *chan)
243{
244 if (chan->chanmode == CHANNEL_A)
245 return ATH9K_MODE_11A;
246 else if (chan->chanmode == CHANNEL_G)
247 return ATH9K_MODE_11G;
248 else if (chan->chanmode == CHANNEL_B)
249 return ATH9K_MODE_11B;
250 else if (chan->chanmode == CHANNEL_A_HT20)
251 return ATH9K_MODE_11NA_HT20;
252 else if (chan->chanmode == CHANNEL_G_HT20)
253 return ATH9K_MODE_11NG_HT20;
254 else if (chan->chanmode == CHANNEL_A_HT40PLUS)
255 return ATH9K_MODE_11NA_HT40PLUS;
256 else if (chan->chanmode == CHANNEL_A_HT40MINUS)
257 return ATH9K_MODE_11NA_HT40MINUS;
258 else if (chan->chanmode == CHANNEL_G_HT40PLUS)
259 return ATH9K_MODE_11NG_HT40PLUS;
260 else if (chan->chanmode == CHANNEL_G_HT40MINUS)
261 return ATH9K_MODE_11NG_HT40MINUS;
262
263 /* NB: should not get here */
264 return ATH9K_MODE_11B;
265}
266
267/*
268 * Stop the device, grabbing the top-level lock to protect
269 * against concurrent entry through ath_init (which can happen
270 * if another thread does a system call and the thread doing the
271 * stop is preempted).
272 */
273
274static int ath_stop(struct ath_softc *sc)
275{
276 struct ath_hal *ah = sc->sc_ah;
277
278 DPRINTF(sc, ATH_DBG_CONFIG, "%s: invalid %u\n",
279 __func__, sc->sc_invalid);
280
281 /*
282 * Shutdown the hardware and driver:
283 * stop output from above
284 * reset 802.11 state machine
285 * (sends station deassoc/deauth frames)
286 * turn off timers
287 * disable interrupts
288 * clear transmit machinery
289 * clear receive machinery
290 * turn off the radio
291 * reclaim beacon resources
292 *
293 * Note that some of this work is not possible if the
294 * hardware is gone (invalid).
295 */
296
297 if (!sc->sc_invalid)
298 ath9k_hw_set_interrupts(ah, 0);
299 ath_draintxq(sc, false);
300 if (!sc->sc_invalid) {
301 ath_stoprecv(sc);
302 ath9k_hw_phy_disable(ah);
303 } else
304 sc->sc_rxlink = NULL;
305
306 return 0;
307}
308
309/*
310 * Start Scan
311 *
312 * This function is called when starting a channel scan. It will perform
313 * power save wakeup processing, set the filter for the scan, and get the
314 * chip ready to send broadcast packets out during the scan.
315*/
316
317void ath_scan_start(struct ath_softc *sc)
318{
319 struct ath_hal *ah = sc->sc_ah;
320 u32 rfilt;
321 u32 now = (u32) jiffies_to_msecs(get_timestamp());
322
323 sc->sc_scanning = 1;
324 rfilt = ath_calcrxfilter(sc);
325 ath9k_hw_setrxfilter(ah, rfilt);
326 ath9k_hw_write_associd(ah, ath_bcast_mac, 0);
327
328 /* Restore previous power management state. */
329
330 DPRINTF(sc, ATH_DBG_CONFIG, "%d.%03d | %s: RX filter 0x%x aid 0\n",
331 now / 1000, now % 1000, __func__, rfilt);
332}
333
334/*
335 * Scan End
336 *
337 * This routine is called by the upper layer when the scan is completed. This
338 * will set the filters back to normal operating mode, set the BSSID to the
339 * correct value, and restore the power save state.
340*/
341
342void ath_scan_end(struct ath_softc *sc)
343{
344 struct ath_hal *ah = sc->sc_ah;
345 u32 rfilt;
346 u32 now = (u32) jiffies_to_msecs(get_timestamp());
347
348 sc->sc_scanning = 0;
349 /* Request a full reset due to RX packet filter changes */
350 sc->sc_full_reset = 1;
351 rfilt = ath_calcrxfilter(sc);
352 ath9k_hw_setrxfilter(ah, rfilt);
353 ath9k_hw_write_associd(ah, sc->sc_curbssid, sc->sc_curaid);
354
355 DPRINTF(sc, ATH_DBG_CONFIG, "%d.%03d | %s: RX filter 0x%x aid 0x%x\n",
356 now / 1000, now % 1000, __func__, rfilt, sc->sc_curaid);
357}
358
359/*
360 * Set the current channel
361 *
362 * Set/change channels. If the channel is really being changed, it's done
363 * by resetting the chip. To accomplish this we must first clean up any pending
364 * DMA, then restart things, a la ath_init.
365*/
366int ath_set_channel(struct ath_softc *sc, struct ath9k_channel *hchan)
367{
368 struct ath_hal *ah = sc->sc_ah;
369 bool fastcc = true, stopped;
370 enum ath9k_ht_macmode ht_macmode;
371
372 if (sc->sc_invalid) /* if the device is invalid or removed */
373 return -EIO;
374
375 DPRINTF(sc, ATH_DBG_CONFIG,
376 "%s: %u (%u MHz) -> %u (%u MHz), cflags:%x\n",
377 __func__,
378 ath9k_hw_mhz2ieee(ah, sc->sc_curchan.channel,
379 sc->sc_curchan.channelFlags),
380 sc->sc_curchan.channel,
381 ath9k_hw_mhz2ieee(ah, hchan->channel, hchan->channelFlags),
382 hchan->channel, hchan->channelFlags);
383
384 ht_macmode = ath_cwm_macmode(sc);
385
386 if (hchan->channel != sc->sc_curchan.channel ||
387 hchan->channelFlags != sc->sc_curchan.channelFlags ||
388 sc->sc_update_chainmask || sc->sc_full_reset) {
389 int status;
390 /*
391 * This is only performed if the channel settings have
392 * actually changed.
393 *
394 * To switch channels clear any pending DMA operations;
395 * wait long enough for the RX fifo to drain, reset the
396 * hardware at the new frequency, and then re-enable
397 * the relevant bits of the h/w.
398 */
399 ath9k_hw_set_interrupts(ah, 0); /* disable interrupts */
400 ath_draintxq(sc, false); /* clear pending tx frames */
401 stopped = ath_stoprecv(sc); /* turn off frame recv */
402
403 /* XXX: do not flush receive queue here. We don't want
404 * to flush data frames already in queue because of
405 * changing channel. */
406
407 if (!stopped || sc->sc_full_reset)
408 fastcc = false;
409
410 spin_lock_bh(&sc->sc_resetlock);
411 if (!ath9k_hw_reset(ah, sc->sc_opmode, hchan,
412 ht_macmode, sc->sc_tx_chainmask,
413 sc->sc_rx_chainmask,
414 sc->sc_ht_extprotspacing,
415 fastcc, &status)) {
416 DPRINTF(sc, ATH_DBG_FATAL,
417 "%s: unable to reset channel %u (%uMHz) "
418 "flags 0x%x hal status %u\n", __func__,
419 ath9k_hw_mhz2ieee(ah, hchan->channel,
420 hchan->channelFlags),
421 hchan->channel, hchan->channelFlags, status);
422 spin_unlock_bh(&sc->sc_resetlock);
423 return -EIO;
424 }
425 spin_unlock_bh(&sc->sc_resetlock);
426
427 sc->sc_curchan = *hchan;
428 sc->sc_update_chainmask = 0;
429 sc->sc_full_reset = 0;
430
431 /* Re-enable rx framework */
432 if (ath_startrecv(sc) != 0) {
433 DPRINTF(sc, ATH_DBG_FATAL,
434 "%s: unable to restart recv logic\n", __func__);
435 return -EIO;
436 }
437 /*
438 * Change channels and update the h/w rate map
439 * if we're switching; e.g. 11a to 11b/g.
440 */
441 ath_setcurmode(sc, ath_chan2mode(hchan));
442
443 ath_update_txpow(sc); /* update tx power state */
444 /*
445 * Re-enable interrupts.
446 */
447 ath9k_hw_set_interrupts(ah, sc->sc_imask);
448 }
449 return 0;
450}
451
452/**********************/
453/* Chainmask Handling */
454/**********************/
455
456static void ath_chainmask_sel_timertimeout(unsigned long data)
457{
458 struct ath_chainmask_sel *cm = (struct ath_chainmask_sel *)data;
459 cm->switch_allowed = 1;
460}
461
462/* Start chainmask select timer */
463static void ath_chainmask_sel_timerstart(struct ath_chainmask_sel *cm)
464{
465 cm->switch_allowed = 0;
466 mod_timer(&cm->timer, ath_chainmask_sel_period);
467}
468
469/* Stop chainmask select timer */
470static void ath_chainmask_sel_timerstop(struct ath_chainmask_sel *cm)
471{
472 cm->switch_allowed = 0;
473 del_timer_sync(&cm->timer);
474}
475
476static void ath_chainmask_sel_init(struct ath_softc *sc, struct ath_node *an)
477{
478 struct ath_chainmask_sel *cm = &an->an_chainmask_sel;
479
480 memzero(cm, sizeof(struct ath_chainmask_sel));
481
482 cm->cur_tx_mask = sc->sc_tx_chainmask;
483 cm->cur_rx_mask = sc->sc_rx_chainmask;
484 cm->tx_avgrssi = ATH_RSSI_DUMMY_MARKER;
485 setup_timer(&cm->timer,
486 ath_chainmask_sel_timertimeout, (unsigned long) cm);
487}
488
489int ath_chainmask_sel_logic(struct ath_softc *sc, struct ath_node *an)
490{
491 struct ath_chainmask_sel *cm = &an->an_chainmask_sel;
492
493 /*
494 * Disable auto-switching in one of the following if conditions.
495 * sc_chainmask_auto_sel is used for the internal global auto-switching
496 * enabled/disabled setting.
497 */
498 if (sc->sc_ah->ah_caps.tx_chainmask != ATH_CHAINMASK_SEL_3X3) {
499 cm->cur_tx_mask = sc->sc_tx_chainmask;
500 return cm->cur_tx_mask;
501 }
502
503 if (cm->tx_avgrssi == ATH_RSSI_DUMMY_MARKER)
504 return cm->cur_tx_mask;
505
506 if (cm->switch_allowed) {
507 /* Switch down from tx 3 to tx 2. */
508 if (cm->cur_tx_mask == ATH_CHAINMASK_SEL_3X3 &&
509 ATH_RSSI_OUT(cm->tx_avgrssi) >=
510 ath_chainmask_sel_down_rssi_thres) {
511 cm->cur_tx_mask = sc->sc_tx_chainmask;
512
513 /* Don't let another switch happen until
514 * this timer expires */
515 ath_chainmask_sel_timerstart(cm);
516 }
517 /* Switch up from tx 2 to 3. */
518 else if (cm->cur_tx_mask == sc->sc_tx_chainmask &&
519 ATH_RSSI_OUT(cm->tx_avgrssi) <=
520 ath_chainmask_sel_up_rssi_thres) {
521 cm->cur_tx_mask = ATH_CHAINMASK_SEL_3X3;
522
523 /* Don't let another switch happen
524 * until this timer expires */
525 ath_chainmask_sel_timerstart(cm);
526 }
527 }
528
529 return cm->cur_tx_mask;
530}
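/*
 * Hysteresis example (illustrative thresholds; the real values come from
 * ATH_CHAINMASK_SEL_UP_RSSI_THRES and ATH_CHAINMASK_SEL_DOWN_RSSI_THRES):
 * with an "up" threshold of 20 and a "down" threshold of 35, a node whose
 * averaged tx RSSI rises to 35 or above is switched from 3x3 down to the
 * configured chainmask, and only returns to 3x3 once the RSSI drops to 20
 * or below; the timer armed on each switch blocks further changes until
 * ath_chainmask_sel_period expires.
 */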
531
532/*
533 * Update tx/rx chainmask. For legacy association,
534 * hard code the chainmask to 1x1; for 11n association, use
535 * the chainmask configuration.
536 */
537
538void ath_update_chainmask(struct ath_softc *sc, int is_ht)
539{
540 sc->sc_update_chainmask = 1;
541 if (is_ht) {
542 sc->sc_tx_chainmask = sc->sc_ah->ah_caps.tx_chainmask;
543 sc->sc_rx_chainmask = sc->sc_ah->ah_caps.rx_chainmask;
544 } else {
545 sc->sc_tx_chainmask = 1;
546 sc->sc_rx_chainmask = 1;
547 }
548
549 DPRINTF(sc, ATH_DBG_CONFIG, "%s: tx chmask: %d, rx chmask: %d\n",
550 __func__, sc->sc_tx_chainmask, sc->sc_rx_chainmask);
551}
552
553/******************/
554/* VAP management */
555/******************/
556
557/*
558 * VAP in Listen mode
559 *
560 * This routine brings the VAP out of the down state into a "listen" state
561 * where it waits for association requests. This is used in AP and AdHoc
562 * modes.
563*/
564
565int ath_vap_listen(struct ath_softc *sc, int if_id)
566{
567 struct ath_hal *ah = sc->sc_ah;
568 struct ath_vap *avp;
569 u32 rfilt = 0;
570 DECLARE_MAC_BUF(mac);
571
572 avp = sc->sc_vaps[if_id];
573 if (avp == NULL) {
574 DPRINTF(sc, ATH_DBG_FATAL, "%s: invalid interface id %u\n",
575 __func__, if_id);
576 return -EINVAL;
577 }
578
579#ifdef CONFIG_SLOW_ANT_DIV
580 ath_slow_ant_div_stop(&sc->sc_antdiv);
581#endif
582
583 /* update ratectrl about the new state */
584 ath_rate_newstate(sc, avp);
585
586 rfilt = ath_calcrxfilter(sc);
587 ath9k_hw_setrxfilter(ah, rfilt);
588
589 if (sc->sc_opmode == ATH9K_M_STA || sc->sc_opmode == ATH9K_M_IBSS) {
590 memcpy(sc->sc_curbssid, ath_bcast_mac, ETH_ALEN);
591 ath9k_hw_write_associd(ah, sc->sc_curbssid, sc->sc_curaid);
592 } else
593 sc->sc_curaid = 0;
594
595 DPRINTF(sc, ATH_DBG_CONFIG,
596 "%s: RX filter 0x%x bssid %s aid 0x%x\n",
597 __func__, rfilt, print_mac(mac,
598 sc->sc_curbssid), sc->sc_curaid);
599
600 /*
601 * XXXX
602 * Disable BMISS interrupt when we're not associated
603 */
604 ath9k_hw_set_interrupts(ah,
605 sc->sc_imask & ~(ATH9K_INT_SWBA | ATH9K_INT_BMISS));
606 sc->sc_imask &= ~(ATH9K_INT_SWBA | ATH9K_INT_BMISS);
607 /* need to reconfigure the beacons when it moves to RUN */
608 sc->sc_beacons = 0;
609
610 return 0;
611}
612
613int ath_vap_attach(struct ath_softc *sc,
614 int if_id,
615 struct ieee80211_vif *if_data,
616 enum ath9k_opmode opmode)
617{
618 struct ath_vap *avp;
619
620 if (if_id >= ATH_BCBUF || sc->sc_vaps[if_id] != NULL) {
621 DPRINTF(sc, ATH_DBG_FATAL,
622 "%s: Invalid interface id = %u\n", __func__, if_id);
623 return -EINVAL;
624 }
625
626 switch (opmode) {
627 case ATH9K_M_STA:
628 case ATH9K_M_IBSS:
629 case ATH9K_M_MONITOR:
630 break;
631 case ATH9K_M_HOSTAP:
632 /* XXX not right, beacon buffer is allocated on RUN trans */
633 if (list_empty(&sc->sc_bbuf))
634 return -ENOMEM;
635 break;
636 default:
637 return -EINVAL;
638 }
639
640 /* create ath_vap */
641 avp = kmalloc(sizeof(struct ath_vap), GFP_KERNEL);
642 if (avp == NULL)
643 return -ENOMEM;
644
645 memzero(avp, sizeof(struct ath_vap));
646 avp->av_if_data = if_data;
647 /* Set the VAP opmode */
648 avp->av_opmode = opmode;
649 avp->av_bslot = -1;
650 INIT_LIST_HEAD(&avp->av_mcastq.axq_q);
651 INIT_LIST_HEAD(&avp->av_mcastq.axq_acq);
652 spin_lock_init(&avp->av_mcastq.axq_lock);
653
654 ath9k_hw_set_tsfadjust(sc->sc_ah, 1);
655
656 sc->sc_vaps[if_id] = avp;
657 sc->sc_nvaps++;
658 /* Set the device opmode */
659 sc->sc_opmode = opmode;
660
661 /* default VAP configuration */
662 avp->av_config.av_fixed_rateset = IEEE80211_FIXED_RATE_NONE;
663 avp->av_config.av_fixed_retryset = 0x03030303;
664
665 return 0;
666}
667
668int ath_vap_detach(struct ath_softc *sc, int if_id)
669{
670 struct ath_hal *ah = sc->sc_ah;
671 struct ath_vap *avp;
672
673 avp = sc->sc_vaps[if_id];
674 if (avp == NULL) {
675 DPRINTF(sc, ATH_DBG_FATAL, "%s: invalid interface id %u\n",
676 __func__, if_id);
677 return -EINVAL;
678 }
679
680 /*
681 * Quiesce the hardware while we remove the vap. In
682 * particular we need to reclaim all references to the
683 * vap state by any frames pending on the tx queues.
684 *
685 * XXX can we do this w/o affecting other vap's?
686 */
687 ath9k_hw_set_interrupts(ah, 0); /* disable interrupts */
688 ath_draintxq(sc, false); /* stop xmit side */
689 ath_stoprecv(sc); /* stop recv side */
690 ath_flushrecv(sc); /* flush recv queue */
691
692 /* Reclaim any pending mcast bufs on the vap. */
693 ath_tx_draintxq(sc, &avp->av_mcastq, false);
694
695 kfree(avp);
696 sc->sc_vaps[if_id] = NULL;
697 sc->sc_nvaps--;
698
699 return 0;
700}
701
702int ath_vap_config(struct ath_softc *sc,
703 int if_id, struct ath_vap_config *if_config)
704{
705 struct ath_vap *avp;
706
707 if (if_id >= ATH_BCBUF) {
708 DPRINTF(sc, ATH_DBG_FATAL,
709 "%s: Invalid interface id = %u\n", __func__, if_id);
710 return -EINVAL;
711 }
712
713 avp = sc->sc_vaps[if_id];
714 ASSERT(avp != NULL);
715
716 if (avp)
717 memcpy(&avp->av_config, if_config, sizeof(avp->av_config));
718
719 return 0;
720}
721
722/********/
723/* Core */
724/********/
725
726int ath_open(struct ath_softc *sc, struct ath9k_channel *initial_chan)
727{
728 struct ath_hal *ah = sc->sc_ah;
729 int status;
730 int error = 0;
731 enum ath9k_ht_macmode ht_macmode = ath_cwm_macmode(sc);
732
733 DPRINTF(sc, ATH_DBG_CONFIG, "%s: mode %d\n", __func__, sc->sc_opmode);
734
735 /*
736 * Stop anything previously setup. This is safe
737 * whether this is the first time through or not.
738 */
739 ath_stop(sc);
740
741 /* Initialize chanmask selection */
742 sc->sc_tx_chainmask = ah->ah_caps.tx_chainmask;
743 sc->sc_rx_chainmask = ah->ah_caps.rx_chainmask;
744
745 /* Reset SERDES registers */
746 ath9k_hw_configpcipowersave(ah, 0);
747
748 /*
749 * The basic interface to setting the hardware in a good
750 * state is ``reset''. On return the hardware is known to
751 * be powered up and with interrupts disabled. This must
752 * be followed by initialization of the appropriate bits
753 * and then setup of the interrupt mask.
754 */
755 sc->sc_curchan = *initial_chan;
756
757 spin_lock_bh(&sc->sc_resetlock);
758 if (!ath9k_hw_reset(ah, sc->sc_opmode, &sc->sc_curchan, ht_macmode,
759 sc->sc_tx_chainmask, sc->sc_rx_chainmask,
760 sc->sc_ht_extprotspacing, false, &status)) {
761 DPRINTF(sc, ATH_DBG_FATAL,
762 "%s: unable to reset hardware; hal status %u "
763 "(freq %u flags 0x%x)\n", __func__, status,
764 sc->sc_curchan.channel, sc->sc_curchan.channelFlags);
765 error = -EIO;
766 spin_unlock_bh(&sc->sc_resetlock);
767 goto done;
768 }
769 spin_unlock_bh(&sc->sc_resetlock);
770 /*
771 * This is needed only to setup initial state
772 * but it's best done after a reset.
773 */
774 ath_update_txpow(sc);
775
776 /*
777 * Setup the hardware after reset:
778 * The receive engine is set going.
779 * Frame transmit is handled entirely
780 * in the frame output path; there's nothing to do
781 * here except setup the interrupt mask.
782 */
783 if (ath_startrecv(sc) != 0) {
784 DPRINTF(sc, ATH_DBG_FATAL,
785 "%s: unable to start recv logic\n", __func__);
786 error = -EIO;
787 goto done;
788 }
789 /* Setup our intr mask. */
790 sc->sc_imask = ATH9K_INT_RX | ATH9K_INT_TX
791 | ATH9K_INT_RXEOL | ATH9K_INT_RXORN
792 | ATH9K_INT_FATAL | ATH9K_INT_GLOBAL;
793
794 if (ah->ah_caps.hw_caps & ATH9K_HW_CAP_GTT)
795 sc->sc_imask |= ATH9K_INT_GTT;
796
797 if (ah->ah_caps.hw_caps & ATH9K_HW_CAP_HT)
798 sc->sc_imask |= ATH9K_INT_CST;
799
800 /*
801 * Enable MIB interrupts when there are hardware phy counters.
802 * Note we only do this (at the moment) for station mode.
803 */
804 if (ath9k_hw_phycounters(ah) &&
805 ((sc->sc_opmode == ATH9K_M_STA) || (sc->sc_opmode == ATH9K_M_IBSS)))
806 sc->sc_imask |= ATH9K_INT_MIB;
807 /*
808 * Some hardware processes the TIM IE and fires an
809 * interrupt when the TIM bit is set. For hardware
810 * that does, if not overridden by configuration,
811 * enable the TIM interrupt when operating as station.
812 */
813 if ((ah->ah_caps.hw_caps & ATH9K_HW_CAP_ENHANCEDPM) &&
814 (sc->sc_opmode == ATH9K_M_STA) &&
815 !sc->sc_config.swBeaconProcess)
816 sc->sc_imask |= ATH9K_INT_TIM;
817 /*
818 * Don't enable interrupts here as we've not yet built our
819 * vap and node data structures, which will be needed as soon
820 * as we start receiving.
821 */
822 ath_setcurmode(sc, ath_chan2mode(initial_chan));
823
824 /* XXX: we must make sure h/w is ready and clear invalid flag
825 * before turning on interrupt. */
826 sc->sc_invalid = 0;
827done:
828 return error;
829}
830
831/*
832 * Reset the hardware w/o losing operational state. This is
833 * basically a more efficient way of doing ath_stop, ath_init,
834 * followed by state transitions to the current 802.11
835 * operational state. Used to recover from errors such as rx overrun
836 * and to reset the hardware when rf gain settings must be reset.
837 */
838
839static int ath_reset_start(struct ath_softc *sc, u32 flag)
840{
841 struct ath_hal *ah = sc->sc_ah;
842
843 ath9k_hw_set_interrupts(ah, 0); /* disable interrupts */
844 ath_draintxq(sc, flag & RESET_RETRY_TXQ); /* stop xmit side */
845 ath_stoprecv(sc); /* stop recv side */
846 ath_flushrecv(sc); /* flush recv queue */
847
848 return 0;
849}
850
851static int ath_reset_end(struct ath_softc *sc, u32 flag)
852{
853 struct ath_hal *ah = sc->sc_ah;
854
855 if (ath_startrecv(sc) != 0) /* restart recv */
856 DPRINTF(sc, ATH_DBG_FATAL,
857 "%s: unable to start recv logic\n", __func__);
858
859 /*
860 * We may be doing a reset in response to a request
861 * that changes the channel so update any state that
862 * might change as a result.
863 */
864 ath_setcurmode(sc, ath_chan2mode(&sc->sc_curchan));
865
866 ath_update_txpow(sc); /* update tx power state */
867
868 if (sc->sc_beacons)
869 ath_beacon_config(sc, ATH_IF_ID_ANY); /* restart beacons */
870 ath9k_hw_set_interrupts(ah, sc->sc_imask);
871
872 /* Restart the txq */
873 if (flag & RESET_RETRY_TXQ) {
874 int i;
875 for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
876 if (ATH_TXQ_SETUP(sc, i)) {
877 spin_lock_bh(&sc->sc_txq[i].axq_lock);
878 ath_txq_schedule(sc, &sc->sc_txq[i]);
879 spin_unlock_bh(&sc->sc_txq[i].axq_lock);
880 }
881 }
882 }
883 return 0;
884}
885
886int ath_reset(struct ath_softc *sc)
887{
888 struct ath_hal *ah = sc->sc_ah;
889 int status;
890 int error = 0;
891 enum ath9k_ht_macmode ht_macmode = ath_cwm_macmode(sc);
892
893 /* NB: indicate channel change so we do a full reset */
894 spin_lock_bh(&sc->sc_resetlock);
895 if (!ath9k_hw_reset(ah, sc->sc_opmode, &sc->sc_curchan,
896 ht_macmode,
897 sc->sc_tx_chainmask, sc->sc_rx_chainmask,
898 sc->sc_ht_extprotspacing, false, &status)) {
899 DPRINTF(sc, ATH_DBG_FATAL,
900 "%s: unable to reset hardware; hal status %u\n",
901 __func__, status);
902 error = -EIO;
903 }
904 spin_unlock_bh(&sc->sc_resetlock);
905
906 return error;
907}
908
909int ath_suspend(struct ath_softc *sc)
910{
911 struct ath_hal *ah = sc->sc_ah;
912
913 /* No I/O if device has been surprise removed */
914 if (sc->sc_invalid)
915 return -EIO;
916
917 /* Shut off the interrupt before setting sc->sc_invalid to '1' */
918 ath9k_hw_set_interrupts(ah, 0);
919
920 /* XXX: we must make sure h/w will not generate any interrupt
921 * before setting the invalid flag. */
922 sc->sc_invalid = 1;
923
924 /* disable HAL and put h/w to sleep */
925 ath9k_hw_disable(sc->sc_ah);
926
927 ath9k_hw_configpcipowersave(sc->sc_ah, 1);
928
929 return 0;
930}
931
932/* Interrupt handler. Most of the actual processing is deferred.
933 * It's the caller's responsibility to ensure the chip is awake. */
934
935irqreturn_t ath_isr(int irq, void *dev)
936{
937 struct ath_softc *sc = dev;
938 struct ath_hal *ah = sc->sc_ah;
939 enum ath9k_int status;
940 bool sched = false;
941
942 do {
943 if (sc->sc_invalid) {
944 /*
945 * The hardware is not ready/present, don't
946 * touch anything. Note this can happen early
947 * on if the IRQ is shared.
948 */
949 return IRQ_NONE;
950 }
951 if (!ath9k_hw_intrpend(ah)) { /* shared irq, not for us */
952 return IRQ_NONE;
953 }
954
955 /*
956 * Figure out the reason(s) for the interrupt. Note
957 * that the hal returns a pseudo-ISR that may include
958 * bits we haven't explicitly enabled so we mask the
959		 * value to ensure we only process bits we requested.
960 */
961 ath9k_hw_getisr(ah, &status); /* NB: clears ISR too */
962
963 status &= sc->sc_imask; /* discard unasked-for bits */
964
965 /*
966 * If there are no status bits set, then this interrupt was not
967 * for me (should have been caught above).
968 */
969
970 if (!status)
971 return IRQ_NONE;
972
973 sc->sc_intrstatus = status;
974
975 if (status & ATH9K_INT_FATAL) {
976 /* need a chip reset */
977 sched = true;
978 } else if (status & ATH9K_INT_RXORN) {
979 /* need a chip reset */
980 sched = true;
981 } else {
982 if (status & ATH9K_INT_SWBA) {
983 /* schedule a tasklet for beacon handling */
984 tasklet_schedule(&sc->bcon_tasklet);
985 }
986 if (status & ATH9K_INT_RXEOL) {
987 /*
988 * NB: the hardware should re-read the link when
989 * RXE bit is written, but it doesn't work
990 * at least on older hardware revs.
991 */
992 sched = true;
993 }
994
995 if (status & ATH9K_INT_TXURN)
996 /* bump tx trigger level */
997 ath9k_hw_updatetxtriglevel(ah, true);
998 /* XXX: optimize this */
999 if (status & ATH9K_INT_RX)
1000 sched = true;
1001 if (status & ATH9K_INT_TX)
1002 sched = true;
1003 if (status & ATH9K_INT_BMISS)
1004 sched = true;
1005 /* carrier sense timeout */
1006 if (status & ATH9K_INT_CST)
1007 sched = true;
1008 if (status & ATH9K_INT_MIB) {
1009 /*
1010 * Disable interrupts until we service the MIB
1011 * interrupt; otherwise it will continue to
1012 * fire.
1013 */
1014 ath9k_hw_set_interrupts(ah, 0);
1015 /*
1016 * Let the hal handle the event. We assume
1017 * it will clear whatever condition caused
1018 * the interrupt.
1019 */
1020 ath9k_hw_procmibevent(ah, &sc->sc_halstats);
1021 ath9k_hw_set_interrupts(ah, sc->sc_imask);
1022 }
1023 if (status & ATH9K_INT_TIM_TIMER) {
1024 if (!(ah->ah_caps.hw_caps &
1025 ATH9K_HW_CAP_AUTOSLEEP)) {
1026 /* Clear RxAbort bit so that we can
1027 * receive frames */
1028 ath9k_hw_setrxabort(ah, 0);
1029 sched = true;
1030 }
1031 }
1032 }
1033 } while (0);
1034
1035 if (sched) {
1036 /* turn off every interrupt except SWBA */
1037 ath9k_hw_set_interrupts(ah, (sc->sc_imask & ATH9K_INT_SWBA));
1038 tasklet_schedule(&sc->intr_tq);
1039 }
1040
1041 return IRQ_HANDLED;
1042}
1043
1044/* Deferred interrupt processing */
1045
1046static void ath9k_tasklet(unsigned long data)
1047{
1048 struct ath_softc *sc = (struct ath_softc *)data;
1049 u32 status = sc->sc_intrstatus;
1050
1051 if (status & ATH9K_INT_FATAL) {
1052 /* need a chip reset */
1053 ath_internal_reset(sc);
1054 return;
1055 } else {
1056
1057 if (status &
1058 (ATH9K_INT_RX | ATH9K_INT_RXEOL | ATH9K_INT_RXORN)) {
1059 /* XXX: fill me in */
1060 /*
1061 if (status & ATH9K_INT_RXORN) {
1062 }
1063 if (status & ATH9K_INT_RXEOL) {
1064 }
1065 */
1066 spin_lock_bh(&sc->sc_rxflushlock);
1067 ath_rx_tasklet(sc, 0);
1068 spin_unlock_bh(&sc->sc_rxflushlock);
1069 }
1070 /* XXX: optimize this */
1071 if (status & ATH9K_INT_TX)
1072 ath_tx_tasklet(sc);
1073 /* XXX: fill me in */
1074 /*
1075 if (status & ATH9K_INT_BMISS) {
1076 }
1077 if (status & (ATH9K_INT_TIM | ATH9K_INT_DTIMSYNC)) {
1078 if (status & ATH9K_INT_TIM) {
1079 }
1080 if (status & ATH9K_INT_DTIMSYNC) {
1081 }
1082 }
1083 */
1084 }
1085
1086 /* re-enable hardware interrupt */
1087 ath9k_hw_set_interrupts(sc->sc_ah, sc->sc_imask);
1088}
1089
1090int ath_init(u16 devid, struct ath_softc *sc)
1091{
1092 struct ath_hal *ah = NULL;
1093 int status;
1094 int error = 0, i;
1095 int csz = 0;
1096 u32 rd;
1097
1098	/* XXX: hardware will not be ready until ath_open() is called */
1099 sc->sc_invalid = 1;
1100
1101 sc->sc_debug = DBG_DEFAULT;
1102 DPRINTF(sc, ATH_DBG_CONFIG, "%s: devid 0x%x\n", __func__, devid);
1103
1104 /* Initialize tasklet */
1105 tasklet_init(&sc->intr_tq, ath9k_tasklet, (unsigned long)sc);
1106 tasklet_init(&sc->bcon_tasklet, ath9k_beacon_tasklet,
1107 (unsigned long)sc);
1108
1109 /*
1110 * Cache line size is used to size and align various
1111 * structures used to communicate with the hardware.
1112 */
1113 bus_read_cachesize(sc, &csz);
1114 /* XXX assert csz is non-zero */
1115 sc->sc_cachelsz = csz << 2; /* convert to bytes */
1116
1117 spin_lock_init(&sc->sc_resetlock);
1118
1119 ah = ath9k_hw_attach(devid, sc, sc->mem, &status);
1120 if (ah == NULL) {
1121 DPRINTF(sc, ATH_DBG_FATAL,
1122 "%s: unable to attach hardware; HAL status %u\n",
1123 __func__, status);
1124 error = -ENXIO;
1125 goto bad;
1126 }
1127 sc->sc_ah = ah;
1128
1129 /* Get the chipset-specific aggr limit. */
1130 sc->sc_rtsaggrlimit = ah->ah_caps.rts_aggr_limit;
1131
1132 /* Get the hardware key cache size. */
1133 sc->sc_keymax = ah->ah_caps.keycache_size;
1134 if (sc->sc_keymax > ATH_KEYMAX) {
1135 DPRINTF(sc, ATH_DBG_KEYCACHE,
1136 "%s: Warning, using only %u entries in %u key cache\n",
1137 __func__, ATH_KEYMAX, sc->sc_keymax);
1138 sc->sc_keymax = ATH_KEYMAX;
1139 }
1140
1141 /*
1142 * Reset the key cache since some parts do not
1143 * reset the contents on initial power up.
1144 */
1145 for (i = 0; i < sc->sc_keymax; i++)
1146 ath9k_hw_keyreset(ah, (u16) i);
1147 /*
1148 * Mark key cache slots associated with global keys
1149 * as in use. If we knew TKIP was not to be used we
1150 * could leave the +32, +64, and +32+64 slots free.
1151 * XXX only for splitmic.
1152 */
1153 for (i = 0; i < IEEE80211_WEP_NKID; i++) {
1154 set_bit(i, sc->sc_keymap);
1155 set_bit(i + 32, sc->sc_keymap);
1156 set_bit(i + 64, sc->sc_keymap);
1157 set_bit(i + 32 + 64, sc->sc_keymap);
1158 }
1159 /*
1160 * Collect the channel list using the default country
1161 * code and including outdoor channels. The 802.11 layer
1162	 * is responsible for filtering this list based on settings
1163 * like the phy mode.
1164 */
1165 rd = ah->ah_currentRD;
1166
1167 error = ath_setup_channels(sc);
1168 if (error)
1169 goto bad;
1170
1171	/* default to monitor mode */
1172 sc->sc_opmode = ATH9K_M_MONITOR;
1173
1174 /* Setup rate tables */
1175
1176 ath_setup_rates(sc, IEEE80211_BAND_2GHZ);
1177 ath_setup_rates(sc, IEEE80211_BAND_5GHZ);
1178
1179 /* NB: setup here so ath_rate_update is happy */
1180 ath_setcurmode(sc, ATH9K_MODE_11A);
1181
1182 /*
1183 * Allocate hardware transmit queues: one queue for
1184 * beacon frames and one data queue for each QoS
1185	 * priority. Note that the hal handles resetting
1186 * these queues at the needed time.
1187 */
1188 sc->sc_bhalq = ath_beaconq_setup(ah);
1189 if (sc->sc_bhalq == -1) {
1190 DPRINTF(sc, ATH_DBG_FATAL,
1191 "%s: unable to setup a beacon xmit queue\n", __func__);
1192 error = -EIO;
1193 goto bad2;
1194 }
1195 sc->sc_cabq = ath_txq_setup(sc, ATH9K_TX_QUEUE_CAB, 0);
1196 if (sc->sc_cabq == NULL) {
1197 DPRINTF(sc, ATH_DBG_FATAL,
1198 "%s: unable to setup CAB xmit queue\n", __func__);
1199 error = -EIO;
1200 goto bad2;
1201 }
1202
1203 sc->sc_config.cabqReadytime = ATH_CABQ_READY_TIME;
1204 ath_cabq_update(sc);
1205
1206 for (i = 0; i < ARRAY_SIZE(sc->sc_haltype2q); i++)
1207 sc->sc_haltype2q[i] = -1;
1208
1209 /* Setup data queues */
1210 /* NB: ensure BK queue is the lowest priority h/w queue */
1211 if (!ath_tx_setup(sc, ATH9K_WME_AC_BK)) {
1212 DPRINTF(sc, ATH_DBG_FATAL,
1213 "%s: unable to setup xmit queue for BK traffic\n",
1214 __func__);
1215 error = -EIO;
1216 goto bad2;
1217 }
1218
1219 if (!ath_tx_setup(sc, ATH9K_WME_AC_BE)) {
1220 DPRINTF(sc, ATH_DBG_FATAL,
1221 "%s: unable to setup xmit queue for BE traffic\n",
1222 __func__);
1223 error = -EIO;
1224 goto bad2;
1225 }
1226 if (!ath_tx_setup(sc, ATH9K_WME_AC_VI)) {
1227 DPRINTF(sc, ATH_DBG_FATAL,
1228 "%s: unable to setup xmit queue for VI traffic\n",
1229 __func__);
1230 error = -EIO;
1231 goto bad2;
1232 }
1233 if (!ath_tx_setup(sc, ATH9K_WME_AC_VO)) {
1234 DPRINTF(sc, ATH_DBG_FATAL,
1235 "%s: unable to setup xmit queue for VO traffic\n",
1236 __func__);
1237 error = -EIO;
1238 goto bad2;
1239 }
1240
1241 sc->sc_rc = ath_rate_attach(ah);
1242 if (sc->sc_rc == NULL) {
1243		error = -EIO;
1244 goto bad2;
1245 }
1246
1247 if (ath9k_hw_getcapability(ah, ATH9K_CAP_CIPHER,
1248 ATH9K_CIPHER_TKIP, NULL)) {
1249 /*
1250 * Whether we should enable h/w TKIP MIC.
1251 * XXX: if we don't support WME TKIP MIC, then we wouldn't
1252 * report WMM capable, so it's always safe to turn on
1253 * TKIP MIC in this case.
1254 */
1255 ath9k_hw_setcapability(sc->sc_ah, ATH9K_CAP_TKIP_MIC,
1256 0, 1, NULL);
1257 }
1258
1259 /*
1260 * Check whether the separate key cache entries
1261 * are required to handle both tx+rx MIC keys.
1262 * With split mic keys the number of stations is limited
1263 * to 27 otherwise 59.
1264 */
1265 if (ath9k_hw_getcapability(ah, ATH9K_CAP_CIPHER,
1266 ATH9K_CIPHER_TKIP, NULL)
1267 && ath9k_hw_getcapability(ah, ATH9K_CAP_CIPHER,
1268 ATH9K_CIPHER_MIC, NULL)
1269 && ath9k_hw_getcapability(ah, ATH9K_CAP_TKIP_SPLIT,
1270 0, NULL))
1271 sc->sc_splitmic = 1;
1272
1273 /* turn on mcast key search if possible */
1274 if (!ath9k_hw_getcapability(ah, ATH9K_CAP_MCAST_KEYSRCH, 0, NULL))
1275 (void)ath9k_hw_setcapability(ah, ATH9K_CAP_MCAST_KEYSRCH, 1,
1276 1, NULL);
1277
1278 sc->sc_config.txpowlimit = ATH_TXPOWER_MAX;
1279 sc->sc_config.txpowlimit_override = 0;
1280
1281 /* 11n Capabilities */
1282 if (ah->ah_caps.hw_caps & ATH9K_HW_CAP_HT) {
1283 sc->sc_txaggr = 1;
1284 sc->sc_rxaggr = 1;
1285 }
1286
1287 sc->sc_tx_chainmask = ah->ah_caps.tx_chainmask;
1288 sc->sc_rx_chainmask = ah->ah_caps.rx_chainmask;
1289
1290 /* Configuration for rx chain detection */
1291 sc->sc_rxchaindetect_ref = 0;
1292 sc->sc_rxchaindetect_thresh5GHz = 35;
1293 sc->sc_rxchaindetect_thresh2GHz = 35;
1294 sc->sc_rxchaindetect_delta5GHz = 30;
1295 sc->sc_rxchaindetect_delta2GHz = 30;
1296
1297 ath9k_hw_setcapability(ah, ATH9K_CAP_DIVERSITY, 1, true, NULL);
1298 sc->sc_defant = ath9k_hw_getdefantenna(ah);
1299
1300 ath9k_hw_getmac(ah, sc->sc_myaddr);
1301 if (ah->ah_caps.hw_caps & ATH9K_HW_CAP_BSSIDMASK) {
1302 ath9k_hw_getbssidmask(ah, sc->sc_bssidmask);
1303 ATH_SET_VAP_BSSID_MASK(sc->sc_bssidmask);
1304 ath9k_hw_setbssidmask(ah, sc->sc_bssidmask);
1305 }
1306 sc->sc_slottime = ATH9K_SLOT_TIME_9; /* default to short slot time */
1307
1308 /* initialize beacon slots */
1309 for (i = 0; i < ARRAY_SIZE(sc->sc_bslot); i++)
1310 sc->sc_bslot[i] = ATH_IF_ID_ANY;
1311
1312 /* save MISC configurations */
1313 sc->sc_config.swBeaconProcess = 1;
1314
1315#ifdef CONFIG_SLOW_ANT_DIV
1316 /* range is 40 - 255, we use something in the middle */
1317 ath_slow_ant_div_init(&sc->sc_antdiv, sc, 0x127);
1318#endif
1319
1320 return 0;
1321bad2:
1322 /* cleanup tx queues */
1323 for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++)
1324 if (ATH_TXQ_SETUP(sc, i))
1325 ath_tx_cleanupq(sc, &sc->sc_txq[i]);
1326bad:
1327 if (ah)
1328 ath9k_hw_detach(ah);
1329 return error;
1330}
1331
1332void ath_deinit(struct ath_softc *sc)
1333{
1334 struct ath_hal *ah = sc->sc_ah;
1335 int i;
1336
1337 DPRINTF(sc, ATH_DBG_CONFIG, "%s\n", __func__);
1338
1339 ath_stop(sc);
1340 if (!sc->sc_invalid)
1341 ath9k_hw_setpower(sc->sc_ah, ATH9K_PM_AWAKE);
1342 ath_rate_detach(sc->sc_rc);
1343 /* cleanup tx queues */
1344 for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++)
1345 if (ATH_TXQ_SETUP(sc, i))
1346 ath_tx_cleanupq(sc, &sc->sc_txq[i]);
1347 ath9k_hw_detach(ah);
1348}
1349
1350/*******************/
1351/* Node Management */
1352/*******************/
1353
1354struct ath_node *ath_node_attach(struct ath_softc *sc, u8 *addr, int if_id)
1355{
1356 struct ath_vap *avp;
1357 struct ath_node *an;
1358 DECLARE_MAC_BUF(mac);
1359
1360 avp = sc->sc_vaps[if_id];
1361 ASSERT(avp != NULL);
1362
1363 /* mac80211 sta_notify callback is from an IRQ context, so no sleep */
1364 an = kmalloc(sizeof(struct ath_node), GFP_ATOMIC);
1365 if (an == NULL)
1366 return NULL;
1367 memzero(an, sizeof(*an));
1368
1369 an->an_sc = sc;
1370 memcpy(an->an_addr, addr, ETH_ALEN);
1371 atomic_set(&an->an_refcnt, 1);
1372
1373 /* set up per-node tx/rx state */
1374 ath_tx_node_init(sc, an);
1375 ath_rx_node_init(sc, an);
1376
1377 ath_chainmask_sel_init(sc, an);
1378 ath_chainmask_sel_timerstart(&an->an_chainmask_sel);
1379 list_add(&an->list, &sc->node_list);
1380
1381 return an;
1382}
1383
1384void ath_node_detach(struct ath_softc *sc, struct ath_node *an, bool bh_flag)
1385{
1386 unsigned long flags;
1387
1388 DECLARE_MAC_BUF(mac);
1389
1390 ath_chainmask_sel_timerstop(&an->an_chainmask_sel);
1391 an->an_flags |= ATH_NODE_CLEAN;
1392 ath_tx_node_cleanup(sc, an, bh_flag);
1393 ath_rx_node_cleanup(sc, an);
1394
1395 ath_tx_node_free(sc, an);
1396 ath_rx_node_free(sc, an);
1397
1398 spin_lock_irqsave(&sc->node_lock, flags);
1399
1400 list_del(&an->list);
1401
1402 spin_unlock_irqrestore(&sc->node_lock, flags);
1403
1404 kfree(an);
1405}
1406
1407/* Finds a node and increases the refcnt if found */
1408
1409struct ath_node *ath_node_get(struct ath_softc *sc, u8 *addr)
1410{
1411 struct ath_node *an = NULL, *an_found = NULL;
1412
1413 if (list_empty(&sc->node_list)) /* FIXME */
1414 goto out;
1415 list_for_each_entry(an, &sc->node_list, list) {
1416 if (!compare_ether_addr(an->an_addr, addr)) {
1417 atomic_inc(&an->an_refcnt);
1418 an_found = an;
1419 break;
1420 }
1421 }
1422out:
1423 return an_found;
1424}
1425
1426/* Decrements the refcnt and if it drops to zero, detach the node */
1427
1428void ath_node_put(struct ath_softc *sc, struct ath_node *an, bool bh_flag)
1429{
1430 if (atomic_dec_and_test(&an->an_refcnt))
1431 ath_node_detach(sc, an, bh_flag);
1432}
1433
1434/* Finds a node, doesn't increment refcnt. Caller must hold sc->node_lock */
1435struct ath_node *ath_node_find(struct ath_softc *sc, u8 *addr)
1436{
1437 struct ath_node *an = NULL, *an_found = NULL;
1438
1439 if (list_empty(&sc->node_list))
1440 return NULL;
1441
1442 list_for_each_entry(an, &sc->node_list, list)
1443 if (!compare_ether_addr(an->an_addr, addr)) {
1444 an_found = an;
1445 break;
1446 }
1447
1448 return an_found;
1449}
1450
1451/*
1452 * Set up New Node
1453 *
1454 * Setup driver-specific state for a newly associated node. If the
1455 * station is reassociating, any existing per-TID aggregation state is
1456 * torn down first.
1457*/
1458
1459void ath_newassoc(struct ath_softc *sc,
1460 struct ath_node *an, int isnew, int isuapsd)
1461{
1462 int tidno;
1463
1464 /* if station reassociates, tear down the aggregation state. */
1465 if (!isnew) {
1466 for (tidno = 0; tidno < WME_NUM_TID; tidno++) {
1467 if (sc->sc_txaggr)
1468 ath_tx_aggr_teardown(sc, an, tidno);
1469 if (sc->sc_rxaggr)
1470 ath_rx_aggr_teardown(sc, an, tidno);
1471 }
1472 }
1473 an->an_flags = 0;
1474}
1475
1476/**************/
1477/* Encryption */
1478/**************/
1479
1480void ath_key_reset(struct ath_softc *sc, u16 keyix, int freeslot)
1481{
1482 ath9k_hw_keyreset(sc->sc_ah, keyix);
1483 if (freeslot)
1484 clear_bit(keyix, sc->sc_keymap);
1485}
1486
1487int ath_keyset(struct ath_softc *sc,
1488 u16 keyix,
1489 struct ath9k_keyval *hk,
1490 const u8 mac[ETH_ALEN])
1491{
1492 bool status;
1493
1494 status = ath9k_hw_set_keycache_entry(sc->sc_ah,
1495 keyix, hk, mac, false);
1496
1497 return status != false;
1498}
1499
1500/***********************/
1501/* TX Power/Regulatory */
1502/***********************/
1503
1504/*
1505 * Set Transmit power in HAL
1506 *
1507 * This routine makes the actual HAL calls to set the new transmit power
1508 * limit.
1509*/
1510
1511void ath_update_txpow(struct ath_softc *sc)
1512{
1513 struct ath_hal *ah = sc->sc_ah;
1514 u32 txpow;
1515
1516 if (sc->sc_curtxpow != sc->sc_config.txpowlimit) {
1517 ath9k_hw_set_txpowerlimit(ah, sc->sc_config.txpowlimit);
1518 /* read back in case value is clamped */
1519 ath9k_hw_getcapability(ah, ATH9K_CAP_TXPOW, 1, &txpow);
1520 sc->sc_curtxpow = txpow;
1521 }
1522}
1523
1524/* Return the current country and domain information */
1525void ath_get_currentCountry(struct ath_softc *sc,
1526 struct ath9k_country_entry *ctry)
1527{
1528 ath9k_regd_get_current_country(sc->sc_ah, ctry);
1529
1530	/* If the HAL has not determined a specific country yet (it is band
1531	 * dependent), use the one we passed in. */
1532 if (ctry->countryCode == CTRY_DEFAULT) {
1533 ctry->iso[0] = 0;
1534 ctry->iso[1] = 0;
1535 } else if (ctry->iso[0] && ctry->iso[1]) {
1536 if (!ctry->iso[2]) {
1537 if (ath_outdoor)
1538 ctry->iso[2] = 'O';
1539 else
1540 ctry->iso[2] = 'I';
1541 }
1542 }
1543}
1544
1545/**************************/
1546/* Slow Antenna Diversity */
1547/**************************/
1548
1549void ath_slow_ant_div_init(struct ath_antdiv *antdiv,
1550 struct ath_softc *sc,
1551 int32_t rssitrig)
1552{
1553 int trig;
1554
1555 /* antdivf_rssitrig can range from 40 - 0xff */
1556 trig = (rssitrig > 0xff) ? 0xff : rssitrig;
1557	trig = (trig < 40) ? 40 : trig;
1558
1559 antdiv->antdiv_sc = sc;
1560 antdiv->antdivf_rssitrig = trig;
1561}
1562
1563void ath_slow_ant_div_start(struct ath_antdiv *antdiv,
1564 u8 num_antcfg,
1565 const u8 *bssid)
1566{
1567 antdiv->antdiv_num_antcfg =
1568 num_antcfg < ATH_ANT_DIV_MAX_CFG ?
1569 num_antcfg : ATH_ANT_DIV_MAX_CFG;
1570 antdiv->antdiv_state = ATH_ANT_DIV_IDLE;
1571 antdiv->antdiv_curcfg = 0;
1572 antdiv->antdiv_bestcfg = 0;
1573 antdiv->antdiv_laststatetsf = 0;
1574
1575 memcpy(antdiv->antdiv_bssid, bssid, sizeof(antdiv->antdiv_bssid));
1576
1577 antdiv->antdiv_start = 1;
1578}
1579
1580void ath_slow_ant_div_stop(struct ath_antdiv *antdiv)
1581{
1582 antdiv->antdiv_start = 0;
1583}
1584
1585static int32_t ath_find_max_val(int32_t *val,
1586 u8 num_val, u8 *max_index)
1587{
1588 u32 MaxVal = *val++;
1589 u32 cur_index = 0;
1590
1591 *max_index = 0;
1592 while (++cur_index < num_val) {
1593 if (*val > MaxVal) {
1594 MaxVal = *val;
1595 *max_index = cur_index;
1596 }
1597
1598 val++;
1599 }
1600
1601 return MaxVal;
1602}
1603
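/*
 * In short, the state machine below works as follows. Every beacon
 * received from the associated BSS records the RSSI and TSF for the
 * currently selected antenna configuration.
 *
 * IDLE: if the recorded beacon RSSI for the current config drops below
 *       antdivf_rssitrig and at least ATH_ANT_DIV_MIN_IDLE_US has
 *       elapsed since the last state change, switch to the next config
 *       and enter SCAN.
 * SCAN: dwell at least ATH_ANT_DIV_MIN_SCAN_US on each config; once the
 *       scan wraps around to the config it started from, pick the config
 *       with the best recorded beacon RSSI (ath_find_max_val) and return
 *       to IDLE.
 */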
1604void ath_slow_ant_div(struct ath_antdiv *antdiv,
1605 struct ieee80211_hdr *hdr,
1606 struct ath_rx_status *rx_stats)
1607{
1608 struct ath_softc *sc = antdiv->antdiv_sc;
1609 struct ath_hal *ah = sc->sc_ah;
1610 u64 curtsf = 0;
1611 u8 bestcfg, curcfg = antdiv->antdiv_curcfg;
1612 __le16 fc = hdr->frame_control;
1613
1614 if (antdiv->antdiv_start && ieee80211_is_beacon(fc)
1615 && !compare_ether_addr(hdr->addr3, antdiv->antdiv_bssid)) {
1616 antdiv->antdiv_lastbrssi[curcfg] = rx_stats->rs_rssi;
1617 antdiv->antdiv_lastbtsf[curcfg] = ath9k_hw_gettsf64(sc->sc_ah);
1618 curtsf = antdiv->antdiv_lastbtsf[curcfg];
1619 } else {
1620 return;
1621 }
1622
1623 switch (antdiv->antdiv_state) {
1624 case ATH_ANT_DIV_IDLE:
1625 if ((antdiv->antdiv_lastbrssi[curcfg] <
1626 antdiv->antdivf_rssitrig)
1627 && ((curtsf - antdiv->antdiv_laststatetsf) >
1628 ATH_ANT_DIV_MIN_IDLE_US)) {
1629
1630 curcfg++;
1631 if (curcfg == antdiv->antdiv_num_antcfg)
1632 curcfg = 0;
1633
1634 if (!ath9k_hw_select_antconfig(ah, curcfg)) {
1635 antdiv->antdiv_bestcfg = antdiv->antdiv_curcfg;
1636 antdiv->antdiv_curcfg = curcfg;
1637 antdiv->antdiv_laststatetsf = curtsf;
1638 antdiv->antdiv_state = ATH_ANT_DIV_SCAN;
1639 }
1640 }
1641 break;
1642
1643 case ATH_ANT_DIV_SCAN:
1644 if ((curtsf - antdiv->antdiv_laststatetsf) <
1645 ATH_ANT_DIV_MIN_SCAN_US)
1646 break;
1647
1648 curcfg++;
1649 if (curcfg == antdiv->antdiv_num_antcfg)
1650 curcfg = 0;
1651
1652 if (curcfg == antdiv->antdiv_bestcfg) {
1653 ath_find_max_val(antdiv->antdiv_lastbrssi,
1654 antdiv->antdiv_num_antcfg, &bestcfg);
1655 if (!ath9k_hw_select_antconfig(ah, bestcfg)) {
1656 antdiv->antdiv_bestcfg = bestcfg;
1657 antdiv->antdiv_curcfg = bestcfg;
1658 antdiv->antdiv_laststatetsf = curtsf;
1659 antdiv->antdiv_state = ATH_ANT_DIV_IDLE;
1660 }
1661 } else {
1662 if (!ath9k_hw_select_antconfig(ah, curcfg)) {
1663 antdiv->antdiv_curcfg = curcfg;
1664 antdiv->antdiv_laststatetsf = curtsf;
1665 antdiv->antdiv_state = ATH_ANT_DIV_SCAN;
1666 }
1667 }
1668
1669 break;
1670 }
1671}
1672
1673/***********************/
1674/* Descriptor Handling */
1675/***********************/
1676
1677/*
1678 * Set up DMA descriptors
1679 *
1680 * This function will allocate both the DMA descriptor structure, and the
1681 * buffers it contains. These are used to contain the descriptors used
1682 * by the system.
1683*/
1684
1685int ath_descdma_setup(struct ath_softc *sc,
1686 struct ath_descdma *dd,
1687 struct list_head *head,
1688 const char *name,
1689 int nbuf,
1690 int ndesc)
1691{
1692#define DS2PHYS(_dd, _ds) \
1693 ((_dd)->dd_desc_paddr + ((caddr_t)(_ds) - (caddr_t)(_dd)->dd_desc))
1694#define ATH_DESC_4KB_BOUND_CHECK(_daddr) ((((_daddr) & 0xFFF) > 0xF7F) ? 1 : 0)
1695#define ATH_DESC_4KB_BOUND_NUM_SKIPPED(_len) ((_len) / 4096)
1696
1697 struct ath_desc *ds;
1698 struct ath_buf *bf;
1699 int i, bsize, error;
1700
1701 DPRINTF(sc, ATH_DBG_CONFIG, "%s: %s DMA: %u buffers %u desc/buf\n",
1702 __func__, name, nbuf, ndesc);
1703
1704	/* sizeof(struct ath_desc) must be a multiple of a DWORD (4 bytes) */
1705 if ((sizeof(struct ath_desc) % 4) != 0) {
1706 DPRINTF(sc, ATH_DBG_FATAL, "%s: ath_desc not DWORD aligned\n",
1707 __func__);
1708 ASSERT((sizeof(struct ath_desc) % 4) == 0);
1709 error = -ENOMEM;
1710 goto fail;
1711 }
1712
1713 dd->dd_name = name;
1714 dd->dd_desc_len = sizeof(struct ath_desc) * nbuf * ndesc;
1715
1716 /*
1717 * Need additional DMA memory because we can't use
1718 * descriptors that cross the 4K page boundary. Assume
1719 * one skipped descriptor per 4K page.
1720 */
1721 if (!(sc->sc_ah->ah_caps.hw_caps & ATH9K_HW_CAP_4KB_SPLITTRANS)) {
1722 u32 ndesc_skipped =
1723 ATH_DESC_4KB_BOUND_NUM_SKIPPED(dd->dd_desc_len);
1724 u32 dma_len;
1725
1726 while (ndesc_skipped) {
1727 dma_len = ndesc_skipped * sizeof(struct ath_desc);
1728 dd->dd_desc_len += dma_len;
1729
1730 ndesc_skipped = ATH_DESC_4KB_BOUND_NUM_SKIPPED(dma_len);
1731		}
1732 }
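	/*
	 * Worked example of the sizing above (illustrative figure only,
	 * assuming sizeof(struct ath_desc) == 64): 512 descriptors give
	 * dd_desc_len = 32768 bytes = 8 pages, so up to 8 descriptors may
	 * straddle a 4 KB boundary; the loop reserves 8 * 64 = 512 extra
	 * bytes and then stops, since 512 / 4096 == 0.  The buffer setup
	 * loop further down skips any descriptor whose address lies in the
	 * last 128 bytes of a page (offset > 0xF7F), because a 32-dword
	 * (128-byte) descriptor fetch starting there would cross the
	 * boundary.
	 */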
1733
1734 /* allocate descriptors */
1735 dd->dd_desc = pci_alloc_consistent(sc->pdev,
1736 dd->dd_desc_len,
1737 &dd->dd_desc_paddr);
1738 if (dd->dd_desc == NULL) {
1739 error = -ENOMEM;
1740 goto fail;
1741 }
1742 ds = dd->dd_desc;
1743 DPRINTF(sc, ATH_DBG_CONFIG, "%s: %s DMA map: %p (%u) -> %llx (%u)\n",
1744 __func__, dd->dd_name, ds, (u32) dd->dd_desc_len,
1745 ito64(dd->dd_desc_paddr), /*XXX*/(u32) dd->dd_desc_len);
1746
1747 /* allocate buffers */
1748 bsize = sizeof(struct ath_buf) * nbuf;
1749 bf = kmalloc(bsize, GFP_KERNEL);
1750 if (bf == NULL) {
1751 error = -ENOMEM;
1752 goto fail2;
1753 }
1754 memzero(bf, bsize);
1755 dd->dd_bufptr = bf;
1756
1757 INIT_LIST_HEAD(head);
1758 for (i = 0; i < nbuf; i++, bf++, ds += ndesc) {
1759 bf->bf_desc = ds;
1760 bf->bf_daddr = DS2PHYS(dd, ds);
1761
1762 if (!(sc->sc_ah->ah_caps.hw_caps &
1763 ATH9K_HW_CAP_4KB_SPLITTRANS)) {
1764 /*
1765 * Skip descriptor addresses which can cause 4KB
1766 * boundary crossing (addr + length) with a 32 dword
1767 * descriptor fetch.
1768 */
1769 while (ATH_DESC_4KB_BOUND_CHECK(bf->bf_daddr)) {
1770 ASSERT((caddr_t) bf->bf_desc <
1771 ((caddr_t) dd->dd_desc +
1772 dd->dd_desc_len));
1773
1774 ds += ndesc;
1775 bf->bf_desc = ds;
1776 bf->bf_daddr = DS2PHYS(dd, ds);
1777 }
1778 }
1779 list_add_tail(&bf->list, head);
1780 }
1781 return 0;
1782fail2:
1783 pci_free_consistent(sc->pdev,
1784 dd->dd_desc_len, dd->dd_desc, dd->dd_desc_paddr);
1785fail:
1786 memzero(dd, sizeof(*dd));
1787 return error;
1788#undef ATH_DESC_4KB_BOUND_CHECK
1789#undef ATH_DESC_4KB_BOUND_NUM_SKIPPED
1790#undef DS2PHYS
1791}
1792
1793/*
1794 * Cleanup DMA descriptors
1795 *
1796 * This function will free the DMA block that was allocated for the descriptor
1797 * pool. Since this was allocated as one "chunk", it is freed in the same
1798 * manner.
1799*/
1800
1801void ath_descdma_cleanup(struct ath_softc *sc,
1802 struct ath_descdma *dd,
1803 struct list_head *head)
1804{
1805 /* Free memory associated with descriptors */
1806 pci_free_consistent(sc->pdev,
1807 dd->dd_desc_len, dd->dd_desc, dd->dd_desc_paddr);
1808
1809 INIT_LIST_HEAD(head);
1810 kfree(dd->dd_bufptr);
1811 memzero(dd, sizeof(*dd));
1812}
1813
1814/*************/
1815/* Utilities */
1816/*************/
1817
1818void ath_internal_reset(struct ath_softc *sc)
1819{
1820 ath_reset_start(sc, 0);
1821 ath_reset(sc);
1822 ath_reset_end(sc, 0);
1823}
1824
1825int ath_get_hal_qnum(u16 queue, struct ath_softc *sc)
1826{
1827 int qnum;
1828
1829 switch (queue) {
1830 case 0:
1831 qnum = sc->sc_haltype2q[ATH9K_WME_AC_VO];
1832 break;
1833 case 1:
1834 qnum = sc->sc_haltype2q[ATH9K_WME_AC_VI];
1835 break;
1836 case 2:
1837 qnum = sc->sc_haltype2q[ATH9K_WME_AC_BE];
1838 break;
1839 case 3:
1840 qnum = sc->sc_haltype2q[ATH9K_WME_AC_BK];
1841 break;
1842 default:
1843 qnum = sc->sc_haltype2q[ATH9K_WME_AC_BE];
1844 break;
1845 }
1846
1847 return qnum;
1848}
1849
1850int ath_get_mac80211_qnum(u32 queue, struct ath_softc *sc)
1851{
1852 int qnum;
1853
1854 switch (queue) {
1855 case ATH9K_WME_AC_VO:
1856 qnum = 0;
1857 break;
1858 case ATH9K_WME_AC_VI:
1859 qnum = 1;
1860 break;
1861 case ATH9K_WME_AC_BE:
1862 qnum = 2;
1863 break;
1864 case ATH9K_WME_AC_BK:
1865 qnum = 3;
1866 break;
1867 default:
1868 qnum = -1;
1869 break;
1870 }
1871
1872 return qnum;
1873}
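/*
 * The two helpers above are inverses of each other: mac80211 queue
 * numbers 0..3 correspond to the VO, VI, BE and BK access categories
 * respectively.  ath_get_hal_qnum() additionally resolves the access
 * category to the h/w queue number recorded in sc_haltype2q; unknown
 * inputs fall back to BE and -1 respectively.
 */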
1874
1875
1876/*
1877 * Expand time stamp to TSF
1878 *
1879 * Extend 15-bit time stamp from rx descriptor to
1880 * a full 64-bit TSF using the current h/w TSF.
1881*/
1882
1883u64 ath_extend_tsf(struct ath_softc *sc, u32 rstamp)
1884{
1885 u64 tsf;
1886
1887 tsf = ath9k_hw_gettsf64(sc->sc_ah);
1888 if ((tsf & 0x7fff) < rstamp)
1889 tsf -= 0x8000;
1890 return (tsf & ~0x7fff) | rstamp;
1891}
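/*
 * Example of the wrap handling above: with a current TSF of 0x18010
 * (low 15 bits = 0x0010) and rstamp = 0x7ff0, the low 15 bits have
 * wrapped since the frame was stamped.  Stepping the TSF back by 0x8000
 * gives 0x10010, and splicing in rstamp yields 0x17ff0, a point just
 * before the current TSF, instead of 0x1fff0, which would lie in the
 * future.
 */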
1892
1893/*
1894 * Set Default Antenna
1895 *
1896 * Call into the HAL to set the default antenna to use. Not really valid for
1897 * MIMO technology.
1898*/
1899
1900void ath_setdefantenna(void *context, u32 antenna)
1901{
1902 struct ath_softc *sc = (struct ath_softc *)context;
1903 struct ath_hal *ah = sc->sc_ah;
1904
1905 /* XXX block beacon interrupts */
1906 ath9k_hw_setantenna(ah, antenna);
1907 sc->sc_defant = antenna;
1908 sc->sc_rxotherant = 0;
1909}
1910
1911/*
1912 * Set Slot Time
1913 *
1914 * This will wake up the chip if required, and set the slot time for the
1915 * frame (maximum transmit time). Slot time is assumed to be already set
1916 * in the ATH object member sc_slottime
1917*/
1918
1919void ath_setslottime(struct ath_softc *sc)
1920{
1921 ath9k_hw_setslottime(sc->sc_ah, sc->sc_slottime);
1922 sc->sc_updateslot = OK;
1923}
diff --git a/drivers/net/wireless/ath9k/core.h b/drivers/net/wireless/ath9k/core.h
new file mode 100644
index 000000000000..673b3d81133a
--- /dev/null
+++ b/drivers/net/wireless/ath9k/core.h
@@ -0,0 +1,1072 @@
1/*
2 * Copyright (c) 2008 Atheros Communications Inc.
3 *
4 * Permission to use, copy, modify, and/or distribute this software for any
5 * purpose with or without fee is hereby granted, provided that the above
6 * copyright notice and this permission notice appear in all copies.
7 *
8 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
9 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
10 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
11 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
12 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
13 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
14 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
15 */
16
17#ifndef CORE_H
18#define CORE_H
19
20#include <linux/version.h>
21#include <linux/autoconf.h>
22#include <linux/kernel.h>
23#include <linux/module.h>
24#include <linux/spinlock.h>
25#include <linux/errno.h>
26#include <linux/skbuff.h>
27#include <linux/netdevice.h>
28#include <linux/etherdevice.h>
29#include <linux/ip.h>
30#include <linux/tcp.h>
31#include <linux/in.h>
32#include <linux/delay.h>
33#include <linux/wait.h>
34#include <linux/pci.h>
35#include <linux/interrupt.h>
36#include <linux/sched.h>
37#include <linux/list.h>
38#include <asm/byteorder.h>
39#include <linux/scatterlist.h>
40#include <asm/page.h>
41#include <net/mac80211.h>
42
43#include "ath9k.h"
44#include "rc.h"
45
46struct ath_node;
47
48/******************/
49/* Utility macros */
50/******************/
51
52/* Macro to expand scalars to 64-bit objects */
53
54#define ito64(x) (sizeof(x) == 8) ? \
55 (((unsigned long long int)(x)) & (0xff)) : \
56 (sizeof(x) == 16) ? \
57 (((unsigned long long int)(x)) & 0xffff) : \
58 ((sizeof(x) == 32) ? \
59 (((unsigned long long int)(x)) & 0xffffffff) : \
60 (unsigned long long int)(x))
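/*
 * ito64() is used when printing addresses, e.g.
 *	DPRINTF(sc, ..., "... %llx ...", ito64(dd->dd_desc_paddr), ...);
 * in ath_descdma_setup().  Note that sizeof() counts bytes, so a 4-byte
 * scalar takes the plain cast at the end, while an 8-byte value would be
 * masked down to 0xff by the first branch.
 */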
61
62/* increment with wrap-around */
63#define INCR(_l, _sz) do { \
64 (_l)++; \
65 (_l) &= ((_sz) - 1); \
66 } while (0)
67
68/* decrement with wrap-around */
69#define DECR(_l, _sz) do { \
70 (_l)--; \
71 (_l) &= ((_sz) - 1); \
72 } while (0)
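/*
 * INCR()/DECR() assume _sz is a power of two: e.g. with _l == 7,
 * INCR(_l, 8) wraps _l back to 0, and DECR(_l, 8) would then take it
 * back to 7.
 */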
73
74#define A_MAX(a, b) ((a) > (b) ? (a) : (b))
75
76#define ASSERT(exp) do { \
77 if (unlikely(!(exp))) { \
78 BUG(); \
79 } \
80 } while (0)
81
82/* XXX: remove */
83#define memzero(_buf, _len) memset(_buf, 0, _len)
84
85#define get_dma_mem_context(var, field) (&((var)->field))
86#define copy_dma_mem_context(dst, src) (*dst = *src)
87
88#define ATH9K_BH_STATUS_INTACT 0
89#define ATH9K_BH_STATUS_CHANGE 1
90
91#define ATH_TXQ_SETUP(sc, i) ((sc)->sc_txqsetup & (1<<i))
92
93static inline unsigned long get_timestamp(void)
94{
95 return ((jiffies / HZ) * 1000) + (jiffies % HZ) * (1000 / HZ);
96}
97
98/*************/
99/* Debugging */
100/*************/
101
102enum ATH_DEBUG {
103 ATH_DBG_RESET = 0x00000001,
104 ATH_DBG_PHY_IO = 0x00000002,
105 ATH_DBG_REG_IO = 0x00000004,
106 ATH_DBG_QUEUE = 0x00000008,
107 ATH_DBG_EEPROM = 0x00000010,
108 ATH_DBG_NF_CAL = 0x00000020,
109 ATH_DBG_CALIBRATE = 0x00000040,
110 ATH_DBG_CHANNEL = 0x00000080,
111 ATH_DBG_INTERRUPT = 0x00000100,
112 ATH_DBG_REGULATORY = 0x00000200,
113 ATH_DBG_ANI = 0x00000400,
114 ATH_DBG_POWER_MGMT = 0x00000800,
115 ATH_DBG_XMIT = 0x00001000,
116 ATH_DBG_BEACON = 0x00002000,
117 ATH_DBG_RATE = 0x00004000,
118 ATH_DBG_CONFIG = 0x00008000,
119 ATH_DBG_KEYCACHE = 0x00010000,
120 ATH_DBG_AGGR = 0x00020000,
121 ATH_DBG_FATAL = 0x00040000,
122 ATH_DBG_ANY = 0xffffffff
123};
124
125#define DBG_DEFAULT (ATH_DBG_FATAL)
126
127#define DPRINTF(sc, _m, _fmt, ...) do { \
128 if (sc->sc_debug & (_m)) \
129 printk(_fmt , ##__VA_ARGS__); \
130 } while (0)
131
132/***************************/
133/* Load-time Configuration */
134/***************************/
135
136/* Per-instance load-time (note: NOT run-time) configurations
137 * for Atheros Device */
138struct ath_config {
139 u32 ath_aggr_prot;
140 u16 txpowlimit;
141 u16 txpowlimit_override;
142 u8 cabqReadytime; /* Cabq Readytime % */
143 u8 swBeaconProcess; /* Process received beacons in SW (vs HW) */
144};
145
146/***********************/
147/* Chainmask Selection */
148/***********************/
149
150#define ATH_CHAINMASK_SEL_TIMEOUT 6000
151/* Default - Number of last RSSI values that are used for
152 * chainmask selection */
153#define ATH_CHAINMASK_SEL_RSSI_CNT 10
154/* Means use 3x3 chainmask instead of configured chainmask */
155#define ATH_CHAINMASK_SEL_3X3 7
156/* Default - Rssi threshold below which we have to switch to 3x3 */
157#define ATH_CHAINMASK_SEL_UP_RSSI_THRES 20
158/* Default - Rssi threshold above which we have to switch to
159 * user configured values */
160#define ATH_CHAINMASK_SEL_DOWN_RSSI_THRES 35
161/* Struct to store the chainmask select related info */
162struct ath_chainmask_sel {
163 struct timer_list timer;
164 int cur_tx_mask; /* user configured or 3x3 */
165 int cur_rx_mask; /* user configured or 3x3 */
166 int tx_avgrssi;
167 u8 switch_allowed:1, /* timer will set this */
168 cm_sel_enabled : 1;
169};
170
171int ath_chainmask_sel_logic(struct ath_softc *sc, struct ath_node *an);
172void ath_update_chainmask(struct ath_softc *sc, int is_ht);
173
174/*************************/
175/* Descriptor Management */
176/*************************/
177
178/* Number of descriptors per buffer. The only case where we see skbuff
179chains is due to FF aggregation in the driver. */
180#define ATH_TXDESC 1
181/* if there's more fragment for this MSDU */
182#define ATH_BF_MORE_MPDU 1
183#define ATH_TXBUF_RESET(_bf) do { \
184 (_bf)->bf_status = 0; \
185 (_bf)->bf_lastbf = NULL; \
186 (_bf)->bf_lastfrm = NULL; \
187 (_bf)->bf_next = NULL; \
188 memzero(&((_bf)->bf_state), \
189 sizeof(struct ath_buf_state)); \
190 } while (0)
191
192struct ath_buf_state {
193 int bfs_nframes; /* # frames in aggregate */
194 u16 bfs_al; /* length of aggregate */
195 u16 bfs_frmlen; /* length of frame */
196 int bfs_seqno; /* sequence number */
197 int bfs_tidno; /* tid of this frame */
198 int bfs_retries; /* current retries */
199 struct ath_rc_series bfs_rcs[4]; /* rate series */
200 u8 bfs_isdata:1; /* is a data frame/aggregate */
201 u8 bfs_isaggr:1; /* is an aggregate */
202 u8 bfs_isampdu:1; /* is an a-mpdu, aggregate or not */
203 u8 bfs_ht:1; /* is an HT frame */
204 u8 bfs_isretried:1; /* is retried */
205 u8 bfs_isxretried:1; /* is excessive retried */
206 u8 bfs_shpreamble:1; /* is short preamble */
207 u8 bfs_isbar:1; /* is a BAR */
208 u8 bfs_ispspoll:1; /* is a PS-Poll */
209	u8 bfs_aggrburst:1;		/* is an aggr burst */
210 u8 bfs_calcairtime:1; /* requests airtime be calculated
211 when set for tx frame */
212 int bfs_rifsburst_elem; /* RIFS burst/bar */
213 int bfs_nrifsubframes; /* # of elements in burst */
214 /* key type use to encrypt this frame */
215 enum ath9k_key_type bfs_keytype;
216};
217
218#define bf_nframes bf_state.bfs_nframes
219#define bf_al bf_state.bfs_al
220#define bf_frmlen bf_state.bfs_frmlen
221#define bf_retries bf_state.bfs_retries
222#define bf_seqno bf_state.bfs_seqno
223#define bf_tidno bf_state.bfs_tidno
224#define bf_rcs bf_state.bfs_rcs
225#define bf_isdata bf_state.bfs_isdata
226#define bf_isaggr bf_state.bfs_isaggr
227#define bf_isampdu bf_state.bfs_isampdu
228#define bf_ht bf_state.bfs_ht
229#define bf_isretried bf_state.bfs_isretried
230#define bf_isxretried bf_state.bfs_isxretried
231#define bf_shpreamble bf_state.bfs_shpreamble
232#define bf_rifsburst_elem bf_state.bfs_rifsburst_elem
233#define bf_nrifsubframes bf_state.bfs_nrifsubframes
234#define bf_keytype bf_state.bfs_keytype
235#define bf_isbar bf_state.bfs_isbar
236#define bf_ispspoll bf_state.bfs_ispspoll
237#define bf_aggrburst bf_state.bfs_aggrburst
238#define bf_calcairtime bf_state.bfs_calcairtime
239
240/*
241 * Abstraction of a contiguous buffer to transmit/receive. There is only
242 * a single hw descriptor encapsulated here.
243 */
244
245struct ath_buf {
246 struct list_head list;
247 struct list_head *last;
248 struct ath_buf *bf_lastbf; /* last buf of this unit (a frame or
249 an aggregate) */
250 struct ath_buf *bf_lastfrm; /* last buf of this frame */
251 struct ath_buf *bf_next; /* next subframe in the aggregate */
252 struct ath_buf *bf_rifslast; /* last buf for RIFS burst */
253 void *bf_mpdu; /* enclosing frame structure */
254 void *bf_node; /* pointer to the node */
255 struct ath_desc *bf_desc; /* virtual addr of desc */
256 dma_addr_t bf_daddr; /* physical addr of desc */
257 dma_addr_t bf_buf_addr; /* physical addr of data buffer */
258 u32 bf_status;
259 u16 bf_flags; /* tx descriptor flags */
260 struct ath_buf_state bf_state; /* buffer state */
261 dma_addr_t bf_dmacontext;
262};
263
264/*
265 * Reset the rx buffer.
266 * Any new fields added to ath_buf that require a reset
267 * need to be added to this macro.
268 * Currently bf_status is the only field that
269 * requires a reset.
270 */
271#define ATH_RXBUF_RESET(_bf) ((_bf)->bf_status = 0)
272
273/* hw processing complete, desc processed by hal */
274#define ATH_BUFSTATUS_DONE 0x00000001
275/* hw processing complete, desc hold for hw */
276#define ATH_BUFSTATUS_STALE 0x00000002
277/* Rx-only: OS is done with this packet and it's ok to queue it to hw */
278#define ATH_BUFSTATUS_FREE 0x00000004
279
280/* DMA state for tx/rx descriptors */
281
282struct ath_descdma {
283 const char *dd_name;
284 struct ath_desc *dd_desc; /* descriptors */
285 dma_addr_t dd_desc_paddr; /* physical addr of dd_desc */
286 u32 dd_desc_len; /* size of dd_desc */
287 struct ath_buf *dd_bufptr; /* associated buffers */
288 dma_addr_t dd_dmacontext;
289};
290
291/* Abstraction of a received RX MPDU/MMPDU, or a RX fragment */
292
293struct ath_rx_context {
294 struct ath_buf *ctx_rxbuf; /* associated ath_buf for rx */
295};
296#define ATH_RX_CONTEXT(skb) ((struct ath_rx_context *)skb->cb)
297
298int ath_descdma_setup(struct ath_softc *sc,
299 struct ath_descdma *dd,
300 struct list_head *head,
301 const char *name,
302 int nbuf,
303 int ndesc);
304int ath_desc_alloc(struct ath_softc *sc);
305void ath_desc_free(struct ath_softc *sc);
306void ath_descdma_cleanup(struct ath_softc *sc,
307 struct ath_descdma *dd,
308 struct list_head *head);
309
310/******/
311/* RX */
312/******/
313
314#define ATH_MAX_ANTENNA 3
315#define ATH_RXBUF 512
316#define ATH_RX_TIMEOUT 40 /* 40 milliseconds */
317#define WME_NUM_TID 16
318#define IEEE80211_BAR_CTL_TID_M 0xF000 /* tid mask */
319#define IEEE80211_BAR_CTL_TID_S 2 /* tid shift */
320
321enum ATH_RX_TYPE {
322 ATH_RX_NON_CONSUMED = 0,
323 ATH_RX_CONSUMED
324};
325
326/* per frame rx status block */
327struct ath_recv_status {
328 u64 tsf; /* mac tsf */
329	int8_t		rssi;		/* RSSI (noise floor adjusted) */
330	int8_t		rssictl[ATH_MAX_ANTENNA];	/* RSSI (noise floor adjusted) */
331	int8_t		rssiextn[ATH_MAX_ANTENNA];	/* RSSI (noise floor adjusted) */
332 int8_t abs_rssi; /* absolute RSSI */
333 u8 rateieee; /* data rate received (IEEE rate code) */
334 u8 ratecode; /* phy rate code */
335 int rateKbps; /* data rate received (Kbps) */
336 int antenna; /* rx antenna */
337 int flags; /* status of associated skb */
338#define ATH_RX_FCS_ERROR 0x01
339#define ATH_RX_MIC_ERROR 0x02
340#define ATH_RX_DECRYPT_ERROR 0x04
341#define ATH_RX_RSSI_VALID 0x08
342/* if any of ctl,extn chainrssis are valid */
343#define ATH_RX_CHAIN_RSSI_VALID 0x10
344/* if extn chain rssis are valid */
345#define ATH_RX_RSSI_EXTN_VALID 0x20
346/* set if 40Mhz, clear if 20Mhz */
347#define ATH_RX_40MHZ 0x40
348/* set if short GI, clear if full GI */
349#define ATH_RX_SHORT_GI 0x80
350};
351
352struct ath_rxbuf {
353 struct sk_buff *rx_wbuf;
354 unsigned long rx_time; /* system time when received */
355 struct ath_recv_status rx_status; /* cached rx status */
356};
357
358/* Per-TID aggregate receiver state for a node */
359struct ath_arx_tid {
360 struct ath_node *an;
361 struct ath_rxbuf *rxbuf; /* re-ordering buffer */
362 struct timer_list timer;
363 spinlock_t tidlock;
364 int baw_head; /* seq_next at head */
365 int baw_tail; /* tail of block-ack window */
366 int seq_reset; /* need to reset start sequence */
367 int addba_exchangecomplete;
368 u16 seq_next; /* next expected sequence */
369 u16 baw_size; /* block-ack window size */
370};
371
372/* Per-node receiver aggregate state */
373struct ath_arx {
374 struct ath_arx_tid tid[WME_NUM_TID];
375};
376
377int ath_startrecv(struct ath_softc *sc);
378bool ath_stoprecv(struct ath_softc *sc);
379void ath_flushrecv(struct ath_softc *sc);
380u32 ath_calcrxfilter(struct ath_softc *sc);
381void ath_rx_node_init(struct ath_softc *sc, struct ath_node *an);
382void ath_rx_node_free(struct ath_softc *sc, struct ath_node *an);
383void ath_rx_node_cleanup(struct ath_softc *sc, struct ath_node *an);
384void ath_handle_rx_intr(struct ath_softc *sc);
385int ath_rx_init(struct ath_softc *sc, int nbufs);
386void ath_rx_cleanup(struct ath_softc *sc);
387int ath_rx_tasklet(struct ath_softc *sc, int flush);
388int ath_rx_input(struct ath_softc *sc,
389 struct ath_node *node,
390 int is_ampdu,
391 struct sk_buff *skb,
392 struct ath_recv_status *rx_status,
393 enum ATH_RX_TYPE *status);
394int ath__rx_indicate(struct ath_softc *sc,
395 struct sk_buff *skb,
396 struct ath_recv_status *status,
397 u16 keyix);
398int ath_rx_subframe(struct ath_node *an, struct sk_buff *skb,
399 struct ath_recv_status *status);
400
401/******/
402/* TX */
403/******/
404
405#define ATH_FRAG_PER_MSDU 1
406#define ATH_TXBUF (512/ATH_FRAG_PER_MSDU)
407/* max number of transmit attempts (tries) */
408#define ATH_TXMAXTRY 13
409/* max number of 11n transmit attempts (tries) */
410#define ATH_11N_TXMAXTRY 10
411/* max number of tries for management and control frames */
412#define ATH_MGT_TXMAXTRY 4
413#define WME_BA_BMP_SIZE 64
414#define WME_MAX_BA WME_BA_BMP_SIZE
415#define ATH_TID_MAX_BUFS (2 * WME_MAX_BA)
416#define TID_TO_WME_AC(_tid) \
417 ((((_tid) == 0) || ((_tid) == 3)) ? WME_AC_BE : \
418 (((_tid) == 1) || ((_tid) == 2)) ? WME_AC_BK : \
419 (((_tid) == 4) || ((_tid) == 5)) ? WME_AC_VI : \
420 WME_AC_VO)
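/*
 * TID_TO_WME_AC() follows the standard 802.11e UP-to-AC mapping:
 * TIDs 0 and 3 map to best effort, 1 and 2 to background, 4 and 5 to
 * video, and all remaining TIDs to voice.
 */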
421
422
423/* Wireless Multimedia Extension Defines */
424#define WME_AC_BE 0 /* best effort */
425#define WME_AC_BK 1 /* background */
426#define WME_AC_VI 2 /* video */
427#define WME_AC_VO 3 /* voice */
428#define WME_NUM_AC 4
429
430enum ATH_SM_PWRSAV{
431 ATH_SM_ENABLE,
432 ATH_SM_PWRSAV_STATIC,
433 ATH_SM_PWRSAV_DYNAMIC,
434};
435
436/*
437 * Data transmit queue state. One of these exists for each
438 * hardware transmit queue. Packets sent to us from above
439 * are assigned to queues based on their priority. Not all
440 * devices support a complete set of hardware transmit queues.
441 * For those devices the array sc_ac2q will map multiple
442 * priorities to fewer hardware queues (typically all to one
443 * hardware queue).
444 */
445struct ath_txq {
446 u32 axq_qnum; /* hardware q number */
447 u32 *axq_link; /* link ptr in last TX desc */
448 struct list_head axq_q; /* transmit queue */
449 spinlock_t axq_lock;
450 unsigned long axq_lockflags; /* intr state when must cli */
451 u32 axq_depth; /* queue depth */
452 u8 axq_aggr_depth; /* aggregates queued */
453 u32 axq_totalqueued; /* total ever queued */
454
455 /* count to determine if descriptor should generate int on this txq. */
456 u32 axq_intrcnt;
457
458 bool stopped; /* Is mac80211 queue stopped ? */
459 struct ath_buf *axq_linkbuf; /* virtual addr of last buffer*/
460
461 /* first desc of the last descriptor that contains CTS */
462 struct ath_desc *axq_lastdsWithCTS;
463
464 /* final desc of the gating desc that determines whether
465 lastdsWithCTS has been DMA'ed or not */
466 struct ath_desc *axq_gatingds;
467
468 struct list_head axq_acq;
469};
470
471/* per TID aggregate tx state for a destination */
472struct ath_atx_tid {
473 struct list_head list; /* round-robin tid entry */
474 struct list_head buf_q; /* pending buffers */
475 struct ath_node *an;
476 struct ath_atx_ac *ac;
477 struct ath_buf *tx_buf[ATH_TID_MAX_BUFS]; /* active tx frames */
478 u16 seq_start;
479 u16 seq_next;
480 u16 baw_size;
481 int tidno;
482 int baw_head; /* first un-acked tx buffer */
483 int baw_tail; /* next unused tx buffer slot */
484 int sched;
485 int paused;
486 int cleanup_inprogress;
487 u32 addba_exchangecomplete:1;
488 int32_t addba_exchangeinprogress;
489 int addba_exchangeattempts;
490};
491
492/* per access-category aggregate tx state for a destination */
493struct ath_atx_ac {
494 int sched; /* dest-ac is scheduled */
495 int qnum; /* H/W queue number associated
496 with this AC */
497 struct list_head list; /* round-robin txq entry */
498 struct list_head tid_q; /* queue of TIDs with buffers */
499};
500
501/* per dest tx state */
502struct ath_atx {
503 struct ath_atx_tid tid[WME_NUM_TID];
504 struct ath_atx_ac ac[WME_NUM_AC];
505};
506
507/* per-frame tx control block */
508struct ath_tx_control {
509 struct ath_node *an;
510 int if_id;
511 int qnum;
512 u32 ht:1;
513 u32 ps:1;
514 u32 use_minrate:1;
515 enum ath9k_pkt_type atype;
516 enum ath9k_key_type keytype;
517 u32 flags;
518 u16 seqno;
519 u16 tidno;
520 u16 txpower;
521 u16 frmlen;
522 u32 keyix;
523 int min_rate;
524 int mcast_rate;
525 u16 nextfraglen;
526 struct ath_softc *dev;
527 dma_addr_t dmacontext;
528};
529
530/* per frame tx status block */
531struct ath_xmit_status {
532	int retries;	/* number of retries to successfully
533 transmit this frame */
534 int flags; /* status of transmit */
535#define ATH_TX_ERROR 0x01
536#define ATH_TX_XRETRY 0x02
537#define ATH_TX_BAR 0x04
538};
539
540struct ath_tx_stat {
541	int rssi;		/* RSSI (noise floor adjusted) */
542	int rssictl[ATH_MAX_ANTENNA];	/* RSSI (noise floor adjusted) */
543	int rssiextn[ATH_MAX_ANTENNA];	/* RSSI (noise floor adjusted) */
544 int rateieee; /* data rate xmitted (IEEE rate code) */
545 int rateKbps; /* data rate xmitted (Kbps) */
546 int ratecode; /* phy rate code */
547 int flags; /* validity flags */
548/* if any of ctl,extn chain rssis are valid */
549#define ATH_TX_CHAIN_RSSI_VALID 0x01
550/* if extn chain rssis are valid */
551#define ATH_TX_RSSI_EXTN_VALID 0x02
552 u32 airtime; /* time on air per final tx rate */
553};
554
555struct ath_txq *ath_txq_setup(struct ath_softc *sc, int qtype, int subtype);
556void ath_tx_cleanupq(struct ath_softc *sc, struct ath_txq *txq);
557int ath_tx_setup(struct ath_softc *sc, int haltype);
558void ath_draintxq(struct ath_softc *sc, bool retry_tx);
559void ath_tx_draintxq(struct ath_softc *sc,
560 struct ath_txq *txq, bool retry_tx);
561void ath_tx_node_init(struct ath_softc *sc, struct ath_node *an);
562void ath_tx_node_cleanup(struct ath_softc *sc,
563 struct ath_node *an, bool bh_flag);
564void ath_tx_node_free(struct ath_softc *sc, struct ath_node *an);
565void ath_txq_schedule(struct ath_softc *sc, struct ath_txq *txq);
566int ath_tx_init(struct ath_softc *sc, int nbufs);
567int ath_tx_cleanup(struct ath_softc *sc);
568int ath_tx_get_qnum(struct ath_softc *sc, int qtype, int haltype);
569int ath_txq_update(struct ath_softc *sc, int qnum,
570 struct ath9k_tx_queue_info *q);
571int ath_tx_start(struct ath_softc *sc, struct sk_buff *skb);
572void ath_tx_tasklet(struct ath_softc *sc);
573u32 ath_txq_depth(struct ath_softc *sc, int qnum);
574u32 ath_txq_aggr_depth(struct ath_softc *sc, int qnum);
575void ath_notify_txq_status(struct ath_softc *sc, u16 queue_depth);
576void ath_tx_complete(struct ath_softc *sc, struct sk_buff *skb,
577 struct ath_xmit_status *tx_status, struct ath_node *an);
578
579/**********************/
580/* Node / Aggregation */
581/**********************/
582
583/* indicates the node is cleaned up */
584#define ATH_NODE_CLEAN 0x1
585/* indicates the node is in 802.11 power save */
586#define ATH_NODE_PWRSAVE 0x2
587
588#define ADDBA_TIMEOUT 200 /* 200 milliseconds */
589#define ADDBA_EXCHANGE_ATTEMPTS 10
590#define ATH_AGGR_DELIM_SZ 4 /* delimiter size */
591#define ATH_AGGR_MINPLEN 256 /* in bytes, minimum packet length */
592/* number of delimiters for encryption padding */
593#define ATH_AGGR_ENCRYPTDELIM 10
594/* minimum h/w qdepth to be sustained to maximize aggregation */
595#define ATH_AGGR_MIN_QDEPTH 2
596#define ATH_AMPDU_SUBFRAME_DEFAULT 32
597#define IEEE80211_SEQ_SEQ_SHIFT 4
598#define IEEE80211_SEQ_MAX 4096
599#define IEEE80211_MIN_AMPDU_BUF 0x8
600
601/* return whether a bit at index _n in bitmap _bm is set
602 * _sz is the size of the bitmap */
603#define ATH_BA_ISSET(_bm, _n) (((_n) < (WME_BA_BMP_SIZE)) && \
604 ((_bm)[(_n) >> 5] & (1 << ((_n) & 31))))
605
606/* return block-ack bitmap index given sequence and starting sequence */
607#define ATH_BA_INDEX(_st, _seq) (((_seq) - (_st)) & (IEEE80211_SEQ_MAX - 1))
608
609/* returns delimiter padding required given the packet length */
610#define ATH_AGGR_GET_NDELIM(_len) \
611 (((((_len) + ATH_AGGR_DELIM_SZ) < ATH_AGGR_MINPLEN) ? \
612 (ATH_AGGR_MINPLEN - (_len) - ATH_AGGR_DELIM_SZ) : 0) >> 2)
613
614#define BAW_WITHIN(_start, _bawsz, _seqno) \
615 ((((_seqno) - (_start)) & 4095) < (_bawsz))
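/*
 * Example of the block-ack window arithmetic: with seq_start = 4090 and
 * an incoming seqno of 4 (the sequence space wraps at 4096),
 * ATH_BA_INDEX(4090, 4) = (4 - 4090) & 4095 = 10, so the frame occupies
 * bit 10 of the bitmap and BAW_WITHIN(4090, 64, 4) is true.
 * ATH_AGGR_GET_NDELIM() pads short subframes up to ATH_AGGR_MINPLEN: a
 * 100-byte MPDU needs (256 - 100 - 4) >> 2 = 38 extra 4-byte delimiters,
 * so that MPDU + its own delimiter + padding reaches
 * 100 + 4 + 38 * 4 = 256 bytes.
 */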
616
617#define ATH_DS_BA_SEQ(_ds) ((_ds)->ds_us.tx.ts_seqnum)
618#define ATH_DS_BA_BITMAP(_ds) (&(_ds)->ds_us.tx.ba_low)
619#define ATH_DS_TX_BA(_ds) ((_ds)->ds_us.tx.ts_flags & ATH9K_TX_BA)
620#define ATH_AN_2_TID(_an, _tidno) (&(_an)->an_aggr.tx.tid[(_tidno)])
621
622enum ATH_AGGR_STATUS {
623 ATH_AGGR_DONE,
624 ATH_AGGR_BAW_CLOSED,
625 ATH_AGGR_LIMITED,
626 ATH_AGGR_SHORTPKT,
627 ATH_AGGR_8K_LIMITED,
628};
629
630enum ATH_AGGR_CHECK {
631 AGGR_NOT_REQUIRED,
632 AGGR_REQUIRED,
633 AGGR_CLEANUP_PROGRESS,
634 AGGR_EXCHANGE_PROGRESS,
635 AGGR_EXCHANGE_DONE
636};
637
638struct aggr_rifs_param {
639 int param_max_frames;
640 int param_max_len;
641 int param_rl;
642 int param_al;
643 struct ath_rc_series *param_rcs;
644};
645
646/* Per-node aggregation state */
647struct ath_node_aggr {
648 struct ath_atx tx; /* node transmit state */
649 struct ath_arx rx; /* node receive state */
650};
651
652/* driver-specific node state */
653struct ath_node {
654 struct list_head list;
655 struct ath_softc *an_sc;
656 atomic_t an_refcnt;
657 struct ath_chainmask_sel an_chainmask_sel;
658 struct ath_node_aggr an_aggr;
659 u8 an_smmode; /* SM Power save mode */
660 u8 an_flags;
661 u8 an_addr[ETH_ALEN];
662};
663
664void ath_tx_resume_tid(struct ath_softc *sc,
665 struct ath_atx_tid *tid);
666enum ATH_AGGR_CHECK ath_tx_aggr_check(struct ath_softc *sc,
667 struct ath_node *an, u8 tidno);
668void ath_tx_aggr_teardown(struct ath_softc *sc,
669 struct ath_node *an, u8 tidno);
670void ath_rx_aggr_teardown(struct ath_softc *sc,
671 struct ath_node *an, u8 tidno);
672int ath_rx_aggr_start(struct ath_softc *sc,
673 const u8 *addr,
674 u16 tid,
675 u16 *ssn);
676int ath_rx_aggr_stop(struct ath_softc *sc,
677 const u8 *addr,
678 u16 tid);
679int ath_tx_aggr_start(struct ath_softc *sc,
680 const u8 *addr,
681 u16 tid,
682 u16 *ssn);
683int ath_tx_aggr_stop(struct ath_softc *sc,
684 const u8 *addr,
685 u16 tid);
686void ath_newassoc(struct ath_softc *sc,
687 struct ath_node *node, int isnew, int isuapsd);
688struct ath_node *ath_node_attach(struct ath_softc *sc,
689 u8 addr[ETH_ALEN], int if_id);
690void ath_node_detach(struct ath_softc *sc, struct ath_node *an, bool bh_flag);
691struct ath_node *ath_node_get(struct ath_softc *sc, u8 addr[ETH_ALEN]);
692void ath_node_put(struct ath_softc *sc, struct ath_node *an, bool bh_flag);
693struct ath_node *ath_node_find(struct ath_softc *sc, u8 *addr);
694
695/*******************/
696/* Beacon Handling */
697/*******************/
698
699/*
700 * Regardless of the number of beacons we stagger (i.e. regardless of the
701 * number of BSSIDs), if a given beacon does not go out even after waiting
702 * this many beacon intervals, the game's up.
703 */
704#define BSTUCK_THRESH (9 * ATH_BCBUF)
705#define ATH_BCBUF 4 /* number of beacon buffers */
706#define ATH_DEFAULT_BINTVAL 100 /* default beacon interval in TU */
707#define ATH_DEFAULT_BMISS_LIMIT 10
708#define ATH_BEACON_AIFS_DEFAULT 0 /* Default aifs for ap beacon q */
709#define ATH_BEACON_CWMIN_DEFAULT 0 /* Default cwmin for ap beacon q */
710#define ATH_BEACON_CWMAX_DEFAULT 0 /* Default cwmax for ap beacon q */
711#define IEEE80211_MS_TO_TU(x) (((x) * 1000) / 1024)
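/*
 * IEEE80211_MS_TO_TU() converts milliseconds to 802.11 time units
 * (1 TU = 1024 microseconds): e.g. 100 ms becomes
 * (100 * 1000) / 1024 = 97 TU.
 */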
712
713/* beacon configuration */
714struct ath_beacon_config {
715 u16 beacon_interval;
716 u16 listen_interval;
717 u16 dtim_period;
718 u16 bmiss_timeout;
719 u8 dtim_count;
720 u8 tim_offset;
721 union {
722 u64 last_tsf;
723 u8 last_tstamp[8];
724 } u; /* last received beacon/probe response timestamp of this BSS. */
725};
726
727/* offsets in a beacon frame for
728 * quick access of beacon content by low-level driver */
729struct ath_beacon_offset {
730 u8 *bo_tim; /* start of atim/dtim */
731};
732
733void ath9k_beacon_tasklet(unsigned long data);
734void ath_beacon_config(struct ath_softc *sc, int if_id);
735int ath_beaconq_setup(struct ath_hal *ah);
736int ath_beacon_alloc(struct ath_softc *sc, int if_id);
737void ath_bstuck_process(struct ath_softc *sc);
738void ath_beacon_tasklet(struct ath_softc *sc, int *needmark);
739void ath_beacon_free(struct ath_softc *sc);
740void ath_beacon_return(struct ath_softc *sc, struct ath_vap *avp);
741void ath_beacon_sync(struct ath_softc *sc, int if_id);
742void ath_update_beacon_info(struct ath_softc *sc, int avgbrssi);
743void ath_get_beaconconfig(struct ath_softc *sc,
744 int if_id,
745 struct ath_beacon_config *conf);
746int ath_update_beacon(struct ath_softc *sc,
747 int if_id,
748 struct ath_beacon_offset *bo,
749 struct sk_buff *skb,
750 int mcast);
751/********/
752/* VAPs */
753/********/
754
755/*
756 * Define the scheme used to select the MAC address for multiple
757 * BSSes on the same radio. The very first VAP will just use the MAC
758 * address from the EEPROM. For the next 3 VAPs, we set the
759 * U/L bit (bit 1) in MAC address, and use the next two bits as the
760 * index of the VAP.
761 */
762
763#define ATH_SET_VAP_BSSID_MASK(bssid_mask) \
764 ((bssid_mask)[0] &= ~(((ATH_BCBUF-1)<<2)|0x02))
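/*
 * With ATH_BCBUF == 4, ATH_SET_VAP_BSSID_MASK() clears
 * ((4 - 1) << 2) | 0x02 = 0x0e, i.e. bits 1-3 of the first address octet
 * in the BSSID mask, so the hardware treats the U/L bit and the two VAP
 * index bits described above as "don't care" when matching received
 * frames against our addresses.
 */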
765
766/* VAP configuration (from protocol layer) */
767struct ath_vap_config {
768 u32 av_fixed_rateset;
769 u32 av_fixed_retryset;
770};
771
772/* driver-specific vap state */
773struct ath_vap {
774 struct ieee80211_vif *av_if_data;
775 enum ath9k_opmode av_opmode; /* VAP operational mode */
776 struct ath_buf *av_bcbuf; /* beacon buffer */
777 struct ath_beacon_offset av_boff; /* dynamic update state */
778 struct ath_tx_control av_btxctl; /* txctl information for beacon */
779 int av_bslot; /* beacon slot index */
780 struct ath_txq av_mcastq; /* multicast transmit queue */
781 struct ath_vap_config av_config;/* vap configuration parameters*/
782 struct ath_rate_node *rc_node;
783};
784
785int ath_vap_attach(struct ath_softc *sc,
786 int if_id,
787 struct ieee80211_vif *if_data,
788 enum ath9k_opmode opmode);
789int ath_vap_detach(struct ath_softc *sc, int if_id);
790int ath_vap_config(struct ath_softc *sc,
791 int if_id, struct ath_vap_config *if_config);
792int ath_vap_listen(struct ath_softc *sc, int if_id);
793
794/*********************/
795/* Antenna diversity */
796/*********************/
797
798#define ATH_ANT_DIV_MAX_CFG 2
799#define ATH_ANT_DIV_MIN_IDLE_US 1000000 /* us */
800#define ATH_ANT_DIV_MIN_SCAN_US 50000 /* us */
801
802enum ATH_ANT_DIV_STATE{
803 ATH_ANT_DIV_IDLE,
804 ATH_ANT_DIV_SCAN, /* evaluating antenna */
805};
806
807struct ath_antdiv {
808 struct ath_softc *antdiv_sc;
809 u8 antdiv_start;
810 enum ATH_ANT_DIV_STATE antdiv_state;
811 u8 antdiv_num_antcfg;
812 u8 antdiv_curcfg;
813 u8 antdiv_bestcfg;
814 int32_t antdivf_rssitrig;
815 int32_t antdiv_lastbrssi[ATH_ANT_DIV_MAX_CFG];
816 u64 antdiv_lastbtsf[ATH_ANT_DIV_MAX_CFG];
817 u64 antdiv_laststatetsf;
818 u8 antdiv_bssid[ETH_ALEN];
819};
820
821void ath_slow_ant_div_init(struct ath_antdiv *antdiv,
822 struct ath_softc *sc, int32_t rssitrig);
823void ath_slow_ant_div_start(struct ath_antdiv *antdiv,
824 u8 num_antcfg,
825 const u8 *bssid);
826void ath_slow_ant_div_stop(struct ath_antdiv *antdiv);
827void ath_slow_ant_div(struct ath_antdiv *antdiv,
828 struct ieee80211_hdr *wh,
829 struct ath_rx_status *rx_stats);
830void ath_setdefantenna(void *sc, u32 antenna);
831
832/********************/
833/* Main driver core */
834/********************/
835
836/*
837 * Default cache line size, in bytes.
838 * Used when PCI device not fully initialized by bootrom/BIOS
839*/
840#define DEFAULT_CACHELINE 32
841#define ATH_DEFAULT_NOISE_FLOOR -95
842#define ATH_REGCLASSIDS_MAX 10
843#define ATH_CABQ_READY_TIME 80 /* % of beacon interval */
844#define ATH_PREAMBLE_SHORT (1<<0)
845#define ATH_PROTECT_ENABLE (1<<1)
846#define ATH_MAX_SW_RETRIES 10
847/* Number of frames difference in tx to flip default recv */
848#define ATH_ANTENNA_DIFF 2
849#define ATH_CHAN_MAX 255
850#define IEEE80211_WEP_NKID 4 /* number of key ids */
851#define IEEE80211_RATE_VAL 0x7f
852/*
853 * The key cache is used for h/w cipher state and also for
854 * tracking station state such as the current tx antenna.
855 * We also setup a mapping table between key cache slot indices
856 * and station state to short-circuit node lookups on rx.
857 * Different parts have different size key caches. We handle
858 * up to ATH_KEYMAX entries (could dynamically allocate state).
859 */
860#define ATH_KEYMAX 128 /* max key cache size we handle */
861
862#define RESET_RETRY_TXQ 0x00000001
863#define ATH_IF_ID_ANY 0xff
864
865#define ATH_TXPOWER_MAX 100 /* .5 dBm units */
866
867#define RSSI_LPF_THRESHOLD -20
868#define ATH_RSSI_EP_MULTIPLIER (1<<7) /* pow2 to optimize out * and / */
869#define ATH_RATE_DUMMY_MARKER 0
870#define ATH_RSSI_LPF_LEN 10
871#define ATH_RSSI_DUMMY_MARKER 0x127
872
873#define ATH_EP_MUL(x, mul) ((x) * (mul))
874#define ATH_EP_RND(x, mul) \
875 ((((x)%(mul)) >= ((mul)/2)) ? ((x) + ((mul) - 1)) / (mul) : (x)/(mul))
876#define ATH_RSSI_OUT(x) \
877 (((x) != ATH_RSSI_DUMMY_MARKER) ? \
878 (ATH_EP_RND((x), ATH_RSSI_EP_MULTIPLIER)) : ATH_RSSI_DUMMY_MARKER)
879#define ATH_RSSI_IN(x) \
880 (ATH_EP_MUL((x), ATH_RSSI_EP_MULTIPLIER))
881#define ATH_LPF_RSSI(x, y, len) \
882 ((x != ATH_RSSI_DUMMY_MARKER) ? \
883 (((x) * ((len) - 1) + (y)) / (len)) : (y))
884#define ATH_RSSI_LPF(x, y) do { \
885 if ((y) >= RSSI_LPF_THRESHOLD) \
886 x = ATH_LPF_RSSI((x), \
887 ATH_RSSI_IN((y)), ATH_RSSI_LPF_LEN); \
888 } while (0)
889
890
891enum PROT_MODE {
892 PROT_M_NONE = 0,
893 PROT_M_RTSCTS,
894 PROT_M_CTSONLY
895};
896
897enum RATE_TYPE {
898 NORMAL_RATE = 0,
899 HALF_RATE,
900 QUARTER_RATE
901};
902
903struct ath_ht_info {
904 enum ath9k_ht_macmode tx_chan_width;
905 u16 maxampdu;
906 u8 mpdudensity;
907 u8 ext_chan_offset;
908};
909
910struct ath_softc {
911 struct ieee80211_hw *hw;
912 struct pci_dev *pdev;
913 void __iomem *mem;
914 struct tasklet_struct intr_tq;
915 struct tasklet_struct bcon_tasklet;
916 struct ath_config sc_config; /* load-time parameters */
917 int sc_debug;
918 struct ath_hal *sc_ah;
919 struct ath_rate_softc *sc_rc; /* tx rate control support */
920 u32 sc_intrstatus;
921 enum ath9k_opmode sc_opmode; /* current operating mode */
922
923 u8 sc_invalid; /* being detached */
924 u8 sc_beacons; /* beacons running */
925 u8 sc_scanning; /* scanning active */
926 u8 sc_txaggr; /* enable 11n tx aggregation */
927 u8 sc_rxaggr; /* enable 11n rx aggregation */
928 u8 sc_update_chainmask; /* change chain mask */
929 u8 sc_full_reset; /* force full reset */
930 enum wireless_mode sc_curmode; /* current phy mode */
931 u16 sc_curtxpow;
932 u16 sc_curaid;
933 u8 sc_curbssid[ETH_ALEN];
934 u8 sc_myaddr[ETH_ALEN];
935 enum PROT_MODE sc_protmode;
936 u8 sc_mcastantenna;
937 u8 sc_txantenna; /* data tx antenna (fixed or auto) */
938 u8 sc_nbcnvaps; /* # of vaps sending beacons */
939 u16 sc_nvaps; /* # of active virtual ap's */
940 struct ath_vap *sc_vaps[ATH_BCBUF];
941 enum ath9k_int sc_imask;
942 u8 sc_bssidmask[ETH_ALEN];
943 u8 sc_defant; /* current default antenna */
944 u8 sc_rxotherant; /* rx's on non-default antenna */
945 u16 sc_cachelsz;
946 int sc_slotupdate; /* slot to next advance fsm */
947 int sc_slottime;
948 u8 sc_noreset;
949 int sc_bslot[ATH_BCBUF];
950 struct ath9k_node_stats sc_halstats; /* station-mode rssi stats */
951 struct list_head node_list;
952 struct ath_ht_info sc_ht_info;
953 int16_t sc_noise_floor; /* signal noise floor in dBm */
954 enum ath9k_ht_extprotspacing sc_ht_extprotspacing;
955 u8 sc_tx_chainmask;
956 u8 sc_rx_chainmask;
957 u8 sc_rxchaindetect_ref;
958 u8 sc_rxchaindetect_thresh5GHz;
959 u8 sc_rxchaindetect_thresh2GHz;
960 u8 sc_rxchaindetect_delta5GHz;
961 u8 sc_rxchaindetect_delta2GHz;
962 u32 sc_rtsaggrlimit; /* Chipset specific aggr limit */
963 u32 sc_flags;
964#ifdef CONFIG_SLOW_ANT_DIV
965 struct ath_antdiv sc_antdiv;
966#endif
967 enum {
968 OK, /* no change needed */
969 UPDATE, /* update pending */
970 COMMIT /* beacon sent, commit change */
971 } sc_updateslot; /* slot time update fsm */
972
973 /* Crypto */
974 u32 sc_keymax; /* size of key cache */
975 DECLARE_BITMAP(sc_keymap, ATH_KEYMAX); /* key use bit map */
976 u8 sc_splitmic; /* split TKIP MIC keys */
977 int sc_keytype;
978
979 /* RX */
980 struct list_head sc_rxbuf;
981 struct ath_descdma sc_rxdma;
982 int sc_rxbufsize; /* rx size based on mtu */
983 u32 *sc_rxlink; /* link ptr in last RX desc */
984 u32 sc_rxflush; /* rx flush in progress */
985 u64 sc_lastrx; /* tsf of last rx'd frame */
986
987 /* TX */
988 struct list_head sc_txbuf;
989 struct ath_txq sc_txq[ATH9K_NUM_TX_QUEUES];
990 struct ath_descdma sc_txdma;
991 u32 sc_txqsetup;
992 u32 sc_txintrperiod; /* tx interrupt batching */
993 int sc_haltype2q[ATH9K_WME_AC_VO+1]; /* HAL WME AC -> h/w qnum */
994 u32 sc_ant_tx[8]; /* recent tx frames/antenna */
995
996 /* Beacon */
997 struct ath9k_tx_queue_info sc_beacon_qi;
998 struct ath_descdma sc_bdma;
999 struct ath_txq *sc_cabq;
1000 struct list_head sc_bbuf;
1001 u32 sc_bhalq;
1002 u32 sc_bmisscount;
1003 u32 ast_be_xmit; /* beacons transmitted */
1004
1005 /* Rate */
1006 struct ieee80211_rate rates[IEEE80211_NUM_BANDS][ATH_RATE_MAX];
1007 const struct ath9k_rate_table *sc_currates;
1008 u8 sc_rixmap[256]; /* IEEE to h/w rate table ix */
1009 u8 sc_protrix; /* protection rate index */
1010 struct {
1011 u32 rateKbps; /* transfer rate in kbs */
1012 u8 ieeerate; /* IEEE rate */
1013 } sc_hwmap[256]; /* h/w rate ix mappings */
1014
1015 /* Channel, Band */
1016 struct ieee80211_channel channels[IEEE80211_NUM_BANDS][ATH_CHAN_MAX];
1017 struct ieee80211_supported_band sbands[IEEE80211_NUM_BANDS];
1018 struct ath9k_channel sc_curchan;
1019
1020 /* Locks */
1021 spinlock_t sc_rxflushlock;
1022 spinlock_t sc_rxbuflock;
1023 spinlock_t sc_txbuflock;
1024 spinlock_t sc_resetlock;
1025 spinlock_t node_lock;
1026};
1027
1028int ath_init(u16 devid, struct ath_softc *sc);
1029void ath_deinit(struct ath_softc *sc);
1030int ath_open(struct ath_softc *sc, struct ath9k_channel *initial_chan);
1031int ath_suspend(struct ath_softc *sc);
1032irqreturn_t ath_isr(int irq, void *dev);
1033int ath_reset(struct ath_softc *sc);
1034void ath_scan_start(struct ath_softc *sc);
1035void ath_scan_end(struct ath_softc *sc);
1036int ath_set_channel(struct ath_softc *sc, struct ath9k_channel *hchan);
1037void ath_setup_rate(struct ath_softc *sc,
1038 enum wireless_mode wMode,
1039 enum RATE_TYPE type,
1040 const struct ath9k_rate_table *rt);
1041
1042/*********************/
1043/* Utility Functions */
1044/*********************/
1045
1046void ath_key_reset(struct ath_softc *sc, u16 keyix, int freeslot);
1047int ath_keyset(struct ath_softc *sc,
1048 u16 keyix,
1049 struct ath9k_keyval *hk,
1050 const u8 mac[ETH_ALEN]);
1051int ath_get_hal_qnum(u16 queue, struct ath_softc *sc);
1052int ath_get_mac80211_qnum(u32 queue, struct ath_softc *sc);
1053void ath_setslottime(struct ath_softc *sc);
1054void ath_update_txpow(struct ath_softc *sc);
1055int ath_cabq_update(struct ath_softc *);
1056void ath_get_currentCountry(struct ath_softc *sc,
1057 struct ath9k_country_entry *ctry);
1058u64 ath_extend_tsf(struct ath_softc *sc, u32 rstamp);
1059void ath_internal_reset(struct ath_softc *sc);
1060u32 ath_chan2flags(struct ieee80211_channel *chan, struct ath_softc *sc);
1061dma_addr_t ath_skb_map_single(struct ath_softc *sc,
1062 struct sk_buff *skb,
1063 int direction,
1064 dma_addr_t *pa);
1065void ath_skb_unmap_single(struct ath_softc *sc,
1066 struct sk_buff *skb,
1067 int direction,
1068 dma_addr_t *pa);
1069void ath_mcast_merge(struct ath_softc *sc, u32 mfilt[2]);
1070enum ath9k_ht_macmode ath_cwm_macmode(struct ath_softc *sc);
1071
1072#endif /* CORE_H */
diff --git a/drivers/net/wireless/ath9k/hw.c b/drivers/net/wireless/ath9k/hw.c
new file mode 100644
index 000000000000..bde162f128ab
--- /dev/null
+++ b/drivers/net/wireless/ath9k/hw.c
@@ -0,0 +1,8571 @@
1/*
2 * Copyright (c) 2008 Atheros Communications Inc.
3 *
4 * Permission to use, copy, modify, and/or distribute this software for any
5 * purpose with or without fee is hereby granted, provided that the above
6 * copyright notice and this permission notice appear in all copies.
7 *
8 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
9 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
10 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
11 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
12 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
13 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
14 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
15 */
16
17#include <linux/io.h>
18#include <asm/unaligned.h>
19
20#include "core.h"
21#include "hw.h"
22#include "reg.h"
23#include "phy.h"
24#include "initvals.h"
25
26static void ath9k_hw_iqcal_collect(struct ath_hal *ah);
27static void ath9k_hw_iqcalibrate(struct ath_hal *ah, u8 numChains);
28static void ath9k_hw_adc_gaincal_collect(struct ath_hal *ah);
29static void ath9k_hw_adc_gaincal_calibrate(struct ath_hal *ah,
30 u8 numChains);
31static void ath9k_hw_adc_dccal_collect(struct ath_hal *ah);
32static void ath9k_hw_adc_dccal_calibrate(struct ath_hal *ah,
33 u8 numChains);
34
35static const u8 CLOCK_RATE[] = { 40, 80, 22, 44, 88, 40 };
36static const int16_t NOISE_FLOOR[] = { -96, -93, -98, -96, -93, -96 };
37
38static const struct hal_percal_data iq_cal_multi_sample = {
39 IQ_MISMATCH_CAL,
40 MAX_CAL_SAMPLES,
41 PER_MIN_LOG_COUNT,
42 ath9k_hw_iqcal_collect,
43 ath9k_hw_iqcalibrate
44};
45static const struct hal_percal_data iq_cal_single_sample = {
46 IQ_MISMATCH_CAL,
47 MIN_CAL_SAMPLES,
48 PER_MAX_LOG_COUNT,
49 ath9k_hw_iqcal_collect,
50 ath9k_hw_iqcalibrate
51};
52static const struct hal_percal_data adc_gain_cal_multi_sample = {
53 ADC_GAIN_CAL,
54 MAX_CAL_SAMPLES,
55 PER_MIN_LOG_COUNT,
56 ath9k_hw_adc_gaincal_collect,
57 ath9k_hw_adc_gaincal_calibrate
58};
59static const struct hal_percal_data adc_gain_cal_single_sample = {
60 ADC_GAIN_CAL,
61 MIN_CAL_SAMPLES,
62 PER_MAX_LOG_COUNT,
63 ath9k_hw_adc_gaincal_collect,
64 ath9k_hw_adc_gaincal_calibrate
65};
66static const struct hal_percal_data adc_dc_cal_multi_sample = {
67 ADC_DC_CAL,
68 MAX_CAL_SAMPLES,
69 PER_MIN_LOG_COUNT,
70 ath9k_hw_adc_dccal_collect,
71 ath9k_hw_adc_dccal_calibrate
72};
73static const struct hal_percal_data adc_dc_cal_single_sample = {
74 ADC_DC_CAL,
75 MIN_CAL_SAMPLES,
76 PER_MAX_LOG_COUNT,
77 ath9k_hw_adc_dccal_collect,
78 ath9k_hw_adc_dccal_calibrate
79};
80static const struct hal_percal_data adc_init_dc_cal = {
81 ADC_DC_INIT_CAL,
82 MIN_CAL_SAMPLES,
83 INIT_LOG_COUNT,
84 ath9k_hw_adc_dccal_collect,
85 ath9k_hw_adc_dccal_calibrate
86};
87
88static const struct ath_hal ar5416hal = {
89 AR5416_MAGIC,
90 0,
91 0,
92 NULL,
93 NULL,
94 CTRY_DEFAULT,
95 0,
96 0,
97 0,
98 0,
99 0,
100 {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
101 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
102 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
103 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
104 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
105 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
106 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
107 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
108 },
109};
110
111static struct ath9k_rate_table ar5416_11a_table = {
112 8,
113 {0},
114 {
115 {true, PHY_OFDM, 6000, 0x0b, 0x00, (0x80 | 12), 0},
116 {true, PHY_OFDM, 9000, 0x0f, 0x00, 18, 0},
117 {true, PHY_OFDM, 12000, 0x0a, 0x00, (0x80 | 24), 2},
118 {true, PHY_OFDM, 18000, 0x0e, 0x00, 36, 2},
119 {true, PHY_OFDM, 24000, 0x09, 0x00, (0x80 | 48), 4},
120 {true, PHY_OFDM, 36000, 0x0d, 0x00, 72, 4},
121 {true, PHY_OFDM, 48000, 0x08, 0x00, 96, 4},
122 {true, PHY_OFDM, 54000, 0x0c, 0x00, 108, 4}
123 },
124};
125
126static struct ath9k_rate_table ar5416_11b_table = {
127 4,
128 {0},
129 {
130 {true, PHY_CCK, 1000, 0x1b, 0x00, (0x80 | 2), 0},
131 {true, PHY_CCK, 2000, 0x1a, 0x04, (0x80 | 4), 1},
132 {true, PHY_CCK, 5500, 0x19, 0x04, (0x80 | 11), 1},
133 {true, PHY_CCK, 11000, 0x18, 0x04, (0x80 | 22), 1}
134 },
135};
136
137static struct ath9k_rate_table ar5416_11g_table = {
138 12,
139 {0},
140 {
141 {true, PHY_CCK, 1000, 0x1b, 0x00, (0x80 | 2), 0},
142 {true, PHY_CCK, 2000, 0x1a, 0x04, (0x80 | 4), 1},
143 {true, PHY_CCK, 5500, 0x19, 0x04, (0x80 | 11), 2},
144 {true, PHY_CCK, 11000, 0x18, 0x04, (0x80 | 22), 3},
145
146 {false, PHY_OFDM, 6000, 0x0b, 0x00, 12, 4},
147 {false, PHY_OFDM, 9000, 0x0f, 0x00, 18, 4},
148 {true, PHY_OFDM, 12000, 0x0a, 0x00, 24, 6},
149 {true, PHY_OFDM, 18000, 0x0e, 0x00, 36, 6},
150 {true, PHY_OFDM, 24000, 0x09, 0x00, 48, 8},
151 {true, PHY_OFDM, 36000, 0x0d, 0x00, 72, 8},
152 {true, PHY_OFDM, 48000, 0x08, 0x00, 96, 8},
153 {true, PHY_OFDM, 54000, 0x0c, 0x00, 108, 8}
154 },
155};
156
157static struct ath9k_rate_table ar5416_11ng_table = {
158 28,
159 {0},
160 {
161 {true, PHY_CCK, 1000, 0x1b, 0x00, (0x80 | 2), 0},
162 {true, PHY_CCK, 2000, 0x1a, 0x04, (0x80 | 4), 1},
163 {true, PHY_CCK, 5500, 0x19, 0x04, (0x80 | 11), 2},
164 {true, PHY_CCK, 11000, 0x18, 0x04, (0x80 | 22), 3},
165
166 {false, PHY_OFDM, 6000, 0x0b, 0x00, 12, 4},
167 {false, PHY_OFDM, 9000, 0x0f, 0x00, 18, 4},
168 {true, PHY_OFDM, 12000, 0x0a, 0x00, 24, 6},
169 {true, PHY_OFDM, 18000, 0x0e, 0x00, 36, 6},
170 {true, PHY_OFDM, 24000, 0x09, 0x00, 48, 8},
171 {true, PHY_OFDM, 36000, 0x0d, 0x00, 72, 8},
172 {true, PHY_OFDM, 48000, 0x08, 0x00, 96, 8},
173 {true, PHY_OFDM, 54000, 0x0c, 0x00, 108, 8},
174 {true, PHY_HT, 6500, 0x80, 0x00, 0, 4},
175 {true, PHY_HT, 13000, 0x81, 0x00, 1, 6},
176 {true, PHY_HT, 19500, 0x82, 0x00, 2, 6},
177 {true, PHY_HT, 26000, 0x83, 0x00, 3, 8},
178 {true, PHY_HT, 39000, 0x84, 0x00, 4, 8},
179 {true, PHY_HT, 52000, 0x85, 0x00, 5, 8},
180 {true, PHY_HT, 58500, 0x86, 0x00, 6, 8},
181 {true, PHY_HT, 65000, 0x87, 0x00, 7, 8},
182 {true, PHY_HT, 13000, 0x88, 0x00, 8, 4},
183 {true, PHY_HT, 26000, 0x89, 0x00, 9, 6},
184 {true, PHY_HT, 39000, 0x8a, 0x00, 10, 6},
185 {true, PHY_HT, 52000, 0x8b, 0x00, 11, 8},
186 {true, PHY_HT, 78000, 0x8c, 0x00, 12, 8},
187 {true, PHY_HT, 104000, 0x8d, 0x00, 13, 8},
188 {true, PHY_HT, 117000, 0x8e, 0x00, 14, 8},
189 {true, PHY_HT, 130000, 0x8f, 0x00, 15, 8},
190 },
191};
192
193static struct ath9k_rate_table ar5416_11na_table = {
194 24,
195 {0},
196 {
197 {true, PHY_OFDM, 6000, 0x0b, 0x00, (0x80 | 12), 0},
198 {true, PHY_OFDM, 9000, 0x0f, 0x00, 18, 0},
199 {true, PHY_OFDM, 12000, 0x0a, 0x00, (0x80 | 24), 2},
200 {true, PHY_OFDM, 18000, 0x0e, 0x00, 36, 2},
201 {true, PHY_OFDM, 24000, 0x09, 0x00, (0x80 | 48), 4},
202 {true, PHY_OFDM, 36000, 0x0d, 0x00, 72, 4},
203 {true, PHY_OFDM, 48000, 0x08, 0x00, 96, 4},
204 {true, PHY_OFDM, 54000, 0x0c, 0x00, 108, 4},
205 {true, PHY_HT, 6500, 0x80, 0x00, 0, 0},
206 {true, PHY_HT, 13000, 0x81, 0x00, 1, 2},
207 {true, PHY_HT, 19500, 0x82, 0x00, 2, 2},
208 {true, PHY_HT, 26000, 0x83, 0x00, 3, 4},
209 {true, PHY_HT, 39000, 0x84, 0x00, 4, 4},
210 {true, PHY_HT, 52000, 0x85, 0x00, 5, 4},
211 {true, PHY_HT, 58500, 0x86, 0x00, 6, 4},
212 {true, PHY_HT, 65000, 0x87, 0x00, 7, 4},
213 {true, PHY_HT, 13000, 0x88, 0x00, 8, 0},
214 {true, PHY_HT, 26000, 0x89, 0x00, 9, 2},
215 {true, PHY_HT, 39000, 0x8a, 0x00, 10, 2},
216 {true, PHY_HT, 52000, 0x8b, 0x00, 11, 4},
217 {true, PHY_HT, 78000, 0x8c, 0x00, 12, 4},
218 {true, PHY_HT, 104000, 0x8d, 0x00, 13, 4},
219 {true, PHY_HT, 117000, 0x8e, 0x00, 14, 4},
220 {true, PHY_HT, 130000, 0x8f, 0x00, 15, 4},
221 },
222};
223
224static enum wireless_mode ath9k_hw_chan2wmode(struct ath_hal *ah,
225 const struct ath9k_channel *chan)
226{
227 if (IS_CHAN_CCK(chan))
228 return ATH9K_MODE_11A;
229 if (IS_CHAN_G(chan))
230 return ATH9K_MODE_11G;
231 return ATH9K_MODE_11A;
232}
233
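/*
 * Poll a register until (value & mask) == val.  The loop runs for at
 * most AH_TIMEOUT/AH_TIME_QUANTUM iterations, delaying AH_TIME_QUANTUM
 * microseconds between reads, and logs the final register value if it
 * times out.
 */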
234static bool ath9k_hw_wait(struct ath_hal *ah,
235 u32 reg,
236 u32 mask,
237 u32 val)
238{
239 int i;
240
241 for (i = 0; i < (AH_TIMEOUT / AH_TIME_QUANTUM); i++) {
242 if ((REG_READ(ah, reg) & mask) == val)
243 return true;
244
245 udelay(AH_TIME_QUANTUM);
246 }
247 DPRINTF(ah->ah_sc, ATH_DBG_PHY_IO,
248 "%s: timeout on reg 0x%x: 0x%08x & 0x%08x != 0x%08x\n",
249 __func__, reg, REG_READ(ah, reg), mask, val);
250 return false;
251}
252
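/*
 * Read one 16-bit word from the EEPROM at word offset 'off'.  The dummy
 * read of the offset register starts the access; we then wait for the
 * busy/protected-access bits in AR_EEPROM_STATUS_DATA to clear and pull
 * the result out of the data field.
 */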
253static bool ath9k_hw_eeprom_read(struct ath_hal *ah, u32 off,
254 u16 *data)
255{
256 (void) REG_READ(ah, AR5416_EEPROM_OFFSET + (off << AR5416_EEPROM_S));
257
258 if (!ath9k_hw_wait(ah,
259 AR_EEPROM_STATUS_DATA,
260 AR_EEPROM_STATUS_DATA_BUSY |
261 AR_EEPROM_STATUS_DATA_PROT_ACCESS, 0)) {
262 return false;
263 }
264
265 *data = MS(REG_READ(ah, AR_EEPROM_STATUS_DATA),
266 AR_EEPROM_STATUS_DATA_VAL);
267
268 return true;
269}
270
271static int ath9k_hw_flash_map(struct ath_hal *ah)
272{
273 struct ath_hal_5416 *ahp = AH5416(ah);
274
275 ahp->ah_cal_mem = ioremap(AR5416_EEPROM_START_ADDR, AR5416_EEPROM_MAX);
276
277 if (!ahp->ah_cal_mem) {
278 DPRINTF(ah->ah_sc, ATH_DBG_EEPROM,
 279			"%s: cannot remap eeprom region\n", __func__);
280 return -EIO;
281 }
282
283 return 0;
284}
285
286static bool ath9k_hw_flash_read(struct ath_hal *ah, u32 off,
287 u16 *data)
288{
289 struct ath_hal_5416 *ahp = AH5416(ah);
290
291 *data = ioread16(ahp->ah_cal_mem + off);
292 return true;
293}
294
295static void ath9k_hw_read_revisions(struct ath_hal *ah)
296{
297 u32 val;
298
299 val = REG_READ(ah, AR_SREV) & AR_SREV_ID;
300
301 if (val == 0xFF) {
302 val = REG_READ(ah, AR_SREV);
303
304 ah->ah_macVersion =
305 (val & AR_SREV_VERSION2) >> AR_SREV_TYPE2_S;
306
307 ah->ah_macRev = MS(val, AR_SREV_REVISION2);
308 ah->ah_isPciExpress =
309 (val & AR_SREV_TYPE2_HOST_MODE) ? 0 : 1;
310
311 } else {
312 if (!AR_SREV_9100(ah))
313 ah->ah_macVersion = MS(val, AR_SREV_VERSION);
314
315 ah->ah_macRev = val & AR_SREV_REVISION;
316
317 if (ah->ah_macVersion == AR_SREV_VERSION_5416_PCIE)
318 ah->ah_isPciExpress = true;
319 }
320}
321
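/* Return the low 'n' bits of 'val' in reversed bit order. */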
322u32 ath9k_hw_reverse_bits(u32 val, u32 n)
323{
324 u32 retval;
325 int i;
326
327 for (i = 0, retval = 0; i < n; i++) {
328 retval = (retval << 1) | (val & 1);
329 val >>= 1;
330 }
331 return retval;
332}
333
334static void ath9k_hw_set_defaults(struct ath_hal *ah)
335{
336 int i;
337
338 ah->ah_config.dma_beacon_response_time = 2;
339 ah->ah_config.sw_beacon_response_time = 10;
340 ah->ah_config.additional_swba_backoff = 0;
341 ah->ah_config.ack_6mb = 0x0;
342 ah->ah_config.cwm_ignore_extcca = 0;
343 ah->ah_config.pcie_powersave_enable = 0;
344 ah->ah_config.pcie_l1skp_enable = 0;
345 ah->ah_config.pcie_clock_req = 0;
346 ah->ah_config.pcie_power_reset = 0x100;
347 ah->ah_config.pcie_restore = 0;
348 ah->ah_config.pcie_waen = 0;
349 ah->ah_config.analog_shiftreg = 1;
350 ah->ah_config.ht_enable = 1;
351 ah->ah_config.ofdm_trig_low = 200;
352 ah->ah_config.ofdm_trig_high = 500;
353 ah->ah_config.cck_trig_high = 200;
354 ah->ah_config.cck_trig_low = 100;
355 ah->ah_config.enable_ani = 0;
356 ah->ah_config.noise_immunity_level = 4;
357 ah->ah_config.ofdm_weaksignal_det = 1;
358 ah->ah_config.cck_weaksignal_thr = 0;
359 ah->ah_config.spur_immunity_level = 2;
360 ah->ah_config.firstep_level = 0;
361 ah->ah_config.rssi_thr_high = 40;
362 ah->ah_config.rssi_thr_low = 7;
363 ah->ah_config.diversity_control = 0;
364 ah->ah_config.antenna_switch_swap = 0;
365
366 for (i = 0; i < AR_EEPROM_MODAL_SPURS; i++) {
367 ah->ah_config.spurchans[i][0] = AR_NO_SPUR;
368 ah->ah_config.spurchans[i][1] = AR_NO_SPUR;
369 }
370
371 ah->ah_config.intr_mitigation = 0;
372}
373
374static inline void ath9k_hw_override_ini(struct ath_hal *ah,
375 struct ath9k_channel *chan)
376{
377 if (!AR_SREV_5416_V20_OR_LATER(ah)
378 || AR_SREV_9280_10_OR_LATER(ah))
379 return;
380
381 REG_WRITE(ah, 0x9800 + (651 << 2), 0x11);
382}
383
384static inline void ath9k_hw_init_bb(struct ath_hal *ah,
385 struct ath9k_channel *chan)
386{
387 u32 synthDelay;
388
389 synthDelay = REG_READ(ah, AR_PHY_RX_DELAY) & AR_PHY_RX_DELAY_DELAY;
390 if (IS_CHAN_CCK(chan))
391 synthDelay = (4 * synthDelay) / 22;
392 else
393 synthDelay /= 10;
394
395 REG_WRITE(ah, AR_PHY_ACTIVE, AR_PHY_ACTIVE_EN);
396
397 udelay(synthDelay + BASE_ACTIVATE_DELAY);
398}
399
400static inline void ath9k_hw_init_interrupt_masks(struct ath_hal *ah,
401 enum ath9k_opmode opmode)
402{
403 struct ath_hal_5416 *ahp = AH5416(ah);
404
405 ahp->ah_maskReg = AR_IMR_TXERR |
406 AR_IMR_TXURN |
407 AR_IMR_RXERR |
408 AR_IMR_RXORN |
409 AR_IMR_BCNMISC;
410
411 if (ahp->ah_intrMitigation)
412 ahp->ah_maskReg |= AR_IMR_RXINTM | AR_IMR_RXMINTR;
413 else
414 ahp->ah_maskReg |= AR_IMR_RXOK;
415
416 ahp->ah_maskReg |= AR_IMR_TXOK;
417
418 if (opmode == ATH9K_M_HOSTAP)
419 ahp->ah_maskReg |= AR_IMR_MIB;
420
421 REG_WRITE(ah, AR_IMR, ahp->ah_maskReg);
422 REG_WRITE(ah, AR_IMR_S2, REG_READ(ah, AR_IMR_S2) | AR_IMR_S2_GTT);
423
424 if (!AR_SREV_9100(ah)) {
425 REG_WRITE(ah, AR_INTR_SYNC_CAUSE, 0xFFFFFFFF);
426 REG_WRITE(ah, AR_INTR_SYNC_ENABLE, AR_INTR_SYNC_DEFAULT);
427 REG_WRITE(ah, AR_INTR_SYNC_MASK, 0);
428 }
429}
430
431static inline void ath9k_hw_init_qos(struct ath_hal *ah)
432{
433 REG_WRITE(ah, AR_MIC_QOS_CONTROL, 0x100aa);
434 REG_WRITE(ah, AR_MIC_QOS_SELECT, 0x3210);
435
436 REG_WRITE(ah, AR_QOS_NO_ACK,
437 SM(2, AR_QOS_NO_ACK_TWO_BIT) |
438 SM(5, AR_QOS_NO_ACK_BIT_OFF) |
439 SM(0, AR_QOS_NO_ACK_BYTE_OFF));
440
441 REG_WRITE(ah, AR_TXOP_X, AR_TXOP_X_VAL);
442 REG_WRITE(ah, AR_TXOP_0_3, 0xFFFFFFFF);
443 REG_WRITE(ah, AR_TXOP_4_7, 0xFFFFFFFF);
444 REG_WRITE(ah, AR_TXOP_8_11, 0xFFFFFFFF);
445 REG_WRITE(ah, AR_TXOP_12_15, 0xFFFFFFFF);
446}
447
448static void ath9k_hw_analog_shift_rmw(struct ath_hal *ah,
449 u32 reg,
450 u32 mask,
451 u32 shift,
452 u32 val)
453{
454 u32 regVal;
455
456 regVal = REG_READ(ah, reg) & ~mask;
457 regVal |= (val << shift) & mask;
458
459 REG_WRITE(ah, reg, regVal);
460
461 if (ah->ah_config.analog_shiftreg)
462 udelay(100);
463
464 return;
465}
466
467static u8 ath9k_hw_get_num_ant_config(struct ath_hal_5416 *ahp,
468 enum ieee80211_band freq_band)
469{
470 struct ar5416_eeprom *eep = &ahp->ah_eeprom;
471 struct modal_eep_header *pModal =
472 &(eep->modalHeader[IEEE80211_BAND_5GHZ == freq_band]);
473 struct base_eep_header *pBase = &eep->baseEepHeader;
474 u8 num_ant_config;
475
476 num_ant_config = 1;
477
478 if (pBase->version >= 0x0E0D)
479 if (pModal->useAnt1)
480 num_ant_config += 1;
481
482 return num_ant_config;
483}
484
485static int
486ath9k_hw_get_eeprom_antenna_cfg(struct ath_hal_5416 *ahp,
487 struct ath9k_channel *chan,
488 u8 index,
489 u16 *config)
490{
491 struct ar5416_eeprom *eep = &ahp->ah_eeprom;
492 struct modal_eep_header *pModal =
493 &(eep->modalHeader[IS_CHAN_2GHZ(chan)]);
494 struct base_eep_header *pBase = &eep->baseEepHeader;
495
496 switch (index) {
497 case 0:
498 *config = pModal->antCtrlCommon & 0xFFFF;
499 return 0;
500 case 1:
501 if (pBase->version >= 0x0E0D) {
502 if (pModal->useAnt1) {
503 *config =
504 ((pModal->antCtrlCommon & 0xFFFF0000) >> 16);
505 return 0;
506 }
507 }
508 break;
509 default:
510 break;
511 }
512
513 return -EINVAL;
514}
515
516static inline bool ath9k_hw_nvram_read(struct ath_hal *ah,
517 u32 off,
518 u16 *data)
519{
520 if (ath9k_hw_use_flash(ah))
521 return ath9k_hw_flash_read(ah, off, data);
522 else
523 return ath9k_hw_eeprom_read(ah, off, data);
524}
525
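/*
 * Pull the whole calibration image into ahp->ah_eeprom, one 16-bit word
 * at a time, via ath9k_hw_nvram_read().  EEPROM-based parts and AR9100
 * keep the image at word offset 256; flash-based parts start at 0.
 */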
526static inline bool ath9k_hw_fill_eeprom(struct ath_hal *ah)
527{
528 struct ath_hal_5416 *ahp = AH5416(ah);
529 struct ar5416_eeprom *eep = &ahp->ah_eeprom;
530 u16 *eep_data;
531 int addr, ar5416_eep_start_loc = 0;
532
533 if (!ath9k_hw_use_flash(ah)) {
534 DPRINTF(ah->ah_sc, ATH_DBG_EEPROM,
535 "%s: Reading from EEPROM, not flash\n", __func__);
536 ar5416_eep_start_loc = 256;
537 }
538 if (AR_SREV_9100(ah))
539 ar5416_eep_start_loc = 256;
540
541 eep_data = (u16 *) eep;
542 for (addr = 0;
543 addr < sizeof(struct ar5416_eeprom) / sizeof(u16);
544 addr++) {
545 if (!ath9k_hw_nvram_read(ah, addr + ar5416_eep_start_loc,
546 eep_data)) {
547 DPRINTF(ah->ah_sc, ATH_DBG_EEPROM,
 548				"%s: Unable to read eeprom region\n",
549 __func__);
550 return false;
551 }
552 eep_data++;
553 }
554 return true;
555}
556
557/* XXX: Clean me up, make me more legible */
558static bool
559ath9k_hw_eeprom_set_board_values(struct ath_hal *ah,
560 struct ath9k_channel *chan)
561{
562 struct modal_eep_header *pModal;
563 int i, regChainOffset;
564 struct ath_hal_5416 *ahp = AH5416(ah);
565 struct ar5416_eeprom *eep = &ahp->ah_eeprom;
566 u8 txRxAttenLocal;
567 u16 ant_config;
568
569 pModal = &(eep->modalHeader[IS_CHAN_2GHZ(chan)]);
570
571 txRxAttenLocal = IS_CHAN_2GHZ(chan) ? 23 : 44;
572
573 ath9k_hw_get_eeprom_antenna_cfg(ahp, chan, 1, &ant_config);
574 REG_WRITE(ah, AR_PHY_SWITCH_COM, ant_config);
575
576 for (i = 0; i < AR5416_MAX_CHAINS; i++) {
577 if (AR_SREV_9280(ah)) {
578 if (i >= 2)
579 break;
580 }
581
582 if (AR_SREV_5416_V20_OR_LATER(ah) &&
583 (ahp->ah_rxchainmask == 5 || ahp->ah_txchainmask == 5)
584 && (i != 0))
585 regChainOffset = (i == 1) ? 0x2000 : 0x1000;
586 else
587 regChainOffset = i * 0x1000;
588
589 REG_WRITE(ah, AR_PHY_SWITCH_CHAIN_0 + regChainOffset,
590 pModal->antCtrlChain[i]);
591
592 REG_WRITE(ah, AR_PHY_TIMING_CTRL4(0) + regChainOffset,
593 (REG_READ(ah,
594 AR_PHY_TIMING_CTRL4(0) +
595 regChainOffset) &
596 ~(AR_PHY_TIMING_CTRL4_IQCORR_Q_Q_COFF |
597 AR_PHY_TIMING_CTRL4_IQCORR_Q_I_COFF)) |
598 SM(pModal->iqCalICh[i],
599 AR_PHY_TIMING_CTRL4_IQCORR_Q_I_COFF) |
600 SM(pModal->iqCalQCh[i],
601 AR_PHY_TIMING_CTRL4_IQCORR_Q_Q_COFF));
602
603 if ((i == 0) || AR_SREV_5416_V20_OR_LATER(ah)) {
604 if ((eep->baseEepHeader.version &
605 AR5416_EEP_VER_MINOR_MASK) >=
606 AR5416_EEP_MINOR_VER_3) {
607 txRxAttenLocal = pModal->txRxAttenCh[i];
608 if (AR_SREV_9280_10_OR_LATER(ah)) {
609 REG_RMW_FIELD(ah,
610 AR_PHY_GAIN_2GHZ +
611 regChainOffset,
612 AR_PHY_GAIN_2GHZ_XATTEN1_MARGIN,
613 pModal->
614 bswMargin[i]);
615 REG_RMW_FIELD(ah,
616 AR_PHY_GAIN_2GHZ +
617 regChainOffset,
618 AR_PHY_GAIN_2GHZ_XATTEN1_DB,
619 pModal->
620 bswAtten[i]);
621 REG_RMW_FIELD(ah,
622 AR_PHY_GAIN_2GHZ +
623 regChainOffset,
624 AR_PHY_GAIN_2GHZ_XATTEN2_MARGIN,
625 pModal->
626 xatten2Margin[i]);
627 REG_RMW_FIELD(ah,
628 AR_PHY_GAIN_2GHZ +
629 regChainOffset,
630 AR_PHY_GAIN_2GHZ_XATTEN2_DB,
631 pModal->
632 xatten2Db[i]);
633 } else {
634 REG_WRITE(ah,
635 AR_PHY_GAIN_2GHZ +
636 regChainOffset,
637 (REG_READ(ah,
638 AR_PHY_GAIN_2GHZ +
639 regChainOffset) &
640 ~AR_PHY_GAIN_2GHZ_BSW_MARGIN)
641 | SM(pModal->
642 bswMargin[i],
643 AR_PHY_GAIN_2GHZ_BSW_MARGIN));
644 REG_WRITE(ah,
645 AR_PHY_GAIN_2GHZ +
646 regChainOffset,
647 (REG_READ(ah,
648 AR_PHY_GAIN_2GHZ +
649 regChainOffset) &
650 ~AR_PHY_GAIN_2GHZ_BSW_ATTEN)
651 | SM(pModal->bswAtten[i],
652 AR_PHY_GAIN_2GHZ_BSW_ATTEN));
653 }
654 }
655 if (AR_SREV_9280_10_OR_LATER(ah)) {
656 REG_RMW_FIELD(ah,
657 AR_PHY_RXGAIN +
658 regChainOffset,
659 AR9280_PHY_RXGAIN_TXRX_ATTEN,
660 txRxAttenLocal);
661 REG_RMW_FIELD(ah,
662 AR_PHY_RXGAIN +
663 regChainOffset,
664 AR9280_PHY_RXGAIN_TXRX_MARGIN,
665 pModal->rxTxMarginCh[i]);
666 } else {
667 REG_WRITE(ah,
668 AR_PHY_RXGAIN + regChainOffset,
669 (REG_READ(ah,
670 AR_PHY_RXGAIN +
671 regChainOffset) &
672 ~AR_PHY_RXGAIN_TXRX_ATTEN) |
673 SM(txRxAttenLocal,
674 AR_PHY_RXGAIN_TXRX_ATTEN));
675 REG_WRITE(ah,
676 AR_PHY_GAIN_2GHZ +
677 regChainOffset,
678 (REG_READ(ah,
679 AR_PHY_GAIN_2GHZ +
680 regChainOffset) &
681 ~AR_PHY_GAIN_2GHZ_RXTX_MARGIN) |
682 SM(pModal->rxTxMarginCh[i],
683 AR_PHY_GAIN_2GHZ_RXTX_MARGIN));
684 }
685 }
686 }
687
688 if (AR_SREV_9280_10_OR_LATER(ah)) {
689 if (IS_CHAN_2GHZ(chan)) {
690 ath9k_hw_analog_shift_rmw(ah, AR_AN_RF2G1_CH0,
691 AR_AN_RF2G1_CH0_OB,
692 AR_AN_RF2G1_CH0_OB_S,
693 pModal->ob);
694 ath9k_hw_analog_shift_rmw(ah, AR_AN_RF2G1_CH0,
695 AR_AN_RF2G1_CH0_DB,
696 AR_AN_RF2G1_CH0_DB_S,
697 pModal->db);
698 ath9k_hw_analog_shift_rmw(ah, AR_AN_RF2G1_CH1,
699 AR_AN_RF2G1_CH1_OB,
700 AR_AN_RF2G1_CH1_OB_S,
701 pModal->ob_ch1);
702 ath9k_hw_analog_shift_rmw(ah, AR_AN_RF2G1_CH1,
703 AR_AN_RF2G1_CH1_DB,
704 AR_AN_RF2G1_CH1_DB_S,
705 pModal->db_ch1);
706 } else {
707 ath9k_hw_analog_shift_rmw(ah, AR_AN_RF5G1_CH0,
708 AR_AN_RF5G1_CH0_OB5,
709 AR_AN_RF5G1_CH0_OB5_S,
710 pModal->ob);
711 ath9k_hw_analog_shift_rmw(ah, AR_AN_RF5G1_CH0,
712 AR_AN_RF5G1_CH0_DB5,
713 AR_AN_RF5G1_CH0_DB5_S,
714 pModal->db);
715 ath9k_hw_analog_shift_rmw(ah, AR_AN_RF5G1_CH1,
716 AR_AN_RF5G1_CH1_OB5,
717 AR_AN_RF5G1_CH1_OB5_S,
718 pModal->ob_ch1);
719 ath9k_hw_analog_shift_rmw(ah, AR_AN_RF5G1_CH1,
720 AR_AN_RF5G1_CH1_DB5,
721 AR_AN_RF5G1_CH1_DB5_S,
722 pModal->db_ch1);
723 }
724 ath9k_hw_analog_shift_rmw(ah, AR_AN_TOP2,
725 AR_AN_TOP2_XPABIAS_LVL,
726 AR_AN_TOP2_XPABIAS_LVL_S,
727 pModal->xpaBiasLvl);
728 ath9k_hw_analog_shift_rmw(ah, AR_AN_TOP2,
729 AR_AN_TOP2_LOCALBIAS,
730 AR_AN_TOP2_LOCALBIAS_S,
731 pModal->local_bias);
732 DPRINTF(ah->ah_sc, ATH_DBG_ANY, "ForceXPAon: %d\n",
733 pModal->force_xpaon);
734 REG_RMW_FIELD(ah, AR_PHY_XPA_CFG, AR_PHY_FORCE_XPA_CFG,
735 pModal->force_xpaon);
736 }
737
738 REG_RMW_FIELD(ah, AR_PHY_SETTLING, AR_PHY_SETTLING_SWITCH,
739 pModal->switchSettling);
740 REG_RMW_FIELD(ah, AR_PHY_DESIRED_SZ, AR_PHY_DESIRED_SZ_ADC,
741 pModal->adcDesiredSize);
742
743 if (!AR_SREV_9280_10_OR_LATER(ah))
744 REG_RMW_FIELD(ah, AR_PHY_DESIRED_SZ,
745 AR_PHY_DESIRED_SZ_PGA,
746 pModal->pgaDesiredSize);
747
748 REG_WRITE(ah, AR_PHY_RF_CTL4,
749 SM(pModal->txEndToXpaOff, AR_PHY_RF_CTL4_TX_END_XPAA_OFF)
750 | SM(pModal->txEndToXpaOff,
751 AR_PHY_RF_CTL4_TX_END_XPAB_OFF)
752 | SM(pModal->txFrameToXpaOn,
753 AR_PHY_RF_CTL4_FRAME_XPAA_ON)
754 | SM(pModal->txFrameToXpaOn,
755 AR_PHY_RF_CTL4_FRAME_XPAB_ON));
756
757 REG_RMW_FIELD(ah, AR_PHY_RF_CTL3, AR_PHY_TX_END_TO_A2_RX_ON,
758 pModal->txEndToRxOn);
759 if (AR_SREV_9280_10_OR_LATER(ah)) {
760 REG_RMW_FIELD(ah, AR_PHY_CCA, AR9280_PHY_CCA_THRESH62,
761 pModal->thresh62);
762 REG_RMW_FIELD(ah, AR_PHY_EXT_CCA0,
763 AR_PHY_EXT_CCA0_THRESH62,
764 pModal->thresh62);
765 } else {
766 REG_RMW_FIELD(ah, AR_PHY_CCA, AR_PHY_CCA_THRESH62,
767 pModal->thresh62);
768 REG_RMW_FIELD(ah, AR_PHY_EXT_CCA,
769 AR_PHY_EXT_CCA_THRESH62,
770 pModal->thresh62);
771 }
772
773 if ((eep->baseEepHeader.version & AR5416_EEP_VER_MINOR_MASK) >=
774 AR5416_EEP_MINOR_VER_2) {
775 REG_RMW_FIELD(ah, AR_PHY_RF_CTL2,
776 AR_PHY_TX_END_DATA_START,
777 pModal->txFrameToDataStart);
778 REG_RMW_FIELD(ah, AR_PHY_RF_CTL2, AR_PHY_TX_END_PA_ON,
779 pModal->txFrameToPaOn);
780 }
781
782 if ((eep->baseEepHeader.version & AR5416_EEP_VER_MINOR_MASK) >=
783 AR5416_EEP_MINOR_VER_3) {
784 if (IS_CHAN_HT40(chan))
785 REG_RMW_FIELD(ah, AR_PHY_SETTLING,
786 AR_PHY_SETTLING_SWITCH,
787 pModal->swSettleHt40);
788 }
789
790 return true;
791}
792
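/*
 * Sanity-check the calibration image: verify the magic word (byte
 * swapping the whole image if it was stored in the opposite
 * endianness), confirm the words XOR to 0xffff, and reject versions or
 * revisions this driver does not understand.
 */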
793static inline int ath9k_hw_check_eeprom(struct ath_hal *ah)
794{
795 u32 sum = 0, el;
796 u16 *eepdata;
797 int i;
798 struct ath_hal_5416 *ahp = AH5416(ah);
799 bool need_swap = false;
800 struct ar5416_eeprom *eep =
801 (struct ar5416_eeprom *) &ahp->ah_eeprom;
802
803 if (!ath9k_hw_use_flash(ah)) {
804 u16 magic, magic2;
805 int addr;
806
807 if (!ath9k_hw_nvram_read(ah, AR5416_EEPROM_MAGIC_OFFSET,
808 &magic)) {
809 DPRINTF(ah->ah_sc, ATH_DBG_EEPROM,
810 "%s: Reading Magic # failed\n", __func__);
 811			return -EIO;
812 }
813 DPRINTF(ah->ah_sc, ATH_DBG_EEPROM, "%s: Read Magic = 0x%04X\n",
814 __func__, magic);
815
816 if (magic != AR5416_EEPROM_MAGIC) {
817 magic2 = swab16(magic);
818
819 if (magic2 == AR5416_EEPROM_MAGIC) {
820 need_swap = true;
821 eepdata = (u16 *) (&ahp->ah_eeprom);
822
823 for (addr = 0;
824 addr <
825 sizeof(struct ar5416_eeprom) /
826 sizeof(u16); addr++) {
827 u16 temp;
828
829 temp = swab16(*eepdata);
830 *eepdata = temp;
 831
 832				DPRINTF(ah->ah_sc, ATH_DBG_EEPROM,
 833					"0x%04X ", *eepdata);
 834				eepdata++;
835 if (((addr + 1) % 6) == 0)
836 DPRINTF(ah->ah_sc,
837 ATH_DBG_EEPROM,
838 "\n");
839 }
840 } else {
841 DPRINTF(ah->ah_sc, ATH_DBG_EEPROM,
842 "Invalid EEPROM Magic. "
 843					"endianness mismatch.\n");
844 return -EINVAL;
845 }
846 }
847 }
848 DPRINTF(ah->ah_sc, ATH_DBG_EEPROM, "need_swap = %s.\n",
849 need_swap ? "True" : "False");
850
851 if (need_swap)
852 el = swab16(ahp->ah_eeprom.baseEepHeader.length);
853 else
854 el = ahp->ah_eeprom.baseEepHeader.length;
855
856 if (el > sizeof(struct ar5416_eeprom))
857 el = sizeof(struct ar5416_eeprom) / sizeof(u16);
858 else
859 el = el / sizeof(u16);
860
861 eepdata = (u16 *) (&ahp->ah_eeprom);
862
863 for (i = 0; i < el; i++)
864 sum ^= *eepdata++;
865
866 if (need_swap) {
867 u32 integer, j;
868 u16 word;
869
870 DPRINTF(ah->ah_sc, ATH_DBG_EEPROM,
 871			"EEPROM endianness is not native, changing.\n");
872
873 word = swab16(eep->baseEepHeader.length);
874 eep->baseEepHeader.length = word;
875
876 word = swab16(eep->baseEepHeader.checksum);
877 eep->baseEepHeader.checksum = word;
878
879 word = swab16(eep->baseEepHeader.version);
880 eep->baseEepHeader.version = word;
881
882 word = swab16(eep->baseEepHeader.regDmn[0]);
883 eep->baseEepHeader.regDmn[0] = word;
884
885 word = swab16(eep->baseEepHeader.regDmn[1]);
886 eep->baseEepHeader.regDmn[1] = word;
887
888 word = swab16(eep->baseEepHeader.rfSilent);
889 eep->baseEepHeader.rfSilent = word;
890
891 word = swab16(eep->baseEepHeader.blueToothOptions);
892 eep->baseEepHeader.blueToothOptions = word;
893
894 word = swab16(eep->baseEepHeader.deviceCap);
895 eep->baseEepHeader.deviceCap = word;
896
897 for (j = 0; j < ARRAY_SIZE(eep->modalHeader); j++) {
898 struct modal_eep_header *pModal =
899 &eep->modalHeader[j];
900 integer = swab32(pModal->antCtrlCommon);
901 pModal->antCtrlCommon = integer;
902
903 for (i = 0; i < AR5416_MAX_CHAINS; i++) {
904 integer = swab32(pModal->antCtrlChain[i]);
905 pModal->antCtrlChain[i] = integer;
906 }
907
908 for (i = 0; i < AR5416_EEPROM_MODAL_SPURS; i++) {
909 word = swab16(pModal->spurChans[i].spurChan);
910 pModal->spurChans[i].spurChan = word;
911 }
912 }
913 }
914
915 if (sum != 0xffff || ar5416_get_eep_ver(ahp) != AR5416_EEP_VER ||
916 ar5416_get_eep_rev(ahp) < AR5416_EEP_NO_BACK_VER) {
917 DPRINTF(ah->ah_sc, ATH_DBG_EEPROM,
918 "Bad EEPROM checksum 0x%x or revision 0x%04x\n",
919 sum, ar5416_get_eep_ver(ahp));
920 return -EINVAL;
921 }
922
923 return 0;
924}
925
926static bool ath9k_hw_chip_test(struct ath_hal *ah)
927{
928 u32 regAddr[2] = { AR_STA_ID0, AR_PHY_BASE + (8 << 2) };
929 u32 regHold[2];
930 u32 patternData[4] = { 0x55555555,
931 0xaaaaaaaa,
932 0x66666666,
933 0x99999999 };
934 int i, j;
935
936 for (i = 0; i < 2; i++) {
937 u32 addr = regAddr[i];
938 u32 wrData, rdData;
939
940 regHold[i] = REG_READ(ah, addr);
941 for (j = 0; j < 0x100; j++) {
942 wrData = (j << 16) | j;
943 REG_WRITE(ah, addr, wrData);
944 rdData = REG_READ(ah, addr);
945 if (rdData != wrData) {
946 DPRINTF(ah->ah_sc, ATH_DBG_REG_IO,
947 "%s: address test failed "
948 "addr: 0x%08x - wr:0x%08x != rd:0x%08x\n",
949 __func__, addr, wrData, rdData);
950 return false;
951 }
952 }
953 for (j = 0; j < 4; j++) {
954 wrData = patternData[j];
955 REG_WRITE(ah, addr, wrData);
956 rdData = REG_READ(ah, addr);
957 if (wrData != rdData) {
958 DPRINTF(ah->ah_sc, ATH_DBG_REG_IO,
959 "%s: address test failed "
960 "addr: 0x%08x - wr:0x%08x != rd:0x%08x\n",
961 __func__, addr, wrData, rdData);
962 return false;
963 }
964 }
965 REG_WRITE(ah, regAddr[i], regHold[i]);
966 }
967 udelay(100);
968 return true;
969}
970
971u32 ath9k_hw_getrxfilter(struct ath_hal *ah)
972{
973 u32 bits = REG_READ(ah, AR_RX_FILTER);
974 u32 phybits = REG_READ(ah, AR_PHY_ERR);
975
976 if (phybits & AR_PHY_ERR_RADAR)
977 bits |= ATH9K_RX_FILTER_PHYRADAR;
978 if (phybits & (AR_PHY_ERR_OFDM_TIMING | AR_PHY_ERR_CCK_TIMING))
979 bits |= ATH9K_RX_FILTER_PHYERR;
980 return bits;
981}
982
983void ath9k_hw_setrxfilter(struct ath_hal *ah, u32 bits)
984{
985 u32 phybits;
986
987 REG_WRITE(ah, AR_RX_FILTER, (bits & 0xffff) | AR_RX_COMPR_BAR);
988 phybits = 0;
989 if (bits & ATH9K_RX_FILTER_PHYRADAR)
990 phybits |= AR_PHY_ERR_RADAR;
991 if (bits & ATH9K_RX_FILTER_PHYERR)
992 phybits |= AR_PHY_ERR_OFDM_TIMING | AR_PHY_ERR_CCK_TIMING;
993 REG_WRITE(ah, AR_PHY_ERR, phybits);
994
995 if (phybits)
996 REG_WRITE(ah, AR_RXCFG,
997 REG_READ(ah, AR_RXCFG) | AR_RXCFG_ZLFDMA);
998 else
999 REG_WRITE(ah, AR_RXCFG,
1000 REG_READ(ah, AR_RXCFG) & ~AR_RXCFG_ZLFDMA);
1001}
1002
1003bool ath9k_hw_setcapability(struct ath_hal *ah,
1004 enum ath9k_capability_type type,
1005 u32 capability,
1006 u32 setting,
1007 int *status)
1008{
1009 struct ath_hal_5416 *ahp = AH5416(ah);
1010 u32 v;
1011
1012 switch (type) {
1013 case ATH9K_CAP_TKIP_MIC:
1014 if (setting)
1015 ahp->ah_staId1Defaults |=
1016 AR_STA_ID1_CRPT_MIC_ENABLE;
1017 else
1018 ahp->ah_staId1Defaults &=
1019 ~AR_STA_ID1_CRPT_MIC_ENABLE;
1020 return true;
1021 case ATH9K_CAP_DIVERSITY:
1022 v = REG_READ(ah, AR_PHY_CCK_DETECT);
1023 if (setting)
1024 v |= AR_PHY_CCK_DETECT_BB_ENABLE_ANT_FAST_DIV;
1025 else
1026 v &= ~AR_PHY_CCK_DETECT_BB_ENABLE_ANT_FAST_DIV;
1027 REG_WRITE(ah, AR_PHY_CCK_DETECT, v);
1028 return true;
1029 case ATH9K_CAP_MCAST_KEYSRCH:
1030 if (setting)
1031 ahp->ah_staId1Defaults |= AR_STA_ID1_MCAST_KSRCH;
1032 else
1033 ahp->ah_staId1Defaults &= ~AR_STA_ID1_MCAST_KSRCH;
1034 return true;
1035 case ATH9K_CAP_TSF_ADJUST:
1036 if (setting)
1037 ahp->ah_miscMode |= AR_PCU_TX_ADD_TSF;
1038 else
1039 ahp->ah_miscMode &= ~AR_PCU_TX_ADD_TSF;
1040 return true;
1041 default:
1042 return false;
1043 }
1044}
1045
1046void ath9k_hw_dmaRegDump(struct ath_hal *ah)
1047{
1048 u32 val[ATH9K_NUM_DMA_DEBUG_REGS];
1049 int qcuOffset = 0, dcuOffset = 0;
1050 u32 *qcuBase = &val[0], *dcuBase = &val[4];
1051 int i;
1052
1053 REG_WRITE(ah, AR_MACMISC,
1054 ((AR_MACMISC_DMA_OBS_LINE_8 << AR_MACMISC_DMA_OBS_S) |
1055 (AR_MACMISC_MISC_OBS_BUS_1 <<
1056 AR_MACMISC_MISC_OBS_BUS_MSB_S)));
1057
1058 DPRINTF(ah->ah_sc, ATH_DBG_REG_IO, "Raw DMA Debug values:\n");
1059 for (i = 0; i < ATH9K_NUM_DMA_DEBUG_REGS; i++) {
1060 if (i % 4 == 0)
1061 DPRINTF(ah->ah_sc, ATH_DBG_REG_IO, "\n");
1062
1063 val[i] = REG_READ(ah, AR_DMADBG_0 + (i * sizeof(u32)));
1064 DPRINTF(ah->ah_sc, ATH_DBG_REG_IO, "%d: %08x ", i, val[i]);
1065 }
1066
1067 DPRINTF(ah->ah_sc, ATH_DBG_REG_IO, "\n\n");
1068 DPRINTF(ah->ah_sc, ATH_DBG_REG_IO,
1069 "Num QCU: chain_st fsp_ok fsp_st DCU: chain_st\n");
1070
1071 for (i = 0; i < ATH9K_NUM_QUEUES;
1072 i++, qcuOffset += 4, dcuOffset += 5) {
1073 if (i == 8) {
1074 qcuOffset = 0;
1075 qcuBase++;
1076 }
1077
1078 if (i == 6) {
1079 dcuOffset = 0;
1080 dcuBase++;
1081 }
1082
1083 DPRINTF(ah->ah_sc, ATH_DBG_REG_IO,
1084 "%2d %2x %1x %2x %2x\n",
1085 i, (*qcuBase & (0x7 << qcuOffset)) >> qcuOffset,
1086 (*qcuBase & (0x8 << qcuOffset)) >> (qcuOffset +
1087 3),
 1088			(val[2] & (0x7 << (i * 3))) >> (i * 3),
1089 (*dcuBase & (0x1f << dcuOffset)) >> dcuOffset);
1090 }
1091
1092 DPRINTF(ah->ah_sc, ATH_DBG_REG_IO, "\n");
1093 DPRINTF(ah->ah_sc, ATH_DBG_REG_IO,
1094 "qcu_stitch state: %2x qcu_fetch state: %2x\n",
1095 (val[3] & 0x003c0000) >> 18, (val[3] & 0x03c00000) >> 22);
1096 DPRINTF(ah->ah_sc, ATH_DBG_REG_IO,
1097 "qcu_complete state: %2x dcu_complete state: %2x\n",
1098 (val[3] & 0x1c000000) >> 26, (val[6] & 0x3));
1099 DPRINTF(ah->ah_sc, ATH_DBG_REG_IO,
1100 "dcu_arb state: %2x dcu_fp state: %2x\n",
1101 (val[5] & 0x06000000) >> 25, (val[5] & 0x38000000) >> 27);
1102 DPRINTF(ah->ah_sc, ATH_DBG_REG_IO,
1103 "chan_idle_dur: %3d chan_idle_dur_valid: %1d\n",
1104 (val[6] & 0x000003fc) >> 2, (val[6] & 0x00000400) >> 10);
1105 DPRINTF(ah->ah_sc, ATH_DBG_REG_IO,
1106 "txfifo_valid_0: %1d txfifo_valid_1: %1d\n",
1107 (val[6] & 0x00000800) >> 11, (val[6] & 0x00001000) >> 12);
1108 DPRINTF(ah->ah_sc, ATH_DBG_REG_IO,
1109 "txfifo_dcu_num_0: %2d txfifo_dcu_num_1: %2d\n",
1110 (val[6] & 0x0001e000) >> 13, (val[6] & 0x001e0000) >> 17);
1111
 1112	DPRINTF(ah->ah_sc, ATH_DBG_REG_IO, "pcu observe 0x%x\n",
 1113		REG_READ(ah, AR_OBS_BUS_1));
 1114	DPRINTF(ah->ah_sc, ATH_DBG_REG_IO,
 1115		"AR_CR 0x%x\n", REG_READ(ah, AR_CR));
1116}
1117
1118u32 ath9k_hw_GetMibCycleCountsPct(struct ath_hal *ah,
1119 u32 *rxc_pcnt,
1120 u32 *rxf_pcnt,
1121 u32 *txf_pcnt)
1122{
1123 static u32 cycles, rx_clear, rx_frame, tx_frame;
1124 u32 good = 1;
1125
1126 u32 rc = REG_READ(ah, AR_RCCNT);
1127 u32 rf = REG_READ(ah, AR_RFCNT);
1128 u32 tf = REG_READ(ah, AR_TFCNT);
1129 u32 cc = REG_READ(ah, AR_CCCNT);
1130
1131 if (cycles == 0 || cycles > cc) {
1132 DPRINTF(ah->ah_sc, ATH_DBG_CHANNEL,
1133 "%s: cycle counter wrap. ExtBusy = 0\n",
1134 __func__);
1135 good = 0;
1136 } else {
1137 u32 cc_d = cc - cycles;
1138 u32 rc_d = rc - rx_clear;
1139 u32 rf_d = rf - rx_frame;
1140 u32 tf_d = tf - tx_frame;
1141
1142 if (cc_d != 0) {
1143 *rxc_pcnt = rc_d * 100 / cc_d;
1144 *rxf_pcnt = rf_d * 100 / cc_d;
1145 *txf_pcnt = tf_d * 100 / cc_d;
1146 } else {
1147 good = 0;
1148 }
1149 }
1150
1151 cycles = cc;
1152 rx_frame = rf;
1153 rx_clear = rc;
1154 tx_frame = tf;
1155
1156 return good;
1157}
1158
1159void ath9k_hw_set11nmac2040(struct ath_hal *ah, enum ath9k_ht_macmode mode)
1160{
1161 u32 macmode;
1162
1163 if (mode == ATH9K_HT_MACMODE_2040 &&
1164 !ah->ah_config.cwm_ignore_extcca)
1165 macmode = AR_2040_JOINED_RX_CLEAR;
1166 else
1167 macmode = 0;
1168
1169 REG_WRITE(ah, AR_2040_MODE, macmode);
1170}
1171
1172static void ath9k_hw_mark_phy_inactive(struct ath_hal *ah)
1173{
1174 REG_WRITE(ah, AR_PHY_ACTIVE, AR_PHY_ACTIVE_DIS);
1175}
1176
1177
1178static struct ath_hal_5416 *ath9k_hw_newstate(u16 devid,
1179 struct ath_softc *sc,
1180 void __iomem *mem,
1181 int *status)
1182{
1183 static const u8 defbssidmask[ETH_ALEN] =
1184 { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
1185 struct ath_hal_5416 *ahp;
1186 struct ath_hal *ah;
1187
1188 ahp = kzalloc(sizeof(struct ath_hal_5416), GFP_KERNEL);
1189 if (ahp == NULL) {
1190 DPRINTF(sc, ATH_DBG_FATAL,
1191 "%s: cannot allocate memory for state block\n",
1192 __func__);
1193 *status = -ENOMEM;
1194 return NULL;
1195 }
1196
1197 ah = &ahp->ah;
1198
1199 memcpy(&ahp->ah, &ar5416hal, sizeof(struct ath_hal));
1200
1201 ah->ah_sc = sc;
1202 ah->ah_sh = mem;
1203
1204 ah->ah_devid = devid;
1205 ah->ah_subvendorid = 0;
1206
1207 ah->ah_flags = 0;
1208 if ((devid == AR5416_AR9100_DEVID))
1209 ah->ah_macVersion = AR_SREV_VERSION_9100;
1210 if (!AR_SREV_9100(ah))
1211 ah->ah_flags = AH_USE_EEPROM;
1212
1213 ah->ah_powerLimit = MAX_RATE_POWER;
1214 ah->ah_tpScale = ATH9K_TP_SCALE_MAX;
1215
1216 ahp->ah_atimWindow = 0;
1217 ahp->ah_diversityControl = ah->ah_config.diversity_control;
1218 ahp->ah_antennaSwitchSwap =
1219 ah->ah_config.antenna_switch_swap;
1220
1221 ahp->ah_staId1Defaults = AR_STA_ID1_CRPT_MIC_ENABLE;
1222 ahp->ah_beaconInterval = 100;
1223 ahp->ah_enable32kHzClock = DONT_USE_32KHZ;
1224 ahp->ah_slottime = (u32) -1;
1225 ahp->ah_acktimeout = (u32) -1;
1226 ahp->ah_ctstimeout = (u32) -1;
1227 ahp->ah_globaltxtimeout = (u32) -1;
1228 memcpy(&ahp->ah_bssidmask, defbssidmask, ETH_ALEN);
1229
1230 ahp->ah_gBeaconRate = 0;
1231
1232 return ahp;
1233}
1234
1235static int ath9k_hw_eeprom_attach(struct ath_hal *ah)
1236{
1237 int status;
1238
1239 if (ath9k_hw_use_flash(ah))
1240 ath9k_hw_flash_map(ah);
1241
1242 if (!ath9k_hw_fill_eeprom(ah))
1243 return -EIO;
1244
1245 status = ath9k_hw_check_eeprom(ah);
1246
1247 return status;
1248}
1249
1250u32 ath9k_hw_get_eeprom(struct ath_hal_5416 *ahp,
1251 enum eeprom_param param)
1252{
1253 struct ar5416_eeprom *eep = &ahp->ah_eeprom;
1254 struct modal_eep_header *pModal = eep->modalHeader;
1255 struct base_eep_header *pBase = &eep->baseEepHeader;
1256
1257 switch (param) {
1258 case EEP_NFTHRESH_5:
1259 return -pModal[0].noiseFloorThreshCh[0];
1260 case EEP_NFTHRESH_2:
1261 return -pModal[1].noiseFloorThreshCh[0];
1262 case AR_EEPROM_MAC(0):
1263 return pBase->macAddr[0] << 8 | pBase->macAddr[1];
1264 case AR_EEPROM_MAC(1):
1265 return pBase->macAddr[2] << 8 | pBase->macAddr[3];
1266 case AR_EEPROM_MAC(2):
1267 return pBase->macAddr[4] << 8 | pBase->macAddr[5];
1268 case EEP_REG_0:
1269 return pBase->regDmn[0];
1270 case EEP_REG_1:
1271 return pBase->regDmn[1];
1272 case EEP_OP_CAP:
1273 return pBase->deviceCap;
1274 case EEP_OP_MODE:
1275 return pBase->opCapFlags;
1276 case EEP_RF_SILENT:
1277 return pBase->rfSilent;
1278 case EEP_OB_5:
1279 return pModal[0].ob;
1280 case EEP_DB_5:
1281 return pModal[0].db;
1282 case EEP_OB_2:
1283 return pModal[1].ob;
1284 case EEP_DB_2:
1285 return pModal[1].db;
1286 case EEP_MINOR_REV:
1287 return pBase->version & AR5416_EEP_VER_MINOR_MASK;
1288 case EEP_TX_MASK:
1289 return pBase->txMask;
1290 case EEP_RX_MASK:
1291 return pBase->rxMask;
1292 default:
1293 return 0;
1294 }
1295}
1296
1297static inline int ath9k_hw_get_radiorev(struct ath_hal *ah)
1298{
1299 u32 val;
1300 int i;
1301
1302 REG_WRITE(ah, AR_PHY(0x36), 0x00007058);
1303 for (i = 0; i < 8; i++)
1304 REG_WRITE(ah, AR_PHY(0x20), 0x00010000);
1305 val = (REG_READ(ah, AR_PHY(256)) >> 24) & 0xff;
1306 val = ((val & 0xf0) >> 4) | ((val & 0x0f) << 4);
1307 return ath9k_hw_reverse_bits(val, 8);
1308}
1309
1310static inline int ath9k_hw_init_macaddr(struct ath_hal *ah)
1311{
1312 u32 sum;
1313 int i;
1314 u16 eeval;
1315 struct ath_hal_5416 *ahp = AH5416(ah);
1316 DECLARE_MAC_BUF(mac);
1317
1318 sum = 0;
1319 for (i = 0; i < 3; i++) {
1320 eeval = ath9k_hw_get_eeprom(ahp, AR_EEPROM_MAC(i));
1321 sum += eeval;
1322 ahp->ah_macaddr[2 * i] = eeval >> 8;
1323 ahp->ah_macaddr[2 * i + 1] = eeval & 0xff;
1324 }
1325 if (sum == 0 || sum == 0xffff * 3) {
1326 DPRINTF(ah->ah_sc, ATH_DBG_EEPROM,
1327 "%s: mac address read failed: %s\n", __func__,
1328 print_mac(mac, ahp->ah_macaddr));
1329 return -EADDRNOTAVAIL;
1330 }
1331
1332 return 0;
1333}
1334
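/*
 * Linear interpolation between the calibration points (srcLeft,
 * targetLeft) and (srcRight, targetRight); if the two source points
 * coincide, targetLeft is returned unchanged.
 */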
1335static inline int16_t ath9k_hw_interpolate(u16 target,
1336 u16 srcLeft,
1337 u16 srcRight,
1338 int16_t targetLeft,
1339 int16_t targetRight)
1340{
1341 int16_t rv;
1342
1343 if (srcRight == srcLeft) {
1344 rv = targetLeft;
1345 } else {
1346 rv = (int16_t) (((target - srcLeft) * targetRight +
1347 (srcRight - target) * targetLeft) /
1348 (srcRight - srcLeft));
1349 }
1350 return rv;
1351}
1352
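/*
 * Convert an EEPROM frequency bin to MHz: 2300 + fbin on 2.4 GHz,
 * 4800 + 5 * fbin on 5 GHz.  The "unused" marker is passed through.
 */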
1353static inline u16 ath9k_hw_fbin2freq(u8 fbin,
1354 bool is2GHz)
1355{
1356
1357 if (fbin == AR5416_BCHAN_UNUSED)
1358 return fbin;
1359
1360 return (u16) ((is2GHz) ? (2300 + fbin) : (4800 + 5 * fbin));
1361}
1362
1363static u16 ath9k_hw_eeprom_get_spur_chan(struct ath_hal *ah,
1364 u16 i,
1365 bool is2GHz)
1366{
1367 struct ath_hal_5416 *ahp = AH5416(ah);
1368 struct ar5416_eeprom *eep =
1369 (struct ar5416_eeprom *) &ahp->ah_eeprom;
1370 u16 spur_val = AR_NO_SPUR;
1371
1372 DPRINTF(ah->ah_sc, ATH_DBG_ANI,
 1373		"Getting spur idx %d, is2GHz %d, val %x\n",
1374 i, is2GHz, ah->ah_config.spurchans[i][is2GHz]);
1375
1376 switch (ah->ah_config.spurmode) {
1377 case SPUR_DISABLE:
1378 break;
1379 case SPUR_ENABLE_IOCTL:
1380 spur_val = ah->ah_config.spurchans[i][is2GHz];
1381 DPRINTF(ah->ah_sc, ATH_DBG_ANI,
1382 "Getting spur val from new loc. %d\n", spur_val);
1383 break;
1384 case SPUR_ENABLE_EEPROM:
1385 spur_val = eep->modalHeader[is2GHz].spurChans[i].spurChan;
1386 break;
1387
1388 }
1389 return spur_val;
1390}
1391
1392static inline int ath9k_hw_rfattach(struct ath_hal *ah)
1393{
1394 bool rfStatus = false;
1395 int ecode = 0;
1396
1397 rfStatus = ath9k_hw_init_rf(ah, &ecode);
1398 if (!rfStatus) {
1399 DPRINTF(ah->ah_sc, ATH_DBG_RESET,
1400 "%s: RF setup failed, status %u\n", __func__,
1401 ecode);
1402 return ecode;
1403 }
1404
1405 return 0;
1406}
1407
1408static int ath9k_hw_rf_claim(struct ath_hal *ah)
1409{
1410 u32 val;
1411
1412 REG_WRITE(ah, AR_PHY(0), 0x00000007);
1413
1414 val = ath9k_hw_get_radiorev(ah);
1415 switch (val & AR_RADIO_SREV_MAJOR) {
1416 case 0:
1417 val = AR_RAD5133_SREV_MAJOR;
1418 break;
1419 case AR_RAD5133_SREV_MAJOR:
1420 case AR_RAD5122_SREV_MAJOR:
1421 case AR_RAD2133_SREV_MAJOR:
1422 case AR_RAD2122_SREV_MAJOR:
1423 break;
1424 default:
1425 DPRINTF(ah->ah_sc, ATH_DBG_CHANNEL,
1426 "%s: 5G Radio Chip Rev 0x%02X is not "
1427 "supported by this driver\n",
1428 __func__, ah->ah_analog5GhzRev);
1429 return -EOPNOTSUPP;
1430 }
1431
1432 ah->ah_analog5GhzRev = val;
1433
1434 return 0;
1435}
1436
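/*
 * Program the RTC PLL for the target channel: pick the reference
 * divider, the half/quarter-rate clock select and the band-specific
 * divider according to the chip revision, then wait
 * RTC_PLL_SETTLE_DELAY before forcing the derived sleep clock.
 */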
1437static inline void ath9k_hw_init_pll(struct ath_hal *ah,
1438 struct ath9k_channel *chan)
1439{
1440 u32 pll;
1441
1442 if (AR_SREV_9100(ah)) {
1443 if (chan && IS_CHAN_5GHZ(chan))
1444 pll = 0x1450;
1445 else
1446 pll = 0x1458;
1447 } else {
1448 if (AR_SREV_9280_10_OR_LATER(ah)) {
1449 pll = SM(0x5, AR_RTC_9160_PLL_REFDIV);
1450
1451 if (chan && IS_CHAN_HALF_RATE(chan))
1452 pll |= SM(0x1, AR_RTC_9160_PLL_CLKSEL);
1453 else if (chan && IS_CHAN_QUARTER_RATE(chan))
1454 pll |= SM(0x2, AR_RTC_9160_PLL_CLKSEL);
1455
1456 if (chan && IS_CHAN_5GHZ(chan)) {
1457 pll |= SM(0x28, AR_RTC_9160_PLL_DIV);
1458
1459
1460 if (AR_SREV_9280_20(ah)) {
1461 if (((chan->channel % 20) == 0)
1462 || ((chan->channel % 10) == 0))
1463 pll = 0x2850;
1464 else
1465 pll = 0x142c;
1466 }
1467 } else {
1468 pll |= SM(0x2c, AR_RTC_9160_PLL_DIV);
1469 }
1470
1471 } else if (AR_SREV_9160_10_OR_LATER(ah)) {
1472
1473 pll = SM(0x5, AR_RTC_9160_PLL_REFDIV);
1474
1475 if (chan && IS_CHAN_HALF_RATE(chan))
1476 pll |= SM(0x1, AR_RTC_9160_PLL_CLKSEL);
1477 else if (chan && IS_CHAN_QUARTER_RATE(chan))
1478 pll |= SM(0x2, AR_RTC_9160_PLL_CLKSEL);
1479
1480 if (chan && IS_CHAN_5GHZ(chan))
1481 pll |= SM(0x50, AR_RTC_9160_PLL_DIV);
1482 else
1483 pll |= SM(0x58, AR_RTC_9160_PLL_DIV);
1484 } else {
1485 pll = AR_RTC_PLL_REFDIV_5 | AR_RTC_PLL_DIV2;
1486
1487 if (chan && IS_CHAN_HALF_RATE(chan))
1488 pll |= SM(0x1, AR_RTC_PLL_CLKSEL);
1489 else if (chan && IS_CHAN_QUARTER_RATE(chan))
1490 pll |= SM(0x2, AR_RTC_PLL_CLKSEL);
1491
1492 if (chan && IS_CHAN_5GHZ(chan))
1493 pll |= SM(0xa, AR_RTC_PLL_DIV);
1494 else
1495 pll |= SM(0xb, AR_RTC_PLL_DIV);
1496 }
1497 }
1498 REG_WRITE(ah, (u16) (AR_RTC_PLL_CONTROL), pll);
1499
1500 udelay(RTC_PLL_SETTLE_DELAY);
1501
1502 REG_WRITE(ah, AR_RTC_SLEEP_CLK, AR_RTC_FORCE_DERIVED_CLK);
1503}
1504
1505static void ath9k_hw_set_regs(struct ath_hal *ah, struct ath9k_channel *chan,
1506 enum ath9k_ht_macmode macmode)
1507{
1508 u32 phymode;
1509 struct ath_hal_5416 *ahp = AH5416(ah);
1510
1511 phymode = AR_PHY_FC_HT_EN | AR_PHY_FC_SHORT_GI_40
1512 | AR_PHY_FC_SINGLE_HT_LTF1 | AR_PHY_FC_WALSH;
1513
1514 if (IS_CHAN_HT40(chan)) {
1515 phymode |= AR_PHY_FC_DYN2040_EN;
1516
1517 if ((chan->chanmode == CHANNEL_A_HT40PLUS) ||
1518 (chan->chanmode == CHANNEL_G_HT40PLUS))
1519 phymode |= AR_PHY_FC_DYN2040_PRI_CH;
1520
1521 if (ahp->ah_extprotspacing == ATH9K_HT_EXTPROTSPACING_25)
1522 phymode |= AR_PHY_FC_DYN2040_EXT_CH;
1523 }
1524 REG_WRITE(ah, AR_PHY_TURBO, phymode);
1525
1526 ath9k_hw_set11nmac2040(ah, macmode);
1527
1528 REG_WRITE(ah, AR_GTXTO, 25 << AR_GTXTO_TIMEOUT_LIMIT_S);
1529 REG_WRITE(ah, AR_CST, 0xF << AR_CST_TIMEOUT_LIMIT_S);
1530}
1531
1532static void ath9k_hw_set_operating_mode(struct ath_hal *ah, int opmode)
1533{
1534 u32 val;
1535
1536 val = REG_READ(ah, AR_STA_ID1);
1537 val &= ~(AR_STA_ID1_STA_AP | AR_STA_ID1_ADHOC);
1538 switch (opmode) {
1539 case ATH9K_M_HOSTAP:
1540 REG_WRITE(ah, AR_STA_ID1, val | AR_STA_ID1_STA_AP
1541 | AR_STA_ID1_KSRCH_MODE);
1542 REG_CLR_BIT(ah, AR_CFG, AR_CFG_AP_ADHOC_INDICATION);
1543 break;
1544 case ATH9K_M_IBSS:
1545 REG_WRITE(ah, AR_STA_ID1, val | AR_STA_ID1_ADHOC
1546 | AR_STA_ID1_KSRCH_MODE);
1547 REG_SET_BIT(ah, AR_CFG, AR_CFG_AP_ADHOC_INDICATION);
1548 break;
1549 case ATH9K_M_STA:
1550 case ATH9K_M_MONITOR:
1551 REG_WRITE(ah, AR_STA_ID1, val | AR_STA_ID1_KSRCH_MODE);
1552 break;
1553 }
1554}
1555
1556static inline void
1557ath9k_hw_set_rfmode(struct ath_hal *ah, struct ath9k_channel *chan)
1558{
1559 u32 rfMode = 0;
1560
1561 if (chan == NULL)
1562 return;
1563
1564 rfMode |= (IS_CHAN_B(chan) || IS_CHAN_G(chan))
1565 ? AR_PHY_MODE_DYNAMIC : AR_PHY_MODE_OFDM;
1566
1567 if (!AR_SREV_9280_10_OR_LATER(ah))
1568 rfMode |= (IS_CHAN_5GHZ(chan)) ? AR_PHY_MODE_RF5GHZ :
1569 AR_PHY_MODE_RF2GHZ;
1570
1571 if (AR_SREV_9280_20(ah) && IS_CHAN_A_5MHZ_SPACED(chan))
1572 rfMode |= (AR_PHY_MODE_DYNAMIC | AR_PHY_MODE_DYN_CCK_DISABLE);
1573
1574 REG_WRITE(ah, AR_PHY_MODE, rfMode);
1575}
1576
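/*
 * Warm or cold reset of the MAC through the RTC reset control
 * register: force the chip awake, quiesce the AHB/host interface on
 * non-AR9100 parts, assert the requested reset bits, wait for them to
 * clear again and reinitialise the PLL.
 */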
1577static bool ath9k_hw_set_reset(struct ath_hal *ah, int type)
1578{
1579 u32 rst_flags;
1580 u32 tmpReg;
1581
1582 REG_WRITE(ah, AR_RTC_FORCE_WAKE, AR_RTC_FORCE_WAKE_EN |
1583 AR_RTC_FORCE_WAKE_ON_INT);
1584
1585 if (AR_SREV_9100(ah)) {
1586 rst_flags = AR_RTC_RC_MAC_WARM | AR_RTC_RC_MAC_COLD |
1587 AR_RTC_RC_COLD_RESET | AR_RTC_RC_WARM_RESET;
1588 } else {
1589 tmpReg = REG_READ(ah, AR_INTR_SYNC_CAUSE);
1590 if (tmpReg &
1591 (AR_INTR_SYNC_LOCAL_TIMEOUT |
1592 AR_INTR_SYNC_RADM_CPL_TIMEOUT)) {
1593 REG_WRITE(ah, AR_INTR_SYNC_ENABLE, 0);
1594 REG_WRITE(ah, AR_RC, AR_RC_AHB | AR_RC_HOSTIF);
1595 } else {
1596 REG_WRITE(ah, AR_RC, AR_RC_AHB);
1597 }
1598
1599 rst_flags = AR_RTC_RC_MAC_WARM;
1600 if (type == ATH9K_RESET_COLD)
1601 rst_flags |= AR_RTC_RC_MAC_COLD;
1602 }
1603
1604 REG_WRITE(ah, (u16) (AR_RTC_RC), rst_flags);
1605 udelay(50);
1606
1607 REG_WRITE(ah, (u16) (AR_RTC_RC), 0);
1608 if (!ath9k_hw_wait(ah, (u16) (AR_RTC_RC), AR_RTC_RC_M, 0)) {
1609 DPRINTF(ah->ah_sc, ATH_DBG_RESET,
1610 "%s: RTC stuck in MAC reset\n",
1611 __func__);
1612 return false;
1613 }
1614
1615 if (!AR_SREV_9100(ah))
1616 REG_WRITE(ah, AR_RC, 0);
1617
1618 ath9k_hw_init_pll(ah, NULL);
1619
1620 if (AR_SREV_9100(ah))
1621 udelay(50);
1622
1623 return true;
1624}
1625
1626static inline bool ath9k_hw_set_reset_power_on(struct ath_hal *ah)
1627{
1628 REG_WRITE(ah, AR_RTC_FORCE_WAKE, AR_RTC_FORCE_WAKE_EN |
1629 AR_RTC_FORCE_WAKE_ON_INT);
1630
1631 REG_WRITE(ah, (u16) (AR_RTC_RESET), 0);
1632 REG_WRITE(ah, (u16) (AR_RTC_RESET), 1);
1633
1634 if (!ath9k_hw_wait(ah,
1635 AR_RTC_STATUS,
1636 AR_RTC_STATUS_M,
1637 AR_RTC_STATUS_ON)) {
1638 DPRINTF(ah->ah_sc, ATH_DBG_RESET, "%s: RTC not waking up\n",
1639 __func__);
1640 return false;
1641 }
1642
1643 ath9k_hw_read_revisions(ah);
1644
1645 return ath9k_hw_set_reset(ah, ATH9K_RESET_WARM);
1646}
1647
1648static bool ath9k_hw_set_reset_reg(struct ath_hal *ah,
1649 u32 type)
1650{
1651 REG_WRITE(ah, AR_RTC_FORCE_WAKE,
1652 AR_RTC_FORCE_WAKE_EN | AR_RTC_FORCE_WAKE_ON_INT);
1653
1654 switch (type) {
1655 case ATH9K_RESET_POWER_ON:
1656 return ath9k_hw_set_reset_power_on(ah);
1657 break;
1658 case ATH9K_RESET_WARM:
1659 case ATH9K_RESET_COLD:
1660 return ath9k_hw_set_reset(ah, type);
1661 break;
1662 default:
1663 return false;
1664 }
1665}
1666
1667static inline
1668struct ath9k_channel *ath9k_hw_check_chan(struct ath_hal *ah,
1669 struct ath9k_channel *chan)
1670{
1671 if (!(IS_CHAN_2GHZ(chan) ^ IS_CHAN_5GHZ(chan))) {
1672 DPRINTF(ah->ah_sc, ATH_DBG_CHANNEL,
1673 "%s: invalid channel %u/0x%x; not marked as "
1674 "2GHz or 5GHz\n", __func__, chan->channel,
1675 chan->channelFlags);
1676 return NULL;
1677 }
1678
1679 if (!IS_CHAN_OFDM(chan) &&
1680 !IS_CHAN_CCK(chan) &&
1681 !IS_CHAN_HT20(chan) &&
1682 !IS_CHAN_HT40(chan)) {
1683 DPRINTF(ah->ah_sc, ATH_DBG_CHANNEL,
1684 "%s: invalid channel %u/0x%x; not marked as "
1685 "OFDM or CCK or HT20 or HT40PLUS or HT40MINUS\n",
1686 __func__, chan->channel, chan->channelFlags);
1687 return NULL;
1688 }
1689
1690 return ath9k_regd_check_channel(ah, chan);
1691}
1692
1693static inline bool
1694ath9k_hw_get_lower_upper_index(u8 target,
1695 u8 *pList,
1696 u16 listSize,
1697 u16 *indexL,
1698 u16 *indexR)
1699{
1700 u16 i;
1701
1702 if (target <= pList[0]) {
1703 *indexL = *indexR = 0;
1704 return true;
1705 }
1706 if (target >= pList[listSize - 1]) {
1707 *indexL = *indexR = (u16) (listSize - 1);
1708 return true;
1709 }
1710
1711 for (i = 0; i < listSize - 1; i++) {
1712 if (pList[i] == target) {
1713 *indexL = *indexR = i;
1714 return true;
1715 }
1716 if (target < pList[i + 1]) {
1717 *indexL = i;
1718 *indexR = (u16) (i + 1);
1719 return false;
1720 }
1721 }
1722 return false;
1723}
1724
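/*
 * Return the median of the noise floor calibration history buffer:
 * the samples are copied, sorted in descending order and the middle
 * element is returned.
 */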
1725static int16_t ath9k_hw_get_nf_hist_mid(int16_t *nfCalBuffer)
1726{
1727 int16_t nfval;
1728 int16_t sort[ATH9K_NF_CAL_HIST_MAX];
1729 int i, j;
1730
1731 for (i = 0; i < ATH9K_NF_CAL_HIST_MAX; i++)
1732 sort[i] = nfCalBuffer[i];
1733
1734 for (i = 0; i < ATH9K_NF_CAL_HIST_MAX - 1; i++) {
1735 for (j = 1; j < ATH9K_NF_CAL_HIST_MAX - i; j++) {
1736 if (sort[j] > sort[j - 1]) {
1737 nfval = sort[j];
1738 sort[j] = sort[j - 1];
1739 sort[j - 1] = nfval;
1740 }
1741 }
1742 }
1743 nfval = sort[(ATH9K_NF_CAL_HIST_MAX - 1) >> 1];
1744
1745 return nfval;
1746}
1747
1748static void ath9k_hw_update_nfcal_hist_buffer(struct ath9k_nfcal_hist *h,
1749 int16_t *nfarray)
1750{
1751 int i;
1752
1753 for (i = 0; i < NUM_NF_READINGS; i++) {
1754 h[i].nfCalBuffer[h[i].currIndex] = nfarray[i];
1755
1756 if (++h[i].currIndex >= ATH9K_NF_CAL_HIST_MAX)
1757 h[i].currIndex = 0;
1758
1759 if (h[i].invalidNFcount > 0) {
1760 if (nfarray[i] < AR_PHY_CCA_MIN_BAD_VALUE
1761 || nfarray[i] > AR_PHY_CCA_MAX_HIGH_VALUE) {
1762 h[i].invalidNFcount = ATH9K_NF_CAL_HIST_MAX;
1763 } else {
1764 h[i].invalidNFcount--;
1765 h[i].privNF = nfarray[i];
1766 }
1767 } else {
1768 h[i].privNF =
1769 ath9k_hw_get_nf_hist_mid(h[i].nfCalBuffer);
1770 }
1771 }
1772 return;
1773}
1774
1775static void ar5416GetNoiseFloor(struct ath_hal *ah,
1776 int16_t nfarray[NUM_NF_READINGS])
1777{
1778 int16_t nf;
1779
1780 if (AR_SREV_9280_10_OR_LATER(ah))
1781 nf = MS(REG_READ(ah, AR_PHY_CCA), AR9280_PHY_MINCCA_PWR);
1782 else
1783 nf = MS(REG_READ(ah, AR_PHY_CCA), AR_PHY_MINCCA_PWR);
1784
1785 if (nf & 0x100)
1786 nf = 0 - ((nf ^ 0x1ff) + 1);
1787 DPRINTF(ah->ah_sc, ATH_DBG_CALIBRATE,
1788 "NF calibrated [ctl] [chain 0] is %d\n", nf);
1789 nfarray[0] = nf;
1790
1791 if (AR_SREV_9280_10_OR_LATER(ah))
1792 nf = MS(REG_READ(ah, AR_PHY_CH1_CCA),
1793 AR9280_PHY_CH1_MINCCA_PWR);
1794 else
1795 nf = MS(REG_READ(ah, AR_PHY_CH1_CCA),
1796 AR_PHY_CH1_MINCCA_PWR);
1797
1798 if (nf & 0x100)
1799 nf = 0 - ((nf ^ 0x1ff) + 1);
1800 DPRINTF(ah->ah_sc, ATH_DBG_NF_CAL,
1801 "NF calibrated [ctl] [chain 1] is %d\n", nf);
1802 nfarray[1] = nf;
1803
1804 if (!AR_SREV_9280(ah)) {
1805 nf = MS(REG_READ(ah, AR_PHY_CH2_CCA),
1806 AR_PHY_CH2_MINCCA_PWR);
1807 if (nf & 0x100)
1808 nf = 0 - ((nf ^ 0x1ff) + 1);
1809 DPRINTF(ah->ah_sc, ATH_DBG_NF_CAL,
1810 "NF calibrated [ctl] [chain 2] is %d\n", nf);
1811 nfarray[2] = nf;
1812 }
1813
1814 if (AR_SREV_9280_10_OR_LATER(ah))
1815 nf = MS(REG_READ(ah, AR_PHY_EXT_CCA),
1816 AR9280_PHY_EXT_MINCCA_PWR);
1817 else
1818 nf = MS(REG_READ(ah, AR_PHY_EXT_CCA),
1819 AR_PHY_EXT_MINCCA_PWR);
1820
1821 if (nf & 0x100)
1822 nf = 0 - ((nf ^ 0x1ff) + 1);
1823 DPRINTF(ah->ah_sc, ATH_DBG_NF_CAL,
1824 "NF calibrated [ext] [chain 0] is %d\n", nf);
1825 nfarray[3] = nf;
1826
1827 if (AR_SREV_9280_10_OR_LATER(ah))
1828 nf = MS(REG_READ(ah, AR_PHY_CH1_EXT_CCA),
1829 AR9280_PHY_CH1_EXT_MINCCA_PWR);
1830 else
1831 nf = MS(REG_READ(ah, AR_PHY_CH1_EXT_CCA),
1832 AR_PHY_CH1_EXT_MINCCA_PWR);
1833
1834 if (nf & 0x100)
1835 nf = 0 - ((nf ^ 0x1ff) + 1);
1836 DPRINTF(ah->ah_sc, ATH_DBG_CALIBRATE,
1837 "NF calibrated [ext] [chain 1] is %d\n", nf);
1838 nfarray[4] = nf;
1839
1840 if (!AR_SREV_9280(ah)) {
1841 nf = MS(REG_READ(ah, AR_PHY_CH2_EXT_CCA),
1842 AR_PHY_CH2_EXT_MINCCA_PWR);
1843 if (nf & 0x100)
1844 nf = 0 - ((nf ^ 0x1ff) + 1);
1845 DPRINTF(ah->ah_sc, ATH_DBG_NF_CAL,
1846 "NF calibrated [ext] [chain 2] is %d\n", nf);
1847 nfarray[5] = nf;
1848 }
1849}
1850
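/*
 * Look up the EEPROM noise-floor threshold for the band the channel
 * operates in: EEP_NFTHRESH_5 for the 5 GHz (A/HT) modes,
 * EEP_NFTHRESH_2 for the 2.4 GHz (B/G/HT) modes.
 */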
1851static bool
1852getNoiseFloorThresh(struct ath_hal *ah,
1853 const struct ath9k_channel *chan,
1854 int16_t *nft)
1855{
1856 struct ath_hal_5416 *ahp = AH5416(ah);
1857
1858 switch (chan->chanmode) {
1859 case CHANNEL_A:
1860 case CHANNEL_A_HT20:
1861 case CHANNEL_A_HT40PLUS:
1862 case CHANNEL_A_HT40MINUS:
1863 *nft = (int16_t) ath9k_hw_get_eeprom(ahp, EEP_NFTHRESH_5);
1864 break;
1865 case CHANNEL_B:
1866 case CHANNEL_G:
1867 case CHANNEL_G_HT20:
1868 case CHANNEL_G_HT40PLUS:
1869 case CHANNEL_G_HT40MINUS:
1870 *nft = (int16_t) ath9k_hw_get_eeprom(ahp, EEP_NFTHRESH_2);
1871 break;
1872 default:
1873 DPRINTF(ah->ah_sc, ATH_DBG_CHANNEL,
1874 "%s: invalid channel flags 0x%x\n", __func__,
1875 chan->channelFlags);
1876 return false;
1877 }
1878 return true;
1879}
1880
1881static void ath9k_hw_start_nfcal(struct ath_hal *ah)
1882{
1883 REG_SET_BIT(ah, AR_PHY_AGC_CONTROL,
1884 AR_PHY_AGC_CONTROL_ENABLE_NF);
1885 REG_SET_BIT(ah, AR_PHY_AGC_CONTROL,
1886 AR_PHY_AGC_CONTROL_NO_UPDATE_NF);
1887 REG_SET_BIT(ah, AR_PHY_AGC_CONTROL, AR_PHY_AGC_CONTROL_NF);
1888}
1889
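/*
 * Load the stored per-chain noise-floor estimates into the CCA
 * registers, trigger a hardware NF load and poll (up to ~10 ms) for
 * completion, then rewrite the registers with the fixed default of
 * -50 that this code programs after every load.
 */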
1890static void
1891ath9k_hw_loadnf(struct ath_hal *ah, struct ath9k_channel *chan)
1892{
1893 struct ath9k_nfcal_hist *h;
1894 int i, j;
1895 int32_t val;
1896 const u32 ar5416_cca_regs[6] = {
1897 AR_PHY_CCA,
1898 AR_PHY_CH1_CCA,
1899 AR_PHY_CH2_CCA,
1900 AR_PHY_EXT_CCA,
1901 AR_PHY_CH1_EXT_CCA,
1902 AR_PHY_CH2_EXT_CCA
1903 };
1904 u8 chainmask;
1905
1906 if (AR_SREV_9280(ah))
1907 chainmask = 0x1B;
1908 else
1909 chainmask = 0x3F;
1910
1911#ifdef ATH_NF_PER_CHAN
1912 h = chan->nfCalHist;
1913#else
1914 h = ah->nfCalHist;
1915#endif
1916
1917 for (i = 0; i < NUM_NF_READINGS; i++) {
1918 if (chainmask & (1 << i)) {
1919 val = REG_READ(ah, ar5416_cca_regs[i]);
1920 val &= 0xFFFFFE00;
1921 val |= (((u32) (h[i].privNF) << 1) & 0x1ff);
1922 REG_WRITE(ah, ar5416_cca_regs[i], val);
1923 }
1924 }
1925
1926 REG_CLR_BIT(ah, AR_PHY_AGC_CONTROL,
1927 AR_PHY_AGC_CONTROL_ENABLE_NF);
1928 REG_CLR_BIT(ah, AR_PHY_AGC_CONTROL,
1929 AR_PHY_AGC_CONTROL_NO_UPDATE_NF);
1930 REG_SET_BIT(ah, AR_PHY_AGC_CONTROL, AR_PHY_AGC_CONTROL_NF);
1931
1932 for (j = 0; j < 1000; j++) {
1933 if ((REG_READ(ah, AR_PHY_AGC_CONTROL) &
1934 AR_PHY_AGC_CONTROL_NF) == 0)
1935 break;
1936 udelay(10);
1937 }
1938
1939 for (i = 0; i < NUM_NF_READINGS; i++) {
1940 if (chainmask & (1 << i)) {
1941 val = REG_READ(ah, ar5416_cca_regs[i]);
1942 val &= 0xFFFFFE00;
1943 val |= (((u32) (-50) << 1) & 0x1ff);
1944 REG_WRITE(ah, ar5416_cca_regs[i], val);
1945 }
1946 }
1947}
1948
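/*
 * Finish a noise-floor calibration pass.  If the hardware NF load is
 * still pending the raw noise floor is reported as 0; otherwise the
 * per-chain readings are fetched, compared against the EEPROM
 * threshold (flagging CHANNEL_CW_INT on failure) and merged into the
 * calibration history, whose chain-0 value becomes rawNoiseFloor.
 */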
1949static int16_t ath9k_hw_getnf(struct ath_hal *ah,
1950 struct ath9k_channel *chan)
1951{
1952 int16_t nf, nfThresh;
1953 int16_t nfarray[NUM_NF_READINGS] = { 0 };
1954 struct ath9k_nfcal_hist *h;
1955 u8 chainmask;
1956
1957 if (AR_SREV_9280(ah))
1958 chainmask = 0x1B;
1959 else
1960 chainmask = 0x3F;
1961
1962 chan->channelFlags &= (~CHANNEL_CW_INT);
1963 if (REG_READ(ah, AR_PHY_AGC_CONTROL) & AR_PHY_AGC_CONTROL_NF) {
1964 DPRINTF(ah->ah_sc, ATH_DBG_CALIBRATE,
1965 "%s: NF did not complete in calibration window\n",
1966 __func__);
1967 nf = 0;
1968 chan->rawNoiseFloor = nf;
1969 return chan->rawNoiseFloor;
1970 } else {
1971 ar5416GetNoiseFloor(ah, nfarray);
1972 nf = nfarray[0];
1973 if (getNoiseFloorThresh(ah, chan, &nfThresh)
1974 && nf > nfThresh) {
1975 DPRINTF(ah->ah_sc, ATH_DBG_CALIBRATE,
1976 				"%s: noise floor failure detected; "
1977 "detected %d, threshold %d\n", __func__,
1978 nf, nfThresh);
1979 chan->channelFlags |= CHANNEL_CW_INT;
1980 }
1981 }
1982
1983#ifdef ATH_NF_PER_CHAN
1984 h = chan->nfCalHist;
1985#else
1986 h = ah->nfCalHist;
1987#endif
1988
1989 ath9k_hw_update_nfcal_hist_buffer(h, nfarray);
1990 chan->rawNoiseFloor = h[0].privNF;
1991
1992 return chan->rawNoiseFloor;
1993}
1994
1995static void ath9k_hw_update_mibstats(struct ath_hal *ah,
1996 struct ath9k_mib_stats *stats)
1997{
1998 stats->ackrcv_bad += REG_READ(ah, AR_ACK_FAIL);
1999 stats->rts_bad += REG_READ(ah, AR_RTS_FAIL);
2000 stats->fcs_bad += REG_READ(ah, AR_FCS_FAIL);
2001 stats->rts_good += REG_READ(ah, AR_RTS_OK);
2002 stats->beacons += REG_READ(ah, AR_BEACON_CNT);
2003}
2004
2005static void ath9k_enable_mib_counters(struct ath_hal *ah)
2006{
2007 struct ath_hal_5416 *ahp = AH5416(ah);
2008
2009 DPRINTF(ah->ah_sc, ATH_DBG_ANI, "Enable mib counters\n");
2010
2011 ath9k_hw_update_mibstats(ah, &ahp->ah_mibStats);
2012
2013 REG_WRITE(ah, AR_FILT_OFDM, 0);
2014 REG_WRITE(ah, AR_FILT_CCK, 0);
2015 REG_WRITE(ah, AR_MIBC,
2016 ~(AR_MIBC_COW | AR_MIBC_FMC | AR_MIBC_CMC | AR_MIBC_MCS)
2017 & 0x0f);
2018 REG_WRITE(ah, AR_PHY_ERR_MASK_1, AR_PHY_ERR_OFDM_TIMING);
2019 REG_WRITE(ah, AR_PHY_ERR_MASK_2, AR_PHY_ERR_CCK_TIMING);
2020}
2021
2022static void ath9k_hw_disable_mib_counters(struct ath_hal *ah)
2023{
2024 struct ath_hal_5416 *ahp = AH5416(ah);
2025
2026 DPRINTF(ah->ah_sc, ATH_DBG_ANI, "Disabling MIB counters\n");
2027
2028 REG_WRITE(ah, AR_MIBC, AR_MIBC_FMC | AR_MIBC_CMC);
2029
2030 ath9k_hw_update_mibstats(ah, &ahp->ah_mibStats);
2031
2032 REG_WRITE(ah, AR_FILT_OFDM, 0);
2033 REG_WRITE(ah, AR_FILT_CCK, 0);
2034}
2035
2036static int ath9k_hw_get_ani_channel_idx(struct ath_hal *ah,
2037 struct ath9k_channel *chan)
2038{
2039 struct ath_hal_5416 *ahp = AH5416(ah);
2040 int i;
2041
2042 for (i = 0; i < ARRAY_SIZE(ahp->ah_ani); i++) {
2043 if (ahp->ah_ani[i].c.channel == chan->channel)
2044 return i;
2045 if (ahp->ah_ani[i].c.channel == 0) {
2046 ahp->ah_ani[i].c.channel = chan->channel;
2047 ahp->ah_ani[i].c.channelFlags = chan->channelFlags;
2048 return i;
2049 }
2050 }
2051
2052 DPRINTF(ah->ah_sc, ATH_DBG_ANI,
2053 "No more channel states left. Using channel 0\n");
2054 return 0;
2055}
2056
2057static void ath9k_hw_ani_attach(struct ath_hal *ah)
2058{
2059 struct ath_hal_5416 *ahp = AH5416(ah);
2060 int i;
2061
2062 ahp->ah_hasHwPhyCounters = 1;
2063
2064 memset(ahp->ah_ani, 0, sizeof(ahp->ah_ani));
2065 for (i = 0; i < ARRAY_SIZE(ahp->ah_ani); i++) {
2066 ahp->ah_ani[i].ofdmTrigHigh = ATH9K_ANI_OFDM_TRIG_HIGH;
2067 ahp->ah_ani[i].ofdmTrigLow = ATH9K_ANI_OFDM_TRIG_LOW;
2068 ahp->ah_ani[i].cckTrigHigh = ATH9K_ANI_CCK_TRIG_HIGH;
2069 ahp->ah_ani[i].cckTrigLow = ATH9K_ANI_CCK_TRIG_LOW;
2070 ahp->ah_ani[i].rssiThrHigh = ATH9K_ANI_RSSI_THR_HIGH;
2071 ahp->ah_ani[i].rssiThrLow = ATH9K_ANI_RSSI_THR_LOW;
2072 ahp->ah_ani[i].ofdmWeakSigDetectOff =
2073 !ATH9K_ANI_USE_OFDM_WEAK_SIG;
2074 ahp->ah_ani[i].cckWeakSigThreshold =
2075 ATH9K_ANI_CCK_WEAK_SIG_THR;
2076 ahp->ah_ani[i].spurImmunityLevel = ATH9K_ANI_SPUR_IMMUNE_LVL;
2077 ahp->ah_ani[i].firstepLevel = ATH9K_ANI_FIRSTEP_LVL;
2078 if (ahp->ah_hasHwPhyCounters) {
2079 ahp->ah_ani[i].ofdmPhyErrBase =
2080 AR_PHY_COUNTMAX - ATH9K_ANI_OFDM_TRIG_HIGH;
2081 ahp->ah_ani[i].cckPhyErrBase =
2082 AR_PHY_COUNTMAX - ATH9K_ANI_CCK_TRIG_HIGH;
2083 }
2084 }
2085 if (ahp->ah_hasHwPhyCounters) {
2086 DPRINTF(ah->ah_sc, ATH_DBG_ANI,
2087 "Setting OfdmErrBase = 0x%08x\n",
2088 ahp->ah_ani[0].ofdmPhyErrBase);
2089 DPRINTF(ah->ah_sc, ATH_DBG_ANI, "Setting cckErrBase = 0x%08x\n",
2090 ahp->ah_ani[0].cckPhyErrBase);
2091
2092 REG_WRITE(ah, AR_PHY_ERR_1, ahp->ah_ani[0].ofdmPhyErrBase);
2093 REG_WRITE(ah, AR_PHY_ERR_2, ahp->ah_ani[0].cckPhyErrBase);
2094 ath9k_enable_mib_counters(ah);
2095 }
2096 ahp->ah_aniPeriod = ATH9K_ANI_PERIOD;
2097 if (ah->ah_config.enable_ani)
2098 ahp->ah_procPhyErr |= HAL_PROCESS_ANI;
2099}
2100
2101static inline void ath9k_hw_ani_setup(struct ath_hal *ah)
2102{
2103 struct ath_hal_5416 *ahp = AH5416(ah);
2104 int i;
2105
2106 const int totalSizeDesired[] = { -55, -55, -55, -55, -62 };
2107 const int coarseHigh[] = { -14, -14, -14, -14, -12 };
2108 const int coarseLow[] = { -64, -64, -64, -64, -70 };
2109 const int firpwr[] = { -78, -78, -78, -78, -80 };
2110
2111 for (i = 0; i < 5; i++) {
2112 ahp->ah_totalSizeDesired[i] = totalSizeDesired[i];
2113 ahp->ah_coarseHigh[i] = coarseHigh[i];
2114 ahp->ah_coarseLow[i] = coarseLow[i];
2115 ahp->ah_firpwr[i] = firpwr[i];
2116 }
2117}
2118
2119static void ath9k_hw_ani_detach(struct ath_hal *ah)
2120{
2121 struct ath_hal_5416 *ahp = AH5416(ah);
2122
2123 	DPRINTF(ah->ah_sc, ATH_DBG_ANI, "Detaching ANI\n");
2124 if (ahp->ah_hasHwPhyCounters) {
2125 ath9k_hw_disable_mib_counters(ah);
2126 REG_WRITE(ah, AR_PHY_ERR_1, 0);
2127 REG_WRITE(ah, AR_PHY_ERR_2, 0);
2128 }
2129}
2130
2131
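/*
 * Apply a single Adaptive Noise Immunity (ANI) command.  Each case
 * maps an abstract immunity level onto the corresponding baseband
 * register fields and updates the ANI statistics so level changes can
 * be tracked.
 */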
2132static bool ath9k_hw_ani_control(struct ath_hal *ah,
2133 enum ath9k_ani_cmd cmd, int param)
2134{
2135 struct ath_hal_5416 *ahp = AH5416(ah);
2136 struct ar5416AniState *aniState = ahp->ah_curani;
2137
2138 switch (cmd & ahp->ah_ani_function) {
2139 case ATH9K_ANI_NOISE_IMMUNITY_LEVEL:{
2140 u32 level = param;
2141
2142 if (level >= ARRAY_SIZE(ahp->ah_totalSizeDesired)) {
2143 DPRINTF(ah->ah_sc, ATH_DBG_ANI,
2144 "%s: level out of range (%u > %u)\n",
2145 __func__, level,
2146 (unsigned) ARRAY_SIZE(ahp->
2147 ah_totalSizeDesired));
2148 return false;
2149 }
2150
2151 REG_RMW_FIELD(ah, AR_PHY_DESIRED_SZ,
2152 AR_PHY_DESIRED_SZ_TOT_DES,
2153 ahp->ah_totalSizeDesired[level]);
2154 REG_RMW_FIELD(ah, AR_PHY_AGC_CTL1,
2155 AR_PHY_AGC_CTL1_COARSE_LOW,
2156 ahp->ah_coarseLow[level]);
2157 REG_RMW_FIELD(ah, AR_PHY_AGC_CTL1,
2158 AR_PHY_AGC_CTL1_COARSE_HIGH,
2159 ahp->ah_coarseHigh[level]);
2160 REG_RMW_FIELD(ah, AR_PHY_FIND_SIG,
2161 AR_PHY_FIND_SIG_FIRPWR,
2162 ahp->ah_firpwr[level]);
2163
2164 if (level > aniState->noiseImmunityLevel)
2165 ahp->ah_stats.ast_ani_niup++;
2166 else if (level < aniState->noiseImmunityLevel)
2167 ahp->ah_stats.ast_ani_nidown++;
2168 aniState->noiseImmunityLevel = level;
2169 break;
2170 }
2171 case ATH9K_ANI_OFDM_WEAK_SIGNAL_DETECTION:{
2172 const int m1ThreshLow[] = { 127, 50 };
2173 const int m2ThreshLow[] = { 127, 40 };
2174 const int m1Thresh[] = { 127, 0x4d };
2175 const int m2Thresh[] = { 127, 0x40 };
2176 const int m2CountThr[] = { 31, 16 };
2177 const int m2CountThrLow[] = { 63, 48 };
2178 u32 on = param ? 1 : 0;
2179
2180 REG_RMW_FIELD(ah, AR_PHY_SFCORR_LOW,
2181 AR_PHY_SFCORR_LOW_M1_THRESH_LOW,
2182 m1ThreshLow[on]);
2183 REG_RMW_FIELD(ah, AR_PHY_SFCORR_LOW,
2184 AR_PHY_SFCORR_LOW_M2_THRESH_LOW,
2185 m2ThreshLow[on]);
2186 REG_RMW_FIELD(ah, AR_PHY_SFCORR,
2187 AR_PHY_SFCORR_M1_THRESH,
2188 m1Thresh[on]);
2189 REG_RMW_FIELD(ah, AR_PHY_SFCORR,
2190 AR_PHY_SFCORR_M2_THRESH,
2191 m2Thresh[on]);
2192 REG_RMW_FIELD(ah, AR_PHY_SFCORR,
2193 AR_PHY_SFCORR_M2COUNT_THR,
2194 m2CountThr[on]);
2195 REG_RMW_FIELD(ah, AR_PHY_SFCORR_LOW,
2196 AR_PHY_SFCORR_LOW_M2COUNT_THR_LOW,
2197 m2CountThrLow[on]);
2198
2199 REG_RMW_FIELD(ah, AR_PHY_SFCORR_EXT,
2200 AR_PHY_SFCORR_EXT_M1_THRESH_LOW,
2201 m1ThreshLow[on]);
2202 REG_RMW_FIELD(ah, AR_PHY_SFCORR_EXT,
2203 AR_PHY_SFCORR_EXT_M2_THRESH_LOW,
2204 m2ThreshLow[on]);
2205 REG_RMW_FIELD(ah, AR_PHY_SFCORR_EXT,
2206 AR_PHY_SFCORR_EXT_M1_THRESH,
2207 m1Thresh[on]);
2208 REG_RMW_FIELD(ah, AR_PHY_SFCORR_EXT,
2209 AR_PHY_SFCORR_EXT_M2_THRESH,
2210 m2Thresh[on]);
2211
2212 if (on)
2213 REG_SET_BIT(ah, AR_PHY_SFCORR_LOW,
2214 AR_PHY_SFCORR_LOW_USE_SELF_CORR_LOW);
2215 else
2216 REG_CLR_BIT(ah, AR_PHY_SFCORR_LOW,
2217 AR_PHY_SFCORR_LOW_USE_SELF_CORR_LOW);
2218
2219 if (!on != aniState->ofdmWeakSigDetectOff) {
2220 if (on)
2221 ahp->ah_stats.ast_ani_ofdmon++;
2222 else
2223 ahp->ah_stats.ast_ani_ofdmoff++;
2224 aniState->ofdmWeakSigDetectOff = !on;
2225 }
2226 break;
2227 }
2228 case ATH9K_ANI_CCK_WEAK_SIGNAL_THR:{
2229 const int weakSigThrCck[] = { 8, 6 };
2230 u32 high = param ? 1 : 0;
2231
2232 REG_RMW_FIELD(ah, AR_PHY_CCK_DETECT,
2233 AR_PHY_CCK_DETECT_WEAK_SIG_THR_CCK,
2234 weakSigThrCck[high]);
2235 if (high != aniState->cckWeakSigThreshold) {
2236 if (high)
2237 ahp->ah_stats.ast_ani_cckhigh++;
2238 else
2239 ahp->ah_stats.ast_ani_ccklow++;
2240 aniState->cckWeakSigThreshold = high;
2241 }
2242 break;
2243 }
2244 case ATH9K_ANI_FIRSTEP_LEVEL:{
2245 const int firstep[] = { 0, 4, 8 };
2246 u32 level = param;
2247
2248 if (level >= ARRAY_SIZE(firstep)) {
2249 DPRINTF(ah->ah_sc, ATH_DBG_ANI,
2250 "%s: level out of range (%u > %u)\n",
2251 __func__, level,
2252 (unsigned) ARRAY_SIZE(firstep));
2253 return false;
2254 }
2255 REG_RMW_FIELD(ah, AR_PHY_FIND_SIG,
2256 AR_PHY_FIND_SIG_FIRSTEP,
2257 firstep[level]);
2258 if (level > aniState->firstepLevel)
2259 ahp->ah_stats.ast_ani_stepup++;
2260 else if (level < aniState->firstepLevel)
2261 ahp->ah_stats.ast_ani_stepdown++;
2262 aniState->firstepLevel = level;
2263 break;
2264 }
2265 case ATH9K_ANI_SPUR_IMMUNITY_LEVEL:{
2266 const int cycpwrThr1[] =
2267 { 2, 4, 6, 8, 10, 12, 14, 16 };
2268 u32 level = param;
2269
2270 if (level >= ARRAY_SIZE(cycpwrThr1)) {
2271 DPRINTF(ah->ah_sc, ATH_DBG_ANI,
2272 "%s: level out of range (%u > %u)\n",
2273 __func__, level,
2274 (unsigned)
2275 ARRAY_SIZE(cycpwrThr1));
2276 return false;
2277 }
2278 REG_RMW_FIELD(ah, AR_PHY_TIMING5,
2279 AR_PHY_TIMING5_CYCPWR_THR1,
2280 cycpwrThr1[level]);
2281 if (level > aniState->spurImmunityLevel)
2282 ahp->ah_stats.ast_ani_spurup++;
2283 else if (level < aniState->spurImmunityLevel)
2284 ahp->ah_stats.ast_ani_spurdown++;
2285 aniState->spurImmunityLevel = level;
2286 break;
2287 }
2288 case ATH9K_ANI_PRESENT:
2289 break;
2290 default:
2291 DPRINTF(ah->ah_sc, ATH_DBG_ANI,
2292 "%s: invalid cmd %u\n", __func__, cmd);
2293 return false;
2294 }
2295
2296 DPRINTF(ah->ah_sc, ATH_DBG_ANI, "%s: ANI parameters:\n", __func__);
2297 DPRINTF(ah->ah_sc, ATH_DBG_ANI,
2298 "noiseImmunityLevel=%d, spurImmunityLevel=%d, "
2299 "ofdmWeakSigDetectOff=%d\n",
2300 aniState->noiseImmunityLevel, aniState->spurImmunityLevel,
2301 !aniState->ofdmWeakSigDetectOff);
2302 DPRINTF(ah->ah_sc, ATH_DBG_ANI,
2303 "cckWeakSigThreshold=%d, "
2304 "firstepLevel=%d, listenTime=%d\n",
2305 aniState->cckWeakSigThreshold, aniState->firstepLevel,
2306 aniState->listenTime);
2307 DPRINTF(ah->ah_sc, ATH_DBG_ANI,
2308 "cycleCount=%d, ofdmPhyErrCount=%d, cckPhyErrCount=%d\n\n",
2309 aniState->cycleCount, aniState->ofdmPhyErrCount,
2310 aniState->cckPhyErrCount);
2311 return true;
2312}
2313
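/*
 * Start a fresh ANI measurement period: reset the listen time and, on
 * parts with hardware PHY counters, pre-load AR_PHY_ERR_1/2 with
 * (AR_PHY_COUNTMAX - trigger) so the counters saturate, and a MIB
 * interrupt fires, once roughly ofdmTrigHigh/cckTrigHigh errors have
 * been seen.
 */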
2314static void ath9k_ani_restart(struct ath_hal *ah)
2315{
2316 struct ath_hal_5416 *ahp = AH5416(ah);
2317 struct ar5416AniState *aniState;
2318
2319 if (!DO_ANI(ah))
2320 return;
2321
2322 aniState = ahp->ah_curani;
2323
2324 aniState->listenTime = 0;
2325 if (ahp->ah_hasHwPhyCounters) {
2326 if (aniState->ofdmTrigHigh > AR_PHY_COUNTMAX) {
2327 aniState->ofdmPhyErrBase = 0;
2328 DPRINTF(ah->ah_sc, ATH_DBG_ANI,
2329 "OFDM Trigger is too high for hw counters\n");
2330 } else {
2331 aniState->ofdmPhyErrBase =
2332 AR_PHY_COUNTMAX - aniState->ofdmTrigHigh;
2333 }
2334 if (aniState->cckTrigHigh > AR_PHY_COUNTMAX) {
2335 aniState->cckPhyErrBase = 0;
2336 DPRINTF(ah->ah_sc, ATH_DBG_ANI,
2337 "CCK Trigger is too high for hw counters\n");
2338 } else {
2339 aniState->cckPhyErrBase =
2340 AR_PHY_COUNTMAX - aniState->cckTrigHigh;
2341 }
2342 DPRINTF(ah->ah_sc, ATH_DBG_ANI,
2343 "%s: Writing ofdmbase=%u cckbase=%u\n",
2344 __func__, aniState->ofdmPhyErrBase,
2345 aniState->cckPhyErrBase);
2346 REG_WRITE(ah, AR_PHY_ERR_1, aniState->ofdmPhyErrBase);
2347 REG_WRITE(ah, AR_PHY_ERR_2, aniState->cckPhyErrBase);
2348 REG_WRITE(ah, AR_PHY_ERR_MASK_1, AR_PHY_ERR_OFDM_TIMING);
2349 REG_WRITE(ah, AR_PHY_ERR_MASK_2, AR_PHY_ERR_CCK_TIMING);
2350
2351 ath9k_hw_update_mibstats(ah, &ahp->ah_mibStats);
2352 }
2353 aniState->ofdmPhyErrCount = 0;
2354 aniState->cckPhyErrCount = 0;
2355}
2356
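/*
 * Too many OFDM PHY errors: raise immunity one notch at a time, noise
 * immunity first, then spur immunity, and only then (depending on the
 * operating mode and beacon RSSI) weak-signal detection and the
 * firstep level.
 */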
2357static void ath9k_hw_ani_ofdm_err_trigger(struct ath_hal *ah)
2358{
2359 struct ath_hal_5416 *ahp = AH5416(ah);
2360 struct ath9k_channel *chan = ah->ah_curchan;
2361 struct ar5416AniState *aniState;
2362 enum wireless_mode mode;
2363 int32_t rssi;
2364
2365 if (!DO_ANI(ah))
2366 return;
2367
2368 aniState = ahp->ah_curani;
2369
2370 if (aniState->noiseImmunityLevel < HAL_NOISE_IMMUNE_MAX) {
2371 if (ath9k_hw_ani_control(ah, ATH9K_ANI_NOISE_IMMUNITY_LEVEL,
2372 aniState->noiseImmunityLevel + 1)) {
2373 return;
2374 }
2375 }
2376
2377 if (aniState->spurImmunityLevel < HAL_SPUR_IMMUNE_MAX) {
2378 if (ath9k_hw_ani_control(ah, ATH9K_ANI_SPUR_IMMUNITY_LEVEL,
2379 aniState->spurImmunityLevel + 1)) {
2380 return;
2381 }
2382 }
2383
2384 if (ah->ah_opmode == ATH9K_M_HOSTAP) {
2385 if (aniState->firstepLevel < HAL_FIRST_STEP_MAX) {
2386 ath9k_hw_ani_control(ah, ATH9K_ANI_FIRSTEP_LEVEL,
2387 aniState->firstepLevel + 1);
2388 }
2389 return;
2390 }
2391 rssi = BEACON_RSSI(ahp);
2392 if (rssi > aniState->rssiThrHigh) {
2393 if (!aniState->ofdmWeakSigDetectOff) {
2394 if (ath9k_hw_ani_control(ah,
2395 ATH9K_ANI_OFDM_WEAK_SIGNAL_DETECTION,
2396 false)) {
2397 ath9k_hw_ani_control(ah,
2398 ATH9K_ANI_SPUR_IMMUNITY_LEVEL,
2399 0);
2400 return;
2401 }
2402 }
2403 if (aniState->firstepLevel < HAL_FIRST_STEP_MAX) {
2404 ath9k_hw_ani_control(ah, ATH9K_ANI_FIRSTEP_LEVEL,
2405 aniState->firstepLevel + 1);
2406 return;
2407 }
2408 } else if (rssi > aniState->rssiThrLow) {
2409 if (aniState->ofdmWeakSigDetectOff)
2410 ath9k_hw_ani_control(ah,
2411 ATH9K_ANI_OFDM_WEAK_SIGNAL_DETECTION,
2412 true);
2413 if (aniState->firstepLevel < HAL_FIRST_STEP_MAX)
2414 ath9k_hw_ani_control(ah, ATH9K_ANI_FIRSTEP_LEVEL,
2415 aniState->firstepLevel + 1);
2416 return;
2417 } else {
2418 mode = ath9k_hw_chan2wmode(ah, chan);
2419 if (mode == ATH9K_MODE_11G || mode == ATH9K_MODE_11B) {
2420 if (!aniState->ofdmWeakSigDetectOff)
2421 ath9k_hw_ani_control(ah,
2422 ATH9K_ANI_OFDM_WEAK_SIGNAL_DETECTION,
2423 false);
2424 if (aniState->firstepLevel > 0)
2425 ath9k_hw_ani_control(ah,
2426 ATH9K_ANI_FIRSTEP_LEVEL,
2427 0);
2428 return;
2429 }
2430 }
2431}
2432
2433static void ath9k_hw_ani_cck_err_trigger(struct ath_hal *ah)
2434{
2435 struct ath_hal_5416 *ahp = AH5416(ah);
2436 struct ath9k_channel *chan = ah->ah_curchan;
2437 struct ar5416AniState *aniState;
2438 enum wireless_mode mode;
2439 int32_t rssi;
2440
2441 if (!DO_ANI(ah))
2442 return;
2443
2444 aniState = ahp->ah_curani;
2445 if (aniState->noiseImmunityLevel < HAL_NOISE_IMMUNE_MAX) {
2446 if (ath9k_hw_ani_control(ah, ATH9K_ANI_NOISE_IMMUNITY_LEVEL,
2447 aniState->noiseImmunityLevel + 1)) {
2448 return;
2449 }
2450 }
2451 if (ah->ah_opmode == ATH9K_M_HOSTAP) {
2452 if (aniState->firstepLevel < HAL_FIRST_STEP_MAX) {
2453 ath9k_hw_ani_control(ah, ATH9K_ANI_FIRSTEP_LEVEL,
2454 aniState->firstepLevel + 1);
2455 }
2456 return;
2457 }
2458 rssi = BEACON_RSSI(ahp);
2459 if (rssi > aniState->rssiThrLow) {
2460 if (aniState->firstepLevel < HAL_FIRST_STEP_MAX)
2461 ath9k_hw_ani_control(ah, ATH9K_ANI_FIRSTEP_LEVEL,
2462 aniState->firstepLevel + 1);
2463 } else {
2464 mode = ath9k_hw_chan2wmode(ah, chan);
2465 if (mode == ATH9K_MODE_11G || mode == ATH9K_MODE_11B) {
2466 if (aniState->firstepLevel > 0)
2467 ath9k_hw_ani_control(ah,
2468 ATH9K_ANI_FIRSTEP_LEVEL,
2469 0);
2470 }
2471 }
2472}
2473
2474static void ath9k_ani_reset(struct ath_hal *ah)
2475{
2476 struct ath_hal_5416 *ahp = AH5416(ah);
2477 struct ar5416AniState *aniState;
2478 struct ath9k_channel *chan = ah->ah_curchan;
2479 int index;
2480
2481 if (!DO_ANI(ah))
2482 return;
2483
2484 index = ath9k_hw_get_ani_channel_idx(ah, chan);
2485 aniState = &ahp->ah_ani[index];
2486 ahp->ah_curani = aniState;
2487
2488 if (DO_ANI(ah) && ah->ah_opmode != ATH9K_M_STA
2489 && ah->ah_opmode != ATH9K_M_IBSS) {
2490 DPRINTF(ah->ah_sc, ATH_DBG_ANI,
2491 "%s: Reset ANI state opmode %u\n", __func__,
2492 ah->ah_opmode);
2493 ahp->ah_stats.ast_ani_reset++;
2494 ath9k_hw_ani_control(ah, ATH9K_ANI_NOISE_IMMUNITY_LEVEL, 0);
2495 ath9k_hw_ani_control(ah, ATH9K_ANI_SPUR_IMMUNITY_LEVEL, 0);
2496 ath9k_hw_ani_control(ah, ATH9K_ANI_FIRSTEP_LEVEL, 0);
2497 ath9k_hw_ani_control(ah,
2498 ATH9K_ANI_OFDM_WEAK_SIGNAL_DETECTION,
2499 !ATH9K_ANI_USE_OFDM_WEAK_SIG);
2500 ath9k_hw_ani_control(ah, ATH9K_ANI_CCK_WEAK_SIGNAL_THR,
2501 ATH9K_ANI_CCK_WEAK_SIG_THR);
2502 ath9k_hw_setrxfilter(ah,
2503 ath9k_hw_getrxfilter(ah) |
2504 ATH9K_RX_FILTER_PHYERR);
2505 if (ah->ah_opmode == ATH9K_M_HOSTAP) {
2506 ahp->ah_curani->ofdmTrigHigh =
2507 ah->ah_config.ofdm_trig_high;
2508 ahp->ah_curani->ofdmTrigLow =
2509 ah->ah_config.ofdm_trig_low;
2510 ahp->ah_curani->cckTrigHigh =
2511 ah->ah_config.cck_trig_high;
2512 ahp->ah_curani->cckTrigLow =
2513 ah->ah_config.cck_trig_low;
2514 }
2515 ath9k_ani_restart(ah);
2516 return;
2517 }
2518
2519 if (aniState->noiseImmunityLevel != 0)
2520 ath9k_hw_ani_control(ah, ATH9K_ANI_NOISE_IMMUNITY_LEVEL,
2521 aniState->noiseImmunityLevel);
2522 if (aniState->spurImmunityLevel != 0)
2523 ath9k_hw_ani_control(ah, ATH9K_ANI_SPUR_IMMUNITY_LEVEL,
2524 aniState->spurImmunityLevel);
2525 if (aniState->ofdmWeakSigDetectOff)
2526 ath9k_hw_ani_control(ah,
2527 ATH9K_ANI_OFDM_WEAK_SIGNAL_DETECTION,
2528 !aniState->ofdmWeakSigDetectOff);
2529 if (aniState->cckWeakSigThreshold)
2530 ath9k_hw_ani_control(ah, ATH9K_ANI_CCK_WEAK_SIGNAL_THR,
2531 aniState->cckWeakSigThreshold);
2532 if (aniState->firstepLevel != 0)
2533 ath9k_hw_ani_control(ah, ATH9K_ANI_FIRSTEP_LEVEL,
2534 aniState->firstepLevel);
2535 if (ahp->ah_hasHwPhyCounters) {
2536 ath9k_hw_setrxfilter(ah,
2537 ath9k_hw_getrxfilter(ah) &
2538 ~ATH9K_RX_FILTER_PHYERR);
2539 ath9k_ani_restart(ah);
2540 REG_WRITE(ah, AR_PHY_ERR_MASK_1, AR_PHY_ERR_OFDM_TIMING);
2541 REG_WRITE(ah, AR_PHY_ERR_MASK_2, AR_PHY_ERR_CCK_TIMING);
2542
2543 } else {
2544 ath9k_ani_restart(ah);
2545 ath9k_hw_setrxfilter(ah,
2546 ath9k_hw_getrxfilter(ah) |
2547 ATH9K_RX_FILTER_PHYERR);
2548 }
2549}
2550
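/*
 * MIB interrupt handler: latch and clear the hardware MIB counters,
 * and when either PHY error counter has saturated, fold the new error
 * counts into the current ANI state, run the OFDM/CCK trigger logic
 * and restart the measurement period.
 */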
2551void ath9k_hw_procmibevent(struct ath_hal *ah,
2552 const struct ath9k_node_stats *stats)
2553{
2554 struct ath_hal_5416 *ahp = AH5416(ah);
2555 u32 phyCnt1, phyCnt2;
2556
2557 DPRINTF(ah->ah_sc, ATH_DBG_ANI, "Processing Mib Intr\n");
2558
2559 REG_WRITE(ah, AR_FILT_OFDM, 0);
2560 REG_WRITE(ah, AR_FILT_CCK, 0);
2561 if (!(REG_READ(ah, AR_SLP_MIB_CTRL) & AR_SLP_MIB_PENDING))
2562 REG_WRITE(ah, AR_SLP_MIB_CTRL, AR_SLP_MIB_CLEAR);
2563
2564 ath9k_hw_update_mibstats(ah, &ahp->ah_mibStats);
2565 ahp->ah_stats.ast_nodestats = *stats;
2566
2567 if (!DO_ANI(ah))
2568 return;
2569
2570 phyCnt1 = REG_READ(ah, AR_PHY_ERR_1);
2571 phyCnt2 = REG_READ(ah, AR_PHY_ERR_2);
2572 if (((phyCnt1 & AR_MIBCNT_INTRMASK) == AR_MIBCNT_INTRMASK) ||
2573 ((phyCnt2 & AR_MIBCNT_INTRMASK) == AR_MIBCNT_INTRMASK)) {
2574 struct ar5416AniState *aniState = ahp->ah_curani;
2575 u32 ofdmPhyErrCnt, cckPhyErrCnt;
2576
2577 ofdmPhyErrCnt = phyCnt1 - aniState->ofdmPhyErrBase;
2578 ahp->ah_stats.ast_ani_ofdmerrs +=
2579 ofdmPhyErrCnt - aniState->ofdmPhyErrCount;
2580 aniState->ofdmPhyErrCount = ofdmPhyErrCnt;
2581
2582 cckPhyErrCnt = phyCnt2 - aniState->cckPhyErrBase;
2583 ahp->ah_stats.ast_ani_cckerrs +=
2584 cckPhyErrCnt - aniState->cckPhyErrCount;
2585 aniState->cckPhyErrCount = cckPhyErrCnt;
2586
2587 if (aniState->ofdmPhyErrCount > aniState->ofdmTrigHigh)
2588 ath9k_hw_ani_ofdm_err_trigger(ah);
2589 if (aniState->cckPhyErrCount > aniState->cckTrigHigh)
2590 ath9k_hw_ani_cck_err_trigger(ah);
2591
2592 ath9k_ani_restart(ah);
2593 }
2594}
2595
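/*
 * Error rates have stayed low, so back the immunity settings off
 * again: firstep and weak-signal detection first (guided by beacon
 * RSSI outside AP mode), then spur immunity, and finally noise
 * immunity.
 */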
2596static void ath9k_hw_ani_lower_immunity(struct ath_hal *ah)
2597{
2598 struct ath_hal_5416 *ahp = AH5416(ah);
2599 struct ar5416AniState *aniState;
2600 int32_t rssi;
2601
2602 aniState = ahp->ah_curani;
2603
2604 if (ah->ah_opmode == ATH9K_M_HOSTAP) {
2605 if (aniState->firstepLevel > 0) {
2606 if (ath9k_hw_ani_control(ah, ATH9K_ANI_FIRSTEP_LEVEL,
2607 aniState->firstepLevel - 1)) {
2608 return;
2609 }
2610 }
2611 } else {
2612 rssi = BEACON_RSSI(ahp);
2613 if (rssi > aniState->rssiThrHigh) {
2614 /* XXX: Handle me */
2615 } else if (rssi > aniState->rssiThrLow) {
2616 if (aniState->ofdmWeakSigDetectOff) {
2617 if (ath9k_hw_ani_control(ah,
2618 ATH9K_ANI_OFDM_WEAK_SIGNAL_DETECTION,
2619 true) ==
2620 true) {
2621 return;
2622 }
2623 }
2624 if (aniState->firstepLevel > 0) {
2625 if (ath9k_hw_ani_control
2626 (ah, ATH9K_ANI_FIRSTEP_LEVEL,
2627 aniState->firstepLevel - 1) ==
2628 true) {
2629 return;
2630 }
2631 }
2632 } else {
2633 if (aniState->firstepLevel > 0) {
2634 if (ath9k_hw_ani_control
2635 (ah, ATH9K_ANI_FIRSTEP_LEVEL,
2636 aniState->firstepLevel - 1) ==
2637 true) {
2638 return;
2639 }
2640 }
2641 }
2642 }
2643
2644 if (aniState->spurImmunityLevel > 0) {
2645 if (ath9k_hw_ani_control(ah, ATH9K_ANI_SPUR_IMMUNITY_LEVEL,
2646 aniState->spurImmunityLevel - 1)) {
2647 return;
2648 }
2649 }
2650
2651 if (aniState->noiseImmunityLevel > 0) {
2652 ath9k_hw_ani_control(ah, ATH9K_ANI_NOISE_IMMUNITY_LEVEL,
2653 aniState->noiseImmunityLevel - 1);
2654 return;
2655 }
2656}
2657
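/*
 * Compute the "listen time" (time spent neither transmitting nor
 * receiving) from the cycle, RX-frame and TX-frame counters.  The
 * (cycles - rx - tx) delta is divided by 44000, which assumes the
 * counters tick at 44 MHz and yields a result in milliseconds.
 */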
2658static int32_t ath9k_hw_ani_get_listen_time(struct ath_hal *ah)
2659{
2660 struct ath_hal_5416 *ahp = AH5416(ah);
2661 struct ar5416AniState *aniState;
2662 u32 txFrameCount, rxFrameCount, cycleCount;
2663 int32_t listenTime;
2664
2665 txFrameCount = REG_READ(ah, AR_TFCNT);
2666 rxFrameCount = REG_READ(ah, AR_RFCNT);
2667 cycleCount = REG_READ(ah, AR_CCCNT);
2668
2669 aniState = ahp->ah_curani;
2670 if (aniState->cycleCount == 0 || aniState->cycleCount > cycleCount) {
2671
2672 listenTime = 0;
2673 ahp->ah_stats.ast_ani_lzero++;
2674 } else {
2675 int32_t ccdelta = cycleCount - aniState->cycleCount;
2676 int32_t rfdelta = rxFrameCount - aniState->rxFrameCount;
2677 int32_t tfdelta = txFrameCount - aniState->txFrameCount;
2678 listenTime = (ccdelta - rfdelta - tfdelta) / 44000;
2679 }
2680 aniState->cycleCount = cycleCount;
2681 aniState->txFrameCount = txFrameCount;
2682 aniState->rxFrameCount = rxFrameCount;
2683
2684 return listenTime;
2685}
2686
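/*
 * Periodic ANI worker: accumulate listen time and PHY error counts,
 * then either lower immunity after five quiet ANI periods or raise it
 * through the OFDM/CCK triggers when the error rate over the last
 * period exceeds the high trigger thresholds.
 */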
2687void ath9k_hw_ani_monitor(struct ath_hal *ah,
2688 const struct ath9k_node_stats *stats,
2689 struct ath9k_channel *chan)
2690{
2691 struct ath_hal_5416 *ahp = AH5416(ah);
2692 struct ar5416AniState *aniState;
2693 int32_t listenTime;
2694
2695 aniState = ahp->ah_curani;
2696 ahp->ah_stats.ast_nodestats = *stats;
2697
2698 listenTime = ath9k_hw_ani_get_listen_time(ah);
2699 if (listenTime < 0) {
2700 ahp->ah_stats.ast_ani_lneg++;
2701 ath9k_ani_restart(ah);
2702 return;
2703 }
2704
2705 aniState->listenTime += listenTime;
2706
2707 if (ahp->ah_hasHwPhyCounters) {
2708 u32 phyCnt1, phyCnt2;
2709 u32 ofdmPhyErrCnt, cckPhyErrCnt;
2710
2711 ath9k_hw_update_mibstats(ah, &ahp->ah_mibStats);
2712
2713 phyCnt1 = REG_READ(ah, AR_PHY_ERR_1);
2714 phyCnt2 = REG_READ(ah, AR_PHY_ERR_2);
2715
2716 if (phyCnt1 < aniState->ofdmPhyErrBase ||
2717 phyCnt2 < aniState->cckPhyErrBase) {
2718 if (phyCnt1 < aniState->ofdmPhyErrBase) {
2719 DPRINTF(ah->ah_sc, ATH_DBG_ANI,
2720 "%s: phyCnt1 0x%x, resetting "
2721 "counter value to 0x%x\n",
2722 __func__, phyCnt1,
2723 aniState->ofdmPhyErrBase);
2724 REG_WRITE(ah, AR_PHY_ERR_1,
2725 aniState->ofdmPhyErrBase);
2726 REG_WRITE(ah, AR_PHY_ERR_MASK_1,
2727 AR_PHY_ERR_OFDM_TIMING);
2728 }
2729 if (phyCnt2 < aniState->cckPhyErrBase) {
2730 DPRINTF(ah->ah_sc, ATH_DBG_ANI,
2731 "%s: phyCnt2 0x%x, resetting "
2732 "counter value to 0x%x\n",
2733 __func__, phyCnt2,
2734 aniState->cckPhyErrBase);
2735 REG_WRITE(ah, AR_PHY_ERR_2,
2736 aniState->cckPhyErrBase);
2737 REG_WRITE(ah, AR_PHY_ERR_MASK_2,
2738 AR_PHY_ERR_CCK_TIMING);
2739 }
2740 return;
2741 }
2742
2743 ofdmPhyErrCnt = phyCnt1 - aniState->ofdmPhyErrBase;
2744 ahp->ah_stats.ast_ani_ofdmerrs +=
2745 ofdmPhyErrCnt - aniState->ofdmPhyErrCount;
2746 aniState->ofdmPhyErrCount = ofdmPhyErrCnt;
2747
2748 cckPhyErrCnt = phyCnt2 - aniState->cckPhyErrBase;
2749 ahp->ah_stats.ast_ani_cckerrs +=
2750 cckPhyErrCnt - aniState->cckPhyErrCount;
2751 aniState->cckPhyErrCount = cckPhyErrCnt;
2752 }
2753
2754 if (!DO_ANI(ah))
2755 return;
2756
2757 if (aniState->listenTime > 5 * ahp->ah_aniPeriod) {
2758 if (aniState->ofdmPhyErrCount <= aniState->listenTime *
2759 aniState->ofdmTrigLow / 1000 &&
2760 aniState->cckPhyErrCount <= aniState->listenTime *
2761 aniState->cckTrigLow / 1000)
2762 ath9k_hw_ani_lower_immunity(ah);
2763 ath9k_ani_restart(ah);
2764 } else if (aniState->listenTime > ahp->ah_aniPeriod) {
2765 if (aniState->ofdmPhyErrCount > aniState->listenTime *
2766 aniState->ofdmTrigHigh / 1000) {
2767 ath9k_hw_ani_ofdm_err_trigger(ah);
2768 ath9k_ani_restart(ah);
2769 } else if (aniState->cckPhyErrCount >
2770 aniState->listenTime * aniState->cckTrigHigh /
2771 1000) {
2772 ath9k_hw_ani_cck_err_trigger(ah);
2773 ath9k_ani_restart(ah);
2774 }
2775 }
2776}
2777
2778#ifndef ATH_NF_PER_CHAN
2779static void ath9k_init_nfcal_hist_buffer(struct ath_hal *ah)
2780{
2781 int i, j;
2782
2783 for (i = 0; i < NUM_NF_READINGS; i++) {
2784 ah->nfCalHist[i].currIndex = 0;
2785 ah->nfCalHist[i].privNF = AR_PHY_CCA_MAX_GOOD_VALUE;
2786 ah->nfCalHist[i].invalidNFcount =
2787 AR_PHY_CCA_FILTERWINDOW_LENGTH;
2788 for (j = 0; j < ATH9K_NF_CAL_HIST_MAX; j++) {
2789 ah->nfCalHist[i].nfCalBuffer[j] =
2790 AR_PHY_CCA_MAX_GOOD_VALUE;
2791 }
2792 }
2793 return;
2794}
2795#endif
2796
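/*
 * Route an output signal type onto a GPIO pin.  Each output mux
 * register holds six 5-bit fields, so the register is chosen from
 * gpio / 6 and the field offset is (gpio % 6) * 5; parts before
 * AR9280 rev 2.0 need a bit-shuffling workaround before MUX1 can be
 * updated.
 */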
2797static void ath9k_hw_gpio_cfg_output_mux(struct ath_hal *ah,
2798 u32 gpio, u32 type)
2799{
2800 int addr;
2801 u32 gpio_shift, tmp;
2802
2803 if (gpio > 11)
2804 addr = AR_GPIO_OUTPUT_MUX3;
2805 else if (gpio > 5)
2806 addr = AR_GPIO_OUTPUT_MUX2;
2807 else
2808 addr = AR_GPIO_OUTPUT_MUX1;
2809
2810 gpio_shift = (gpio % 6) * 5;
2811
2812 if (AR_SREV_9280_20_OR_LATER(ah)
2813 || (addr != AR_GPIO_OUTPUT_MUX1)) {
2814 REG_RMW(ah, addr, (type << gpio_shift),
2815 (0x1f << gpio_shift));
2816 } else {
2817 tmp = REG_READ(ah, addr);
2818 tmp = ((tmp & 0x1F0) << 1) | (tmp & ~0x1F0);
2819 tmp &= ~(0x1f << gpio_shift);
2820 tmp |= (type << gpio_shift);
2821 REG_WRITE(ah, addr, tmp);
2822 }
2823}
2824
2825static bool ath9k_hw_cfg_output(struct ath_hal *ah, u32 gpio,
2826 enum ath9k_gpio_output_mux_type
2827 halSignalType)
2828{
2829 u32 ah_signal_type;
2830 u32 gpio_shift;
2831
2832 static u32 MuxSignalConversionTable[] = {
2833
2834 AR_GPIO_OUTPUT_MUX_AS_OUTPUT,
2835
2836 AR_GPIO_OUTPUT_MUX_AS_PCIE_ATTENTION_LED,
2837
2838 AR_GPIO_OUTPUT_MUX_AS_PCIE_POWER_LED,
2839
2840 AR_GPIO_OUTPUT_MUX_AS_MAC_NETWORK_LED,
2841
2842 AR_GPIO_OUTPUT_MUX_AS_MAC_POWER_LED,
2843 };
2844
2845 if ((halSignalType >= 0)
2846 && (halSignalType < ARRAY_SIZE(MuxSignalConversionTable)))
2847 ah_signal_type = MuxSignalConversionTable[halSignalType];
2848 else
2849 return false;
2850
2851 ath9k_hw_gpio_cfg_output_mux(ah, gpio, ah_signal_type);
2852
2853 gpio_shift = 2 * gpio;
2854
2855 REG_RMW(ah,
2856 AR_GPIO_OE_OUT,
2857 (AR_GPIO_OE_OUT_DRV_ALL << gpio_shift),
2858 (AR_GPIO_OE_OUT_DRV << gpio_shift));
2859
2860 return true;
2861}
2862
2863static bool ath9k_hw_set_gpio(struct ath_hal *ah, u32 gpio,
2864 u32 val)
2865{
2866 REG_RMW(ah, AR_GPIO_IN_OUT, ((val & 1) << gpio),
2867 AR_GPIO_BIT(gpio));
2868 return true;
2869}
2870
2871static u32 ath9k_hw_gpio_get(struct ath_hal *ah, u32 gpio)
2872{
2873 if (gpio >= ah->ah_caps.num_gpio_pins)
2874 return 0xffffffff;
2875
2876 if (AR_SREV_9280_10_OR_LATER(ah)) {
2877 return (MS
2878 (REG_READ(ah, AR_GPIO_IN_OUT),
2879 AR928X_GPIO_IN_VAL) & AR_GPIO_BIT(gpio)) != 0;
2880 } else {
2881 return (MS(REG_READ(ah, AR_GPIO_IN_OUT), AR_GPIO_IN_VAL) &
2882 AR_GPIO_BIT(gpio)) != 0;
2883 }
2884}
2885
2886static inline int ath9k_hw_post_attach(struct ath_hal *ah)
2887{
2888 int ecode;
2889
2890 if (!ath9k_hw_chip_test(ah)) {
2891 DPRINTF(ah->ah_sc, ATH_DBG_REG_IO,
2892 "%s: hardware self-test failed\n", __func__);
2893 return -ENODEV;
2894 }
2895
2896 ecode = ath9k_hw_rf_claim(ah);
2897 if (ecode != 0)
2898 return ecode;
2899
2900 ecode = ath9k_hw_eeprom_attach(ah);
2901 if (ecode != 0)
2902 return ecode;
2903 ecode = ath9k_hw_rfattach(ah);
2904 if (ecode != 0)
2905 return ecode;
2906
2907 if (!AR_SREV_9100(ah)) {
2908 ath9k_hw_ani_setup(ah);
2909 ath9k_hw_ani_attach(ah);
2910 }
2911 return 0;
2912}
2913
2914static u32 ath9k_hw_ini_fixup(struct ath_hal *ah,
2915 struct ar5416_eeprom *pEepData,
2916 u32 reg, u32 value)
2917{
2918 struct base_eep_header *pBase = &(pEepData->baseEepHeader);
2919
2920 switch (ah->ah_devid) {
2921 case AR9280_DEVID_PCI:
2922 if (reg == 0x7894) {
2923 DPRINTF(ah->ah_sc, ATH_DBG_ANY,
2924 "ini VAL: %x EEPROM: %x\n", value,
2925 (pBase->version & 0xff));
2926
2927 if ((pBase->version & 0xff) > 0x0a) {
2928 DPRINTF(ah->ah_sc, ATH_DBG_ANY,
2929 "PWDCLKIND: %d\n",
2930 pBase->pwdclkind);
2931 value &= ~AR_AN_TOP2_PWDCLKIND;
2932 value |= AR_AN_TOP2_PWDCLKIND & (pBase->
2933 pwdclkind << AR_AN_TOP2_PWDCLKIND_S);
2934 } else {
2935 DPRINTF(ah->ah_sc, ATH_DBG_ANY,
2936 "PWDCLKIND Earlier Rev\n");
2937 }
2938
2939 DPRINTF(ah->ah_sc, ATH_DBG_ANY,
2940 "final ini VAL: %x\n", value);
2941 }
2942 break;
2943 }
2944 return value;
2945}
2946
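/*
 * Populate ah->ah_caps from the EEPROM: regulatory domain, supported
 * wireless modes, TX/RX chainmasks, queue and keycache sizes, channel
 * range limits and the ATH9K_HW_CAP_* feature bits keyed off the MAC
 * revision.
 */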
2947static bool ath9k_hw_fill_cap_info(struct ath_hal *ah)
2948{
2949 struct ath_hal_5416 *ahp = AH5416(ah);
2950 struct ath9k_hw_capabilities *pCap = &ah->ah_caps;
2951 u16 capField = 0, eeval;
2952
2953 eeval = ath9k_hw_get_eeprom(ahp, EEP_REG_0);
2954
2955 ah->ah_currentRD = eeval;
2956
2957 eeval = ath9k_hw_get_eeprom(ahp, EEP_REG_1);
2958 ah->ah_currentRDExt = eeval;
2959
2960 capField = ath9k_hw_get_eeprom(ahp, EEP_OP_CAP);
2961
2962 if (ah->ah_opmode != ATH9K_M_HOSTAP &&
2963 ah->ah_subvendorid == AR_SUBVENDOR_ID_NEW_A) {
2964 if (ah->ah_currentRD == 0x64 || ah->ah_currentRD == 0x65)
2965 ah->ah_currentRD += 5;
2966 else if (ah->ah_currentRD == 0x41)
2967 ah->ah_currentRD = 0x43;
2968 DPRINTF(ah->ah_sc, ATH_DBG_REGULATORY,
2969 "%s: regdomain mapped to 0x%x\n", __func__,
2970 ah->ah_currentRD);
2971 }
2972
2973 eeval = ath9k_hw_get_eeprom(ahp, EEP_OP_MODE);
2974 bitmap_zero(pCap->wireless_modes, ATH9K_MODE_MAX);
2975
2976 if (eeval & AR5416_OPFLAGS_11A) {
2977 set_bit(ATH9K_MODE_11A, pCap->wireless_modes);
2978 if (ah->ah_config.ht_enable) {
2979 if (!(eeval & AR5416_OPFLAGS_N_5G_HT20))
2980 set_bit(ATH9K_MODE_11NA_HT20,
2981 pCap->wireless_modes);
2982 if (!(eeval & AR5416_OPFLAGS_N_5G_HT40)) {
2983 set_bit(ATH9K_MODE_11NA_HT40PLUS,
2984 pCap->wireless_modes);
2985 set_bit(ATH9K_MODE_11NA_HT40MINUS,
2986 pCap->wireless_modes);
2987 }
2988 }
2989 }
2990
2991 if (eeval & AR5416_OPFLAGS_11G) {
2992 set_bit(ATH9K_MODE_11B, pCap->wireless_modes);
2993 set_bit(ATH9K_MODE_11G, pCap->wireless_modes);
2994 if (ah->ah_config.ht_enable) {
2995 if (!(eeval & AR5416_OPFLAGS_N_2G_HT20))
2996 set_bit(ATH9K_MODE_11NG_HT20,
2997 pCap->wireless_modes);
2998 if (!(eeval & AR5416_OPFLAGS_N_2G_HT40)) {
2999 set_bit(ATH9K_MODE_11NG_HT40PLUS,
3000 pCap->wireless_modes);
3001 set_bit(ATH9K_MODE_11NG_HT40MINUS,
3002 pCap->wireless_modes);
3003 }
3004 }
3005 }
3006
3007 pCap->tx_chainmask = ath9k_hw_get_eeprom(ahp, EEP_TX_MASK);
3008 if ((ah->ah_isPciExpress)
3009 || (eeval & AR5416_OPFLAGS_11A)) {
3010 pCap->rx_chainmask =
3011 ath9k_hw_get_eeprom(ahp, EEP_RX_MASK);
3012 } else {
3013 pCap->rx_chainmask =
3014 (ath9k_hw_gpio_get(ah, 0)) ? 0x5 : 0x7;
3015 }
3016
3017 if (!(AR_SREV_9280(ah) && (ah->ah_macRev == 0)))
3018 ahp->ah_miscMode |= AR_PCU_MIC_NEW_LOC_ENA;
3019
3020 pCap->low_2ghz_chan = 2312;
3021 pCap->high_2ghz_chan = 2732;
3022
3023 pCap->low_5ghz_chan = 4920;
3024 pCap->high_5ghz_chan = 6100;
3025
3026 pCap->hw_caps &= ~ATH9K_HW_CAP_CIPHER_CKIP;
3027 pCap->hw_caps |= ATH9K_HW_CAP_CIPHER_TKIP;
3028 pCap->hw_caps |= ATH9K_HW_CAP_CIPHER_AESCCM;
3029
3030 pCap->hw_caps &= ~ATH9K_HW_CAP_MIC_CKIP;
3031 pCap->hw_caps |= ATH9K_HW_CAP_MIC_TKIP;
3032 pCap->hw_caps |= ATH9K_HW_CAP_MIC_AESCCM;
3033
3034 pCap->hw_caps |= ATH9K_HW_CAP_CHAN_SPREAD;
3035
3036 if (ah->ah_config.ht_enable)
3037 pCap->hw_caps |= ATH9K_HW_CAP_HT;
3038 else
3039 pCap->hw_caps &= ~ATH9K_HW_CAP_HT;
3040
3041 pCap->hw_caps |= ATH9K_HW_CAP_GTT;
3042 pCap->hw_caps |= ATH9K_HW_CAP_VEOL;
3043 pCap->hw_caps |= ATH9K_HW_CAP_BSSIDMASK;
3044 pCap->hw_caps &= ~ATH9K_HW_CAP_MCAST_KEYSEARCH;
3045
3046 if (capField & AR_EEPROM_EEPCAP_MAXQCU)
3047 pCap->total_queues =
3048 MS(capField, AR_EEPROM_EEPCAP_MAXQCU);
3049 else
3050 pCap->total_queues = ATH9K_NUM_TX_QUEUES;
3051
3052 if (capField & AR_EEPROM_EEPCAP_KC_ENTRIES)
3053 pCap->keycache_size =
3054 1 << MS(capField, AR_EEPROM_EEPCAP_KC_ENTRIES);
3055 else
3056 pCap->keycache_size = AR_KEYTABLE_SIZE;
3057
3058 pCap->hw_caps |= ATH9K_HW_CAP_FASTCC;
3059 pCap->num_mr_retries = 4;
3060 pCap->tx_triglevel_max = MAX_TX_FIFO_THRESHOLD;
3061
3062 if (AR_SREV_9280_10_OR_LATER(ah))
3063 pCap->num_gpio_pins = AR928X_NUM_GPIO;
3064 else
3065 pCap->num_gpio_pins = AR_NUM_GPIO;
3066
3067 if (AR_SREV_9280_10_OR_LATER(ah)) {
3068 pCap->hw_caps |= ATH9K_HW_CAP_WOW;
3069 pCap->hw_caps |= ATH9K_HW_CAP_WOW_MATCHPATTERN_EXACT;
3070 } else {
3071 pCap->hw_caps &= ~ATH9K_HW_CAP_WOW;
3072 pCap->hw_caps &= ~ATH9K_HW_CAP_WOW_MATCHPATTERN_EXACT;
3073 }
3074
3075 if (AR_SREV_9160_10_OR_LATER(ah) || AR_SREV_9100(ah)) {
3076 pCap->hw_caps |= ATH9K_HW_CAP_CST;
3077 pCap->rts_aggr_limit = ATH_AMPDU_LIMIT_MAX;
3078 } else {
3079 pCap->rts_aggr_limit = (8 * 1024);
3080 }
3081
3082 pCap->hw_caps |= ATH9K_HW_CAP_ENHANCEDPM;
3083
3084 ah->ah_rfsilent = ath9k_hw_get_eeprom(ahp, EEP_RF_SILENT);
3085 if (ah->ah_rfsilent & EEP_RFSILENT_ENABLED) {
3086 ahp->ah_gpioSelect =
3087 MS(ah->ah_rfsilent, EEP_RFSILENT_GPIO_SEL);
3088 ahp->ah_polarity =
3089 MS(ah->ah_rfsilent, EEP_RFSILENT_POLARITY);
3090
3091 ath9k_hw_setcapability(ah, ATH9K_CAP_RFSILENT, 1, true,
3092 NULL);
3093 pCap->hw_caps |= ATH9K_HW_CAP_RFSILENT;
3094 }
3095
3096 if ((ah->ah_macVersion == AR_SREV_VERSION_5416_PCI) ||
3097 (ah->ah_macVersion == AR_SREV_VERSION_5416_PCIE) ||
3098 (ah->ah_macVersion == AR_SREV_VERSION_9160) ||
3099 (ah->ah_macVersion == AR_SREV_VERSION_9100) ||
3100 (ah->ah_macVersion == AR_SREV_VERSION_9280))
3101 pCap->hw_caps &= ~ATH9K_HW_CAP_AUTOSLEEP;
3102 else
3103 pCap->hw_caps |= ATH9K_HW_CAP_AUTOSLEEP;
3104
3105 if (AR_SREV_9280(ah))
3106 pCap->hw_caps &= ~ATH9K_HW_CAP_4KB_SPLITTRANS;
3107 else
3108 pCap->hw_caps |= ATH9K_HW_CAP_4KB_SPLITTRANS;
3109
3110 if (ah->ah_currentRDExt & (1 << REG_EXT_JAPAN_MIDBAND)) {
3111 pCap->reg_cap =
3112 AR_EEPROM_EEREGCAP_EN_KK_NEW_11A |
3113 AR_EEPROM_EEREGCAP_EN_KK_U1_EVEN |
3114 AR_EEPROM_EEREGCAP_EN_KK_U2 |
3115 AR_EEPROM_EEREGCAP_EN_KK_MIDBAND;
3116 } else {
3117 pCap->reg_cap =
3118 AR_EEPROM_EEREGCAP_EN_KK_NEW_11A |
3119 AR_EEPROM_EEREGCAP_EN_KK_U1_EVEN;
3120 }
3121
3122 pCap->reg_cap |= AR_EEPROM_EEREGCAP_EN_FCC_MIDBAND;
3123
3124 pCap->num_antcfg_5ghz =
3125 ath9k_hw_get_num_ant_config(ahp, IEEE80211_BAND_5GHZ);
3126 pCap->num_antcfg_2ghz =
3127 ath9k_hw_get_num_ant_config(ahp, IEEE80211_BAND_2GHZ);
3128
3129 return true;
3130}
3131
3132static void ar5416DisablePciePhy(struct ath_hal *ah)
3133{
3134 if (!AR_SREV_9100(ah))
3135 return;
3136
3137 REG_WRITE(ah, AR_PCIE_SERDES, 0x9248fc00);
3138 REG_WRITE(ah, AR_PCIE_SERDES, 0x24924924);
3139 REG_WRITE(ah, AR_PCIE_SERDES, 0x28000029);
3140 REG_WRITE(ah, AR_PCIE_SERDES, 0x57160824);
3141 REG_WRITE(ah, AR_PCIE_SERDES, 0x25980579);
3142 REG_WRITE(ah, AR_PCIE_SERDES, 0x00000000);
3143 REG_WRITE(ah, AR_PCIE_SERDES, 0x1aaabe40);
3144 REG_WRITE(ah, AR_PCIE_SERDES, 0xbe105554);
3145 REG_WRITE(ah, AR_PCIE_SERDES, 0x000e1007);
3146
3147 REG_WRITE(ah, AR_PCIE_SERDES2, 0x00000000);
3148}
3149
3150static void ath9k_set_power_sleep(struct ath_hal *ah, int setChip)
3151{
3152 REG_SET_BIT(ah, AR_STA_ID1, AR_STA_ID1_PWR_SAV);
3153 if (setChip) {
3154 REG_CLR_BIT(ah, AR_RTC_FORCE_WAKE,
3155 AR_RTC_FORCE_WAKE_EN);
3156 if (!AR_SREV_9100(ah))
3157 REG_WRITE(ah, AR_RC, AR_RC_AHB | AR_RC_HOSTIF);
3158
3159 REG_CLR_BIT(ah, (u16) (AR_RTC_RESET),
3160 AR_RTC_RESET_EN);
3161 }
3162}
3163
3164static void ath9k_set_power_network_sleep(struct ath_hal *ah, int setChip)
3165{
3166 REG_SET_BIT(ah, AR_STA_ID1, AR_STA_ID1_PWR_SAV);
3167 if (setChip) {
3168 struct ath9k_hw_capabilities *pCap = &ah->ah_caps;
3169
3170 if (!(pCap->hw_caps & ATH9K_HW_CAP_AUTOSLEEP)) {
3171 REG_WRITE(ah, AR_RTC_FORCE_WAKE,
3172 AR_RTC_FORCE_WAKE_ON_INT);
3173 } else {
3174 REG_CLR_BIT(ah, AR_RTC_FORCE_WAKE,
3175 AR_RTC_FORCE_WAKE_EN);
3176 }
3177 }
3178}
3179
3180static bool ath9k_hw_set_power_awake(struct ath_hal *ah,
3181 int setChip)
3182{
3183 u32 val;
3184 int i;
3185
3186 if (setChip) {
3187 if ((REG_READ(ah, AR_RTC_STATUS) & AR_RTC_STATUS_M) ==
3188 AR_RTC_STATUS_SHUTDOWN) {
3189 if (ath9k_hw_set_reset_reg(ah, ATH9K_RESET_POWER_ON)
3190 != true) {
3191 return false;
3192 }
3193 }
3194 if (AR_SREV_9100(ah))
3195 REG_SET_BIT(ah, AR_RTC_RESET,
3196 AR_RTC_RESET_EN);
3197
3198 REG_SET_BIT(ah, AR_RTC_FORCE_WAKE,
3199 AR_RTC_FORCE_WAKE_EN);
3200 udelay(50);
3201
3202 for (i = POWER_UP_TIME / 50; i > 0; i--) {
3203 val = REG_READ(ah, AR_RTC_STATUS) & AR_RTC_STATUS_M;
3204 if (val == AR_RTC_STATUS_ON)
3205 break;
3206 udelay(50);
3207 REG_SET_BIT(ah, AR_RTC_FORCE_WAKE,
3208 AR_RTC_FORCE_WAKE_EN);
3209 }
3210 if (i == 0) {
3211 DPRINTF(ah->ah_sc, ATH_DBG_POWER_MGMT,
3212 "%s: Failed to wakeup in %uus\n",
3213 				__func__, POWER_UP_TIME);
3214 return false;
3215 }
3216 }
3217
3218 REG_CLR_BIT(ah, AR_STA_ID1, AR_STA_ID1_PWR_SAV);
3219 return true;
3220}
3221
3222bool ath9k_hw_setpower(struct ath_hal *ah,
3223 enum ath9k_power_mode mode)
3224{
3225 struct ath_hal_5416 *ahp = AH5416(ah);
3226 static const char *modes[] = {
3227 "AWAKE",
3228 "FULL-SLEEP",
3229 "NETWORK SLEEP",
3230 "UNDEFINED"
3231 };
3232 int status = true, setChip = true;
3233
3234 DPRINTF(ah->ah_sc, ATH_DBG_POWER_MGMT, "%s: %s -> %s (%s)\n", __func__,
3235 modes[ahp->ah_powerMode], modes[mode],
3236 setChip ? "set chip " : "");
3237
3238 switch (mode) {
3239 case ATH9K_PM_AWAKE:
3240 status = ath9k_hw_set_power_awake(ah, setChip);
3241 break;
3242 case ATH9K_PM_FULL_SLEEP:
3243 ath9k_set_power_sleep(ah, setChip);
3244 ahp->ah_chipFullSleep = true;
3245 break;
3246 case ATH9K_PM_NETWORK_SLEEP:
3247 ath9k_set_power_network_sleep(ah, setChip);
3248 break;
3249 default:
3250 DPRINTF(ah->ah_sc, ATH_DBG_POWER_MGMT,
3251 "%s: unknown power mode %u\n", __func__, mode);
3252 return false;
3253 }
3254 ahp->ah_powerMode = mode;
3255 return status;
3256}
3257
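/*
 * Chip attach: reset and wake the chip, check that the MAC revision is
 * one this driver supports, pick the per-chip calibration data and
 * initialization (INI) register arrays, then run post-attach (RF
 * claim, EEPROM, capability fill) before returning the HAL instance.
 */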
3258static struct ath_hal *ath9k_hw_do_attach(u16 devid,
3259 struct ath_softc *sc,
3260 void __iomem *mem,
3261 int *status)
3262{
3263 struct ath_hal_5416 *ahp;
3264 struct ath_hal *ah;
3265 int ecode;
3266#ifndef CONFIG_SLOW_ANT_DIV
3267 u32 i;
3268 u32 j;
3269#endif
3270
3271 ahp = ath9k_hw_newstate(devid, sc, mem, status);
3272 if (ahp == NULL)
3273 return NULL;
3274
3275 ah = &ahp->ah;
3276
3277 ath9k_hw_set_defaults(ah);
3278
3279 if (ah->ah_config.intr_mitigation != 0)
3280 ahp->ah_intrMitigation = true;
3281
3282 if (!ath9k_hw_set_reset_reg(ah, ATH9K_RESET_POWER_ON)) {
3283 DPRINTF(ah->ah_sc, ATH_DBG_RESET, "%s: couldn't reset chip\n",
3284 __func__);
3285 ecode = -EIO;
3286 goto bad;
3287 }
3288
3289 if (!ath9k_hw_setpower(ah, ATH9K_PM_AWAKE)) {
3290 DPRINTF(ah->ah_sc, ATH_DBG_RESET, "%s: couldn't wakeup chip\n",
3291 __func__);
3292 ecode = -EIO;
3293 goto bad;
3294 }
3295
3296 if (ah->ah_config.serialize_regmode == SER_REG_MODE_AUTO) {
3297 if (ah->ah_macVersion == AR_SREV_VERSION_5416_PCI) {
3298 ah->ah_config.serialize_regmode =
3299 SER_REG_MODE_ON;
3300 } else {
3301 ah->ah_config.serialize_regmode =
3302 SER_REG_MODE_OFF;
3303 }
3304 }
3305 DPRINTF(ah->ah_sc, ATH_DBG_RESET,
3306 "%s: serialize_regmode is %d\n",
3307 __func__, ah->ah_config.serialize_regmode);
3308
3309 if ((ah->ah_macVersion != AR_SREV_VERSION_5416_PCI) &&
3310 (ah->ah_macVersion != AR_SREV_VERSION_5416_PCIE) &&
3311 (ah->ah_macVersion != AR_SREV_VERSION_9160) &&
3312 (!AR_SREV_9100(ah)) && (!AR_SREV_9280(ah))) {
3313 DPRINTF(ah->ah_sc, ATH_DBG_RESET,
3314 "%s: Mac Chip Rev 0x%02x.%x is not supported by "
3315 "this driver\n", __func__,
3316 ah->ah_macVersion, ah->ah_macRev);
3317 ecode = -EOPNOTSUPP;
3318 goto bad;
3319 }
3320
3321 if (AR_SREV_9100(ah)) {
3322 ahp->ah_iqCalData.calData = &iq_cal_multi_sample;
3323 ahp->ah_suppCals = IQ_MISMATCH_CAL;
3324 ah->ah_isPciExpress = false;
3325 }
3326 ah->ah_phyRev = REG_READ(ah, AR_PHY_CHIP_ID);
3327
3328 if (AR_SREV_9160_10_OR_LATER(ah)) {
3329 if (AR_SREV_9280_10_OR_LATER(ah)) {
3330 ahp->ah_iqCalData.calData = &iq_cal_single_sample;
3331 ahp->ah_adcGainCalData.calData =
3332 &adc_gain_cal_single_sample;
3333 ahp->ah_adcDcCalData.calData =
3334 &adc_dc_cal_single_sample;
3335 ahp->ah_adcDcCalInitData.calData =
3336 &adc_init_dc_cal;
3337 } else {
3338 ahp->ah_iqCalData.calData = &iq_cal_multi_sample;
3339 ahp->ah_adcGainCalData.calData =
3340 &adc_gain_cal_multi_sample;
3341 ahp->ah_adcDcCalData.calData =
3342 &adc_dc_cal_multi_sample;
3343 ahp->ah_adcDcCalInitData.calData =
3344 &adc_init_dc_cal;
3345 }
3346 ahp->ah_suppCals =
3347 ADC_GAIN_CAL | ADC_DC_CAL | IQ_MISMATCH_CAL;
3348 }
3349
3350 if (AR_SREV_9160(ah)) {
3351 ah->ah_config.enable_ani = 1;
3352 ahp->ah_ani_function = (ATH9K_ANI_SPUR_IMMUNITY_LEVEL |
3353 ATH9K_ANI_FIRSTEP_LEVEL);
3354 } else {
3355 ahp->ah_ani_function = ATH9K_ANI_ALL;
3356 if (AR_SREV_9280_10_OR_LATER(ah)) {
3357 ahp->ah_ani_function &=
3358 ~ATH9K_ANI_NOISE_IMMUNITY_LEVEL;
3359 }
3360 }
3361
3362 DPRINTF(ah->ah_sc, ATH_DBG_RESET,
3363 		"%s: This Mac Chip Rev 0x%02x.%x is supported\n", __func__,
3364 ah->ah_macVersion, ah->ah_macRev);
3365
3366 if (AR_SREV_9280_20_OR_LATER(ah)) {
3367 INIT_INI_ARRAY(&ahp->ah_iniModes, ar9280Modes_9280_2,
3368 ARRAY_SIZE(ar9280Modes_9280_2), 6);
3369 INIT_INI_ARRAY(&ahp->ah_iniCommon, ar9280Common_9280_2,
3370 ARRAY_SIZE(ar9280Common_9280_2), 2);
3371
3372 if (ah->ah_config.pcie_clock_req) {
3373 INIT_INI_ARRAY(&ahp->ah_iniPcieSerdes,
3374 ar9280PciePhy_clkreq_off_L1_9280,
3375 ARRAY_SIZE
3376 (ar9280PciePhy_clkreq_off_L1_9280),
3377 2);
3378 } else {
3379 INIT_INI_ARRAY(&ahp->ah_iniPcieSerdes,
3380 ar9280PciePhy_clkreq_always_on_L1_9280,
3381 ARRAY_SIZE
3382 (ar9280PciePhy_clkreq_always_on_L1_9280),
3383 2);
3384 }
3385 INIT_INI_ARRAY(&ahp->ah_iniModesAdditional,
3386 ar9280Modes_fast_clock_9280_2,
3387 ARRAY_SIZE(ar9280Modes_fast_clock_9280_2),
3388 3);
3389 } else if (AR_SREV_9280_10_OR_LATER(ah)) {
3390 INIT_INI_ARRAY(&ahp->ah_iniModes, ar9280Modes_9280,
3391 ARRAY_SIZE(ar9280Modes_9280), 6);
3392 INIT_INI_ARRAY(&ahp->ah_iniCommon, ar9280Common_9280,
3393 ARRAY_SIZE(ar9280Common_9280), 2);
3394 } else if (AR_SREV_9160_10_OR_LATER(ah)) {
3395 INIT_INI_ARRAY(&ahp->ah_iniModes, ar5416Modes_9160,
3396 ARRAY_SIZE(ar5416Modes_9160), 6);
3397 INIT_INI_ARRAY(&ahp->ah_iniCommon, ar5416Common_9160,
3398 ARRAY_SIZE(ar5416Common_9160), 2);
3399 INIT_INI_ARRAY(&ahp->ah_iniBank0, ar5416Bank0_9160,
3400 ARRAY_SIZE(ar5416Bank0_9160), 2);
3401 INIT_INI_ARRAY(&ahp->ah_iniBB_RfGain, ar5416BB_RfGain_9160,
3402 ARRAY_SIZE(ar5416BB_RfGain_9160), 3);
3403 INIT_INI_ARRAY(&ahp->ah_iniBank1, ar5416Bank1_9160,
3404 ARRAY_SIZE(ar5416Bank1_9160), 2);
3405 INIT_INI_ARRAY(&ahp->ah_iniBank2, ar5416Bank2_9160,
3406 ARRAY_SIZE(ar5416Bank2_9160), 2);
3407 INIT_INI_ARRAY(&ahp->ah_iniBank3, ar5416Bank3_9160,
3408 ARRAY_SIZE(ar5416Bank3_9160), 3);
3409 INIT_INI_ARRAY(&ahp->ah_iniBank6, ar5416Bank6_9160,
3410 ARRAY_SIZE(ar5416Bank6_9160), 3);
3411 INIT_INI_ARRAY(&ahp->ah_iniBank6TPC, ar5416Bank6TPC_9160,
3412 ARRAY_SIZE(ar5416Bank6TPC_9160), 3);
3413 INIT_INI_ARRAY(&ahp->ah_iniBank7, ar5416Bank7_9160,
3414 ARRAY_SIZE(ar5416Bank7_9160), 2);
3415 if (AR_SREV_9160_11(ah)) {
3416 INIT_INI_ARRAY(&ahp->ah_iniAddac,
3417 ar5416Addac_91601_1,
3418 ARRAY_SIZE(ar5416Addac_91601_1), 2);
3419 } else {
3420 INIT_INI_ARRAY(&ahp->ah_iniAddac, ar5416Addac_9160,
3421 ARRAY_SIZE(ar5416Addac_9160), 2);
3422 }
3423 } else if (AR_SREV_9100_OR_LATER(ah)) {
3424 INIT_INI_ARRAY(&ahp->ah_iniModes, ar5416Modes_9100,
3425 ARRAY_SIZE(ar5416Modes_9100), 6);
3426 INIT_INI_ARRAY(&ahp->ah_iniCommon, ar5416Common_9100,
3427 ARRAY_SIZE(ar5416Common_9100), 2);
3428 INIT_INI_ARRAY(&ahp->ah_iniBank0, ar5416Bank0_9100,
3429 ARRAY_SIZE(ar5416Bank0_9100), 2);
3430 INIT_INI_ARRAY(&ahp->ah_iniBB_RfGain, ar5416BB_RfGain_9100,
3431 ARRAY_SIZE(ar5416BB_RfGain_9100), 3);
3432 INIT_INI_ARRAY(&ahp->ah_iniBank1, ar5416Bank1_9100,
3433 ARRAY_SIZE(ar5416Bank1_9100), 2);
3434 INIT_INI_ARRAY(&ahp->ah_iniBank2, ar5416Bank2_9100,
3435 ARRAY_SIZE(ar5416Bank2_9100), 2);
3436 INIT_INI_ARRAY(&ahp->ah_iniBank3, ar5416Bank3_9100,
3437 ARRAY_SIZE(ar5416Bank3_9100), 3);
3438 INIT_INI_ARRAY(&ahp->ah_iniBank6, ar5416Bank6_9100,
3439 ARRAY_SIZE(ar5416Bank6_9100), 3);
3440 INIT_INI_ARRAY(&ahp->ah_iniBank6TPC, ar5416Bank6TPC_9100,
3441 ARRAY_SIZE(ar5416Bank6TPC_9100), 3);
3442 INIT_INI_ARRAY(&ahp->ah_iniBank7, ar5416Bank7_9100,
3443 ARRAY_SIZE(ar5416Bank7_9100), 2);
3444 INIT_INI_ARRAY(&ahp->ah_iniAddac, ar5416Addac_9100,
3445 ARRAY_SIZE(ar5416Addac_9100), 2);
3446 } else {
3447 INIT_INI_ARRAY(&ahp->ah_iniModes, ar5416Modes,
3448 ARRAY_SIZE(ar5416Modes), 6);
3449 INIT_INI_ARRAY(&ahp->ah_iniCommon, ar5416Common,
3450 ARRAY_SIZE(ar5416Common), 2);
3451 INIT_INI_ARRAY(&ahp->ah_iniBank0, ar5416Bank0,
3452 ARRAY_SIZE(ar5416Bank0), 2);
3453 INIT_INI_ARRAY(&ahp->ah_iniBB_RfGain, ar5416BB_RfGain,
3454 ARRAY_SIZE(ar5416BB_RfGain), 3);
3455 INIT_INI_ARRAY(&ahp->ah_iniBank1, ar5416Bank1,
3456 ARRAY_SIZE(ar5416Bank1), 2);
3457 INIT_INI_ARRAY(&ahp->ah_iniBank2, ar5416Bank2,
3458 ARRAY_SIZE(ar5416Bank2), 2);
3459 INIT_INI_ARRAY(&ahp->ah_iniBank3, ar5416Bank3,
3460 ARRAY_SIZE(ar5416Bank3), 3);
3461 INIT_INI_ARRAY(&ahp->ah_iniBank6, ar5416Bank6,
3462 ARRAY_SIZE(ar5416Bank6), 3);
3463 INIT_INI_ARRAY(&ahp->ah_iniBank6TPC, ar5416Bank6TPC,
3464 ARRAY_SIZE(ar5416Bank6TPC), 3);
3465 INIT_INI_ARRAY(&ahp->ah_iniBank7, ar5416Bank7,
3466 ARRAY_SIZE(ar5416Bank7), 2);
3467 INIT_INI_ARRAY(&ahp->ah_iniAddac, ar5416Addac,
3468 ARRAY_SIZE(ar5416Addac), 2);
3469 }
3470
3471 if (ah->ah_isPciExpress)
3472 ath9k_hw_configpcipowersave(ah, 0);
3473 else
3474 ar5416DisablePciePhy(ah);
3475
3476 ecode = ath9k_hw_post_attach(ah);
3477 if (ecode != 0)
3478 goto bad;
3479
3480#ifndef CONFIG_SLOW_ANT_DIV
3481 if (ah->ah_devid == AR9280_DEVID_PCI) {
3482 for (i = 0; i < ahp->ah_iniModes.ia_rows; i++) {
3483 u32 reg = INI_RA(&ahp->ah_iniModes, i, 0);
3484
3485 for (j = 1; j < ahp->ah_iniModes.ia_columns; j++) {
3486 u32 val = INI_RA(&ahp->ah_iniModes, i, j);
3487
3488 INI_RA(&ahp->ah_iniModes, i, j) =
3489 ath9k_hw_ini_fixup(ah, &ahp->ah_eeprom,
3490 reg, val);
3491 }
3492 }
3493 }
3494#endif
3495
3496 if (!ath9k_hw_fill_cap_info(ah)) {
3497 DPRINTF(ah->ah_sc, ATH_DBG_RESET,
3498 			"%s: failed ath9k_hw_fill_cap_info\n", __func__);
3499 ecode = -EINVAL;
3500 goto bad;
3501 }
3502
3503 ecode = ath9k_hw_init_macaddr(ah);
3504 if (ecode != 0) {
3505 DPRINTF(ah->ah_sc, ATH_DBG_RESET,
3506 "%s: failed initializing mac address\n",
3507 __func__);
3508 goto bad;
3509 }
3510
3511 if (AR_SREV_9285(ah))
3512 ah->ah_txTrigLevel = (AR_FTRIG_256B >> AR_FTRIG_S);
3513 else
3514 ah->ah_txTrigLevel = (AR_FTRIG_512B >> AR_FTRIG_S);
3515
3516#ifndef ATH_NF_PER_CHAN
3517
3518 ath9k_init_nfcal_hist_buffer(ah);
3519#endif
3520
3521 return ah;
3522
3523bad:
3524 if (ahp)
3525 ath9k_hw_detach((struct ath_hal *) ahp);
3526 if (status)
3527 *status = ecode;
3528 return NULL;
3529}
3530
3531void ath9k_hw_detach(struct ath_hal *ah)
3532{
3533 if (!AR_SREV_9100(ah))
3534 ath9k_hw_ani_detach(ah);
3535 ath9k_hw_rfdetach(ah);
3536
3537 ath9k_hw_setpower(ah, ATH9K_PM_FULL_SLEEP);
3538 kfree(ah);
3539}
3540
3541bool ath9k_get_channel_edges(struct ath_hal *ah,
3542 u16 flags, u16 *low,
3543 u16 *high)
3544{
3545 struct ath9k_hw_capabilities *pCap = &ah->ah_caps;
3546
3547 if (flags & CHANNEL_5GHZ) {
3548 *low = pCap->low_5ghz_chan;
3549 *high = pCap->high_5ghz_chan;
3550 return true;
3551 }
3552 if ((flags & CHANNEL_2GHZ)) {
3553 *low = pCap->low_2ghz_chan;
3554 *high = pCap->high_2ghz_chan;
3555
3556 return true;
3557 }
3558 return false;
3559}
3560
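/*
 * Expand a sparse (power, Vpd) calibration curve into a table with one
 * entry per step of two (the power values appear to be stored in
 * quarter-dB units, so each step is half a dB): locate the two
 * surrounding intercepts for every power level and interpolate the
 * Vpd value linearly between them.
 */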
3561static inline bool ath9k_hw_fill_vpd_table(u8 pwrMin,
3562 u8 pwrMax,
3563 u8 *pPwrList,
3564 u8 *pVpdList,
3565 u16
3566 numIntercepts,
3567 u8 *pRetVpdList)
3568{
3569 u16 i, k;
3570 u8 currPwr = pwrMin;
3571 u16 idxL = 0, idxR = 0;
3572
3573 for (i = 0; i <= (pwrMax - pwrMin) / 2; i++) {
3574 ath9k_hw_get_lower_upper_index(currPwr, pPwrList,
3575 numIntercepts, &(idxL),
3576 &(idxR));
3577 if (idxR < 1)
3578 idxR = 1;
3579 if (idxL == numIntercepts - 1)
3580 idxL = (u16) (numIntercepts - 2);
3581 if (pPwrList[idxL] == pPwrList[idxR])
3582 k = pVpdList[idxL];
3583 else
3584 k = (u16) (((currPwr -
3585 pPwrList[idxL]) *
3586 pVpdList[idxR] +
3587 (pPwrList[idxR] -
3588 currPwr) * pVpdList[idxL]) /
3589 (pPwrList[idxR] -
3590 pPwrList[idxL]));
3591 pRetVpdList[i] = (u8) k;
3592 currPwr += 2;
3593 }
3594
3595 return true;
3596}
3597
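/*
 * Build the power-detector ADC (PDADC) table for a channel: pick the
 * calibration piers that bracket the channel frequency (or a single
 * pier on an exact match), interpolate the Vpd curves between them,
 * then derive the per-gain power boundaries and the flattened PDADC
 * value list consumed by ath9k_hw_set_power_cal_table().
 */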
3598static inline void
3599ath9k_hw_get_gain_boundaries_pdadcs(struct ath_hal *ah,
3600 struct ath9k_channel *chan,
3601 struct cal_data_per_freq *pRawDataSet,
3602 u8 *bChans,
3603 u16 availPiers,
3604 u16 tPdGainOverlap,
3605 int16_t *pMinCalPower,
3606 u16 *pPdGainBoundaries,
3607 u8 *pPDADCValues,
3608 u16 numXpdGains)
3609{
3610 int i, j, k;
3611 int16_t ss;
3612 u16 idxL = 0, idxR = 0, numPiers;
3613 static u8 vpdTableL[AR5416_NUM_PD_GAINS]
3614 [AR5416_MAX_PWR_RANGE_IN_HALF_DB];
3615 static u8 vpdTableR[AR5416_NUM_PD_GAINS]
3616 [AR5416_MAX_PWR_RANGE_IN_HALF_DB];
3617 static u8 vpdTableI[AR5416_NUM_PD_GAINS]
3618 [AR5416_MAX_PWR_RANGE_IN_HALF_DB];
3619
3620 u8 *pVpdL, *pVpdR, *pPwrL, *pPwrR;
3621 u8 minPwrT4[AR5416_NUM_PD_GAINS];
3622 u8 maxPwrT4[AR5416_NUM_PD_GAINS];
3623 int16_t vpdStep;
3624 int16_t tmpVal;
3625 u16 sizeCurrVpdTable, maxIndex, tgtIndex;
3626 bool match;
3627 int16_t minDelta = 0;
3628 struct chan_centers centers;
3629
3630 ath9k_hw_get_channel_centers(ah, chan, &centers);
3631
3632 for (numPiers = 0; numPiers < availPiers; numPiers++) {
3633 if (bChans[numPiers] == AR5416_BCHAN_UNUSED)
3634 break;
3635 }
3636
3637 match = ath9k_hw_get_lower_upper_index((u8)
3638 FREQ2FBIN(centers.
3639 synth_center,
3640 IS_CHAN_2GHZ
3641 (chan)), bChans,
3642 numPiers, &idxL, &idxR);
3643
3644 if (match) {
3645 for (i = 0; i < numXpdGains; i++) {
3646 minPwrT4[i] = pRawDataSet[idxL].pwrPdg[i][0];
3647 maxPwrT4[i] = pRawDataSet[idxL].pwrPdg[i][4];
3648 ath9k_hw_fill_vpd_table(minPwrT4[i], maxPwrT4[i],
3649 pRawDataSet[idxL].
3650 pwrPdg[i],
3651 pRawDataSet[idxL].
3652 vpdPdg[i],
3653 AR5416_PD_GAIN_ICEPTS,
3654 vpdTableI[i]);
3655 }
3656 } else {
3657 for (i = 0; i < numXpdGains; i++) {
3658 pVpdL = pRawDataSet[idxL].vpdPdg[i];
3659 pPwrL = pRawDataSet[idxL].pwrPdg[i];
3660 pVpdR = pRawDataSet[idxR].vpdPdg[i];
3661 pPwrR = pRawDataSet[idxR].pwrPdg[i];
3662
3663 minPwrT4[i] = max(pPwrL[0], pPwrR[0]);
3664
3665 maxPwrT4[i] =
3666 min(pPwrL[AR5416_PD_GAIN_ICEPTS - 1],
3667 pPwrR[AR5416_PD_GAIN_ICEPTS - 1]);
3668
3669
3670 ath9k_hw_fill_vpd_table(minPwrT4[i], maxPwrT4[i],
3671 pPwrL, pVpdL,
3672 AR5416_PD_GAIN_ICEPTS,
3673 vpdTableL[i]);
3674 ath9k_hw_fill_vpd_table(minPwrT4[i], maxPwrT4[i],
3675 pPwrR, pVpdR,
3676 AR5416_PD_GAIN_ICEPTS,
3677 vpdTableR[i]);
3678
3679 for (j = 0; j <= (maxPwrT4[i] - minPwrT4[i]) / 2; j++) {
3680 vpdTableI[i][j] =
3681 (u8) (ath9k_hw_interpolate
3682 ((u16)
3683 FREQ2FBIN(centers.
3684 synth_center,
3685 IS_CHAN_2GHZ
3686 (chan)),
3687 bChans[idxL],
3688 bChans[idxR], vpdTableL[i]
3689 [j], vpdTableR[i]
3690 [j]));
3691 }
3692 }
3693 }
3694
3695 *pMinCalPower = (int16_t) (minPwrT4[0] / 2);
3696
3697 k = 0;
3698 for (i = 0; i < numXpdGains; i++) {
3699 if (i == (numXpdGains - 1))
3700 pPdGainBoundaries[i] =
3701 (u16) (maxPwrT4[i] / 2);
3702 else
3703 pPdGainBoundaries[i] =
3704 (u16) ((maxPwrT4[i] +
3705 minPwrT4[i + 1]) / 4);
3706
3707 pPdGainBoundaries[i] =
3708 min((u16) AR5416_MAX_RATE_POWER,
3709 pPdGainBoundaries[i]);
3710
3711 if ((i == 0) && !AR_SREV_5416_V20_OR_LATER(ah)) {
3712 minDelta = pPdGainBoundaries[0] - 23;
3713 pPdGainBoundaries[0] = 23;
3714 } else {
3715 minDelta = 0;
3716 }
3717
3718 if (i == 0) {
3719 if (AR_SREV_9280_10_OR_LATER(ah))
3720 ss = (int16_t) (0 - (minPwrT4[i] / 2));
3721 else
3722 ss = 0;
3723 } else {
3724 ss = (int16_t) ((pPdGainBoundaries[i - 1] -
3725 (minPwrT4[i] / 2)) -
3726 tPdGainOverlap + 1 + minDelta);
3727 }
3728 vpdStep = (int16_t) (vpdTableI[i][1] - vpdTableI[i][0]);
3729 vpdStep = (int16_t) ((vpdStep < 1) ? 1 : vpdStep);
3730
3731 while ((ss < 0) && (k < (AR5416_NUM_PDADC_VALUES - 1))) {
3732 tmpVal = (int16_t) (vpdTableI[i][0] + ss * vpdStep);
3733 pPDADCValues[k++] =
3734 (u8) ((tmpVal < 0) ? 0 : tmpVal);
3735 ss++;
3736 }
3737
3738 sizeCurrVpdTable =
3739 (u8) ((maxPwrT4[i] - minPwrT4[i]) / 2 + 1);
3740 tgtIndex = (u8) (pPdGainBoundaries[i] + tPdGainOverlap -
3741 (minPwrT4[i] / 2));
3742 maxIndex = (tgtIndex <
3743 sizeCurrVpdTable) ? tgtIndex : sizeCurrVpdTable;
3744
3745 while ((ss < maxIndex)
3746 && (k < (AR5416_NUM_PDADC_VALUES - 1))) {
3747 pPDADCValues[k++] = vpdTableI[i][ss++];
3748 }
3749
3750 vpdStep = (int16_t) (vpdTableI[i][sizeCurrVpdTable - 1] -
3751 vpdTableI[i][sizeCurrVpdTable - 2]);
3752 vpdStep = (int16_t) ((vpdStep < 1) ? 1 : vpdStep);
3753
3754 if (tgtIndex > maxIndex) {
3755 while ((ss <= tgtIndex)
3756 && (k < (AR5416_NUM_PDADC_VALUES - 1))) {
3757 tmpVal = (int16_t) ((vpdTableI[i]
3758 [sizeCurrVpdTable -
3759 1] + (ss - maxIndex +
3760 1) * vpdStep));
3761 pPDADCValues[k++] = (u8) ((tmpVal >
3762 255) ? 255 : tmpVal);
3763 ss++;
3764 }
3765 }
3766 }
3767
3768 while (i < AR5416_PD_GAINS_IN_MASK) {
3769 pPdGainBoundaries[i] = pPdGainBoundaries[i - 1];
3770 i++;
3771 }
3772
3773 while (k < AR5416_NUM_PDADC_VALUES) {
3774 pPDADCValues[k] = pPDADCValues[k - 1];
3775 k++;
3776 }
3777 return;
3778}
3779
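/*
 * Build and program the power calibration (PDADC) table.  For each TX
 * chain enabled in the EEPROM txMask this picks the 2 GHz or 5 GHz
 * per-frequency calibration piers, lets
 * ath9k_hw_get_gain_boundaries_pdadcs() derive the PD gain boundaries
 * and the 128 PDADC entries for the current channel, then writes the
 * boundaries to AR_PHY_TPCRG5 and the PDADC bytes to the 32 table
 * registers starting at AR_PHY_BASE + (672 << 2).
 */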
3780static inline bool
3781ath9k_hw_set_power_cal_table(struct ath_hal *ah,
3782 struct ar5416_eeprom *pEepData,
3783 struct ath9k_channel *chan,
3784 int16_t *pTxPowerIndexOffset)
3785{
3786 struct cal_data_per_freq *pRawDataset;
3787 u8 *pCalBChans = NULL;
3788 u16 pdGainOverlap_t2;
3789 static u8 pdadcValues[AR5416_NUM_PDADC_VALUES];
3790 u16 gainBoundaries[AR5416_PD_GAINS_IN_MASK];
3791 u16 numPiers, i, j;
3792 int16_t tMinCalPower;
3793 u16 numXpdGain, xpdMask;
3794 u16 xpdGainValues[AR5416_NUM_PD_GAINS] = { 0, 0, 0, 0 };
3795 u32 reg32, regOffset, regChainOffset;
3796 int16_t modalIdx;
3797 struct ath_hal_5416 *ahp = AH5416(ah);
3798
3799 modalIdx = IS_CHAN_2GHZ(chan) ? 1 : 0;
3800 xpdMask = pEepData->modalHeader[modalIdx].xpdGain;
3801
3802 if ((pEepData->baseEepHeader.
3803 version & AR5416_EEP_VER_MINOR_MASK) >=
3804 AR5416_EEP_MINOR_VER_2) {
3805 pdGainOverlap_t2 =
3806 pEepData->modalHeader[modalIdx].pdGainOverlap;
3807 } else {
3808 pdGainOverlap_t2 =
3809 (u16) (MS
3810 (REG_READ(ah, AR_PHY_TPCRG5),
3811 AR_PHY_TPCRG5_PD_GAIN_OVERLAP));
3812 }
3813
3814 if (IS_CHAN_2GHZ(chan)) {
3815 pCalBChans = pEepData->calFreqPier2G;
3816 numPiers = AR5416_NUM_2G_CAL_PIERS;
3817 } else {
3818 pCalBChans = pEepData->calFreqPier5G;
3819 numPiers = AR5416_NUM_5G_CAL_PIERS;
3820 }
3821
3822 numXpdGain = 0;
3823
3824 for (i = 1; i <= AR5416_PD_GAINS_IN_MASK; i++) {
3825 if ((xpdMask >> (AR5416_PD_GAINS_IN_MASK - i)) & 1) {
3826 if (numXpdGain >= AR5416_NUM_PD_GAINS)
3827 break;
3828 xpdGainValues[numXpdGain] =
3829 (u16) (AR5416_PD_GAINS_IN_MASK - i);
3830 numXpdGain++;
3831 }
3832 }
3833
3834 REG_RMW_FIELD(ah, AR_PHY_TPCRG1, AR_PHY_TPCRG1_NUM_PD_GAIN,
3835 (numXpdGain - 1) & 0x3);
3836 REG_RMW_FIELD(ah, AR_PHY_TPCRG1, AR_PHY_TPCRG1_PD_GAIN_1,
3837 xpdGainValues[0]);
3838 REG_RMW_FIELD(ah, AR_PHY_TPCRG1, AR_PHY_TPCRG1_PD_GAIN_2,
3839 xpdGainValues[1]);
3840 REG_RMW_FIELD(ah, AR_PHY_TPCRG1, AR_PHY_TPCRG1_PD_GAIN_3,
3841 xpdGainValues[2]);
3842
3843 for (i = 0; i < AR5416_MAX_CHAINS; i++) {
3844 if (AR_SREV_5416_V20_OR_LATER(ah) &&
3845 (ahp->ah_rxchainmask == 5 || ahp->ah_txchainmask == 5)
3846 && (i != 0)) {
3847 regChainOffset = (i == 1) ? 0x2000 : 0x1000;
3848 } else
3849 regChainOffset = i * 0x1000;
3850 if (pEepData->baseEepHeader.txMask & (1 << i)) {
3851 if (IS_CHAN_2GHZ(chan))
3852 pRawDataset = pEepData->calPierData2G[i];
3853 else
3854 pRawDataset = pEepData->calPierData5G[i];
3855
3856 ath9k_hw_get_gain_boundaries_pdadcs(ah, chan,
3857 pRawDataset,
3858 pCalBChans,
3859 numPiers,
3860 pdGainOverlap_t2,
3861 &tMinCalPower,
3862 gainBoundaries,
3863 pdadcValues,
3864 numXpdGain);
3865
3866 if ((i == 0) || AR_SREV_5416_V20_OR_LATER(ah)) {
3867
3868 REG_WRITE(ah,
3869 AR_PHY_TPCRG5 + regChainOffset,
3870 SM(pdGainOverlap_t2,
3871 AR_PHY_TPCRG5_PD_GAIN_OVERLAP)
3872 | SM(gainBoundaries[0],
3873 AR_PHY_TPCRG5_PD_GAIN_BOUNDARY_1)
3874 | SM(gainBoundaries[1],
3875 AR_PHY_TPCRG5_PD_GAIN_BOUNDARY_2)
3876 | SM(gainBoundaries[2],
3877 AR_PHY_TPCRG5_PD_GAIN_BOUNDARY_3)
3878 | SM(gainBoundaries[3],
3879 AR_PHY_TPCRG5_PD_GAIN_BOUNDARY_4));
3880 }
3881
3882 regOffset =
3883 AR_PHY_BASE + (672 << 2) + regChainOffset;
3884 for (j = 0; j < 32; j++) {
3885 reg32 =
3886 ((pdadcValues[4 * j + 0] & 0xFF) << 0)
3887 | ((pdadcValues[4 * j + 1] & 0xFF) <<
3888 8) | ((pdadcValues[4 * j + 2] &
3889 0xFF) << 16) |
3890 ((pdadcValues[4 * j + 3] & 0xFF) <<
3891 24);
3892 REG_WRITE(ah, regOffset, reg32);
3893
3894 DPRINTF(ah->ah_sc, ATH_DBG_PHY_IO,
3895 "PDADC (%d,%4x): %4.4x %8.8x\n",
3896 i, regChainOffset, regOffset,
3897 reg32);
3898 DPRINTF(ah->ah_sc, ATH_DBG_PHY_IO,
3899 "PDADC: Chain %d | PDADC %3d Value %3d | "
3900 "PDADC %3d Value %3d | PDADC %3d Value %3d | "
3901 "PDADC %3d Value %3d |\n",
3902 i, 4 * j, pdadcValues[4 * j],
3903 4 * j + 1, pdadcValues[4 * j + 1],
3904 4 * j + 2, pdadcValues[4 * j + 2],
3905 4 * j + 3,
3906 pdadcValues[4 * j + 3]);
3907
3908 regOffset += 4;
3909 }
3910 }
3911 }
3912 *pTxPowerIndexOffset = 0;
3913
3914 return true;
3915}
3916
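/*
 * Program the PCIe SerDes for power save.  Nothing is done for
 * non-PCIe parts, when pcie_powersave_enable == 2, or on restore.
 * AR9280 2.0 and later take their SerDes values from the ini table;
 * earlier revisions use the hard-coded register sequences below.
 * Finally AR_PCIE_PM_CTRL_ENA is set and AR_WA is written with either
 * the configured value or a per-chip default.
 */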
3917void ath9k_hw_configpcipowersave(struct ath_hal *ah, int restore)
3918{
3919 struct ath_hal_5416 *ahp = AH5416(ah);
3920 u8 i;
3921
3922	if (!ah->ah_isPciExpress)
3923 return;
3924
3925 if (ah->ah_config.pcie_powersave_enable == 2)
3926 return;
3927
3928 if (restore)
3929 return;
3930
3931 if (AR_SREV_9280_20_OR_LATER(ah)) {
3932 for (i = 0; i < ahp->ah_iniPcieSerdes.ia_rows; i++) {
3933 REG_WRITE(ah, INI_RA(&ahp->ah_iniPcieSerdes, i, 0),
3934 INI_RA(&ahp->ah_iniPcieSerdes, i, 1));
3935 }
3936 udelay(1000);
3937 } else if (AR_SREV_9280(ah)
3938 && (ah->ah_macRev == AR_SREV_REVISION_9280_10)) {
3939 REG_WRITE(ah, AR_PCIE_SERDES, 0x9248fd00);
3940 REG_WRITE(ah, AR_PCIE_SERDES, 0x24924924);
3941
3942 REG_WRITE(ah, AR_PCIE_SERDES, 0xa8000019);
3943 REG_WRITE(ah, AR_PCIE_SERDES, 0x13160820);
3944 REG_WRITE(ah, AR_PCIE_SERDES, 0xe5980560);
3945
3946 if (ah->ah_config.pcie_clock_req)
3947 REG_WRITE(ah, AR_PCIE_SERDES, 0x401deffc);
3948 else
3949 REG_WRITE(ah, AR_PCIE_SERDES, 0x401deffd);
3950
3951 REG_WRITE(ah, AR_PCIE_SERDES, 0x1aaabe40);
3952 REG_WRITE(ah, AR_PCIE_SERDES, 0xbe105554);
3953 REG_WRITE(ah, AR_PCIE_SERDES, 0x00043007);
3954
3955 REG_WRITE(ah, AR_PCIE_SERDES2, 0x00000000);
3956
3957 udelay(1000);
3958 } else {
3959 REG_WRITE(ah, AR_PCIE_SERDES, 0x9248fc00);
3960 REG_WRITE(ah, AR_PCIE_SERDES, 0x24924924);
3961 REG_WRITE(ah, AR_PCIE_SERDES, 0x28000039);
3962 REG_WRITE(ah, AR_PCIE_SERDES, 0x53160824);
3963 REG_WRITE(ah, AR_PCIE_SERDES, 0xe5980579);
3964 REG_WRITE(ah, AR_PCIE_SERDES, 0x001defff);
3965 REG_WRITE(ah, AR_PCIE_SERDES, 0x1aaabe40);
3966 REG_WRITE(ah, AR_PCIE_SERDES, 0xbe105554);
3967 REG_WRITE(ah, AR_PCIE_SERDES, 0x000e3007);
3968 REG_WRITE(ah, AR_PCIE_SERDES2, 0x00000000);
3969 }
3970
3971 REG_SET_BIT(ah, AR_PCIE_PM_CTRL, AR_PCIE_PM_CTRL_ENA);
3972
3973 if (ah->ah_config.pcie_waen) {
3974 REG_WRITE(ah, AR_WA, ah->ah_config.pcie_waen);
3975 } else {
3976 if (AR_SREV_9280(ah))
3977 REG_WRITE(ah, AR_WA, 0x0040073f);
3978 else
3979 REG_WRITE(ah, AR_WA, 0x0000073f);
3980 }
3981}
3982
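/*
 * Look up the legacy (CCK/OFDM) target power set for the current
 * channel.  If the channel matches a calibration pier exactly, or lies
 * below the first or above the last pier, that entry is copied;
 * otherwise the powers of the two surrounding piers are linearly
 * interpolated.  The HT variant below, ath9k_hw_get_target_powers(),
 * follows the same scheme.
 */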
3983static inline void
3984ath9k_hw_get_legacy_target_powers(struct ath_hal *ah,
3985 struct ath9k_channel *chan,
3986 struct cal_target_power_leg *powInfo,
3987 u16 numChannels,
3988 struct cal_target_power_leg *pNewPower,
3989 u16 numRates,
3990 bool isExtTarget)
3991{
3992 u16 clo, chi;
3993 int i;
3994 int matchIndex = -1, lowIndex = -1;
3995 u16 freq;
3996 struct chan_centers centers;
3997
3998 ath9k_hw_get_channel_centers(ah, chan, &centers);
3999 freq = (isExtTarget) ? centers.ext_center : centers.ctl_center;
4000
4001 if (freq <= ath9k_hw_fbin2freq(powInfo[0].bChannel,
4002 IS_CHAN_2GHZ(chan))) {
4003 matchIndex = 0;
4004 } else {
4005 for (i = 0; (i < numChannels)
4006 && (powInfo[i].bChannel != AR5416_BCHAN_UNUSED); i++) {
4007 if (freq ==
4008 ath9k_hw_fbin2freq(powInfo[i].bChannel,
4009 IS_CHAN_2GHZ(chan))) {
4010 matchIndex = i;
4011 break;
4012 } else if ((freq <
4013 ath9k_hw_fbin2freq(powInfo[i].bChannel,
4014 IS_CHAN_2GHZ(chan)))
4015 && (freq >
4016 ath9k_hw_fbin2freq(powInfo[i - 1].
4017 bChannel,
4018 IS_CHAN_2GHZ
4019 (chan)))) {
4020 lowIndex = i - 1;
4021 break;
4022 }
4023 }
4024 if ((matchIndex == -1) && (lowIndex == -1))
4025 matchIndex = i - 1;
4026 }
4027
4028 if (matchIndex != -1) {
4029 *pNewPower = powInfo[matchIndex];
4030 } else {
4031 clo = ath9k_hw_fbin2freq(powInfo[lowIndex].bChannel,
4032 IS_CHAN_2GHZ(chan));
4033 chi = ath9k_hw_fbin2freq(powInfo[lowIndex + 1].bChannel,
4034 IS_CHAN_2GHZ(chan));
4035
4036 for (i = 0; i < numRates; i++) {
4037 pNewPower->tPow2x[i] =
4038 (u8) ath9k_hw_interpolate(freq, clo, chi,
4039 powInfo
4040 [lowIndex].
4041 tPow2x[i],
4042 powInfo
4043 [lowIndex +
4044 1].tPow2x[i]);
4045 }
4046 }
4047}
4048
4049static inline void
4050ath9k_hw_get_target_powers(struct ath_hal *ah,
4051 struct ath9k_channel *chan,
4052 struct cal_target_power_ht *powInfo,
4053 u16 numChannels,
4054 struct cal_target_power_ht *pNewPower,
4055 u16 numRates,
4056 bool isHt40Target)
4057{
4058 u16 clo, chi;
4059 int i;
4060 int matchIndex = -1, lowIndex = -1;
4061 u16 freq;
4062 struct chan_centers centers;
4063
4064 ath9k_hw_get_channel_centers(ah, chan, &centers);
4065 freq = isHt40Target ? centers.synth_center : centers.ctl_center;
4066
4067 if (freq <=
4068 ath9k_hw_fbin2freq(powInfo[0].bChannel, IS_CHAN_2GHZ(chan))) {
4069 matchIndex = 0;
4070 } else {
4071 for (i = 0; (i < numChannels)
4072 && (powInfo[i].bChannel != AR5416_BCHAN_UNUSED); i++) {
4073 if (freq ==
4074 ath9k_hw_fbin2freq(powInfo[i].bChannel,
4075 IS_CHAN_2GHZ(chan))) {
4076 matchIndex = i;
4077 break;
4078 } else
4079 if ((freq <
4080 ath9k_hw_fbin2freq(powInfo[i].bChannel,
4081 IS_CHAN_2GHZ(chan)))
4082 && (freq >
4083 ath9k_hw_fbin2freq(powInfo[i - 1].
4084 bChannel,
4085 IS_CHAN_2GHZ
4086 (chan)))) {
4087 lowIndex = i - 1;
4088 break;
4089 }
4090 }
4091 if ((matchIndex == -1) && (lowIndex == -1))
4092 matchIndex = i - 1;
4093 }
4094
4095 if (matchIndex != -1) {
4096 *pNewPower = powInfo[matchIndex];
4097 } else {
4098 clo = ath9k_hw_fbin2freq(powInfo[lowIndex].bChannel,
4099 IS_CHAN_2GHZ(chan));
4100 chi = ath9k_hw_fbin2freq(powInfo[lowIndex + 1].bChannel,
4101 IS_CHAN_2GHZ(chan));
4102
4103 for (i = 0; i < numRates; i++) {
4104 pNewPower->tPow2x[i] =
4105 (u8) ath9k_hw_interpolate(freq, clo, chi,
4106 powInfo
4107 [lowIndex].
4108 tPow2x[i],
4109 powInfo
4110 [lowIndex +
4111 1].tPow2x[i]);
4112 }
4113 }
4114}
4115
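/*
 * Return the regulatory band-edge limit (in half-dB steps, going by the
 * "twice" naming) that applies to 'freq'.  The CTL edge list is scanned
 * for an exact frequency match; for a frequency between two edges the
 * lower edge's power is used only if its flag is set, otherwise the
 * AR5416_MAX_RATE_POWER default stands.
 */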
4116static inline u16
4117ath9k_hw_get_max_edge_power(u16 freq,
4118 struct cal_ctl_edges *pRdEdgesPower,
4119 bool is2GHz)
4120{
4121 u16 twiceMaxEdgePower = AR5416_MAX_RATE_POWER;
4122 int i;
4123
4124 for (i = 0; (i < AR5416_NUM_BAND_EDGES)
4125 && (pRdEdgesPower[i].bChannel != AR5416_BCHAN_UNUSED); i++) {
4126 if (freq == ath9k_hw_fbin2freq(pRdEdgesPower[i].bChannel,
4127 is2GHz)) {
4128 twiceMaxEdgePower = pRdEdgesPower[i].tPower;
4129 break;
4130 } else if ((i > 0)
4131 && (freq <
4132 ath9k_hw_fbin2freq(pRdEdgesPower[i].
4133 bChannel, is2GHz))) {
4134 if (ath9k_hw_fbin2freq
4135 (pRdEdgesPower[i - 1].bChannel, is2GHz) < freq
4136 && pRdEdgesPower[i - 1].flag) {
4137 twiceMaxEdgePower =
4138 pRdEdgesPower[i - 1].tPower;
4139 }
4140 break;
4141 }
4142 }
4143 return twiceMaxEdgePower;
4144}
4145
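/*
 * Fill ratesArray[] with per-rate target powers (half-dB units, per the
 * tPow2x naming) for the current channel.  The EEPROM target powers for
 * CCK, OFDM, HT20 and HT40 are first capped by the regulatory and
 * antenna-gain limit (scaledPower, reduced per extra TX chain), then by
 * the conformance-test-limit (CTL) band edges matching the current
 * mode, and finally copied out per rate index.
 */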
4146static inline bool
4147ath9k_hw_set_power_per_rate_table(struct ath_hal *ah,
4148 struct ar5416_eeprom *pEepData,
4149 struct ath9k_channel *chan,
4150 int16_t *ratesArray,
4151 u16 cfgCtl,
4152 u8 AntennaReduction,
4153 u8 twiceMaxRegulatoryPower,
4154 u8 powerLimit)
4155{
4156 u8 twiceMaxEdgePower = AR5416_MAX_RATE_POWER;
4157 static const u16 tpScaleReductionTable[5] =
4158 { 0, 3, 6, 9, AR5416_MAX_RATE_POWER };
4159
4160 int i;
4161 int8_t twiceLargestAntenna;
4162 struct cal_ctl_data *rep;
4163 struct cal_target_power_leg targetPowerOfdm, targetPowerCck = {
4164 0, { 0, 0, 0, 0}
4165 };
4166 struct cal_target_power_leg targetPowerOfdmExt = {
4167 0, { 0, 0, 0, 0} }, targetPowerCckExt = {
4168 0, { 0, 0, 0, 0 }
4169 };
4170 struct cal_target_power_ht targetPowerHt20, targetPowerHt40 = {
4171 0, {0, 0, 0, 0}
4172 };
4173 u8 scaledPower = 0, minCtlPower, maxRegAllowedPower;
4174 u16 ctlModesFor11a[] =
4175 { CTL_11A, CTL_5GHT20, CTL_11A_EXT, CTL_5GHT40 };
4176 u16 ctlModesFor11g[] =
4177 { CTL_11B, CTL_11G, CTL_2GHT20, CTL_11B_EXT, CTL_11G_EXT,
4178 CTL_2GHT40
4179 };
4180 u16 numCtlModes, *pCtlMode, ctlMode, freq;
4181 struct chan_centers centers;
4182 int tx_chainmask;
4183 u8 twiceMinEdgePower;
4184 struct ath_hal_5416 *ahp = AH5416(ah);
4185
4186 tx_chainmask = ahp->ah_txchainmask;
4187
4188 ath9k_hw_get_channel_centers(ah, chan, &centers);
4189
4190 twiceLargestAntenna = max(
4191 pEepData->modalHeader
4192 [IS_CHAN_2GHZ(chan)].antennaGainCh[0],
4193 pEepData->modalHeader
4194 [IS_CHAN_2GHZ(chan)].antennaGainCh[1]);
4195
4196 twiceLargestAntenna = max((u8) twiceLargestAntenna,
4197 pEepData->modalHeader
4198 [IS_CHAN_2GHZ(chan)].antennaGainCh[2]);
4199
4200 twiceLargestAntenna =
4201 (int8_t) min(AntennaReduction - twiceLargestAntenna, 0);
4202
4203 maxRegAllowedPower = twiceMaxRegulatoryPower + twiceLargestAntenna;
4204
4205 if (ah->ah_tpScale != ATH9K_TP_SCALE_MAX) {
4206 maxRegAllowedPower -=
4207 (tpScaleReductionTable[(ah->ah_tpScale)] * 2);
4208 }
4209
4210 scaledPower = min(powerLimit, maxRegAllowedPower);
4211
4212 switch (ar5416_get_ntxchains(tx_chainmask)) {
4213 case 1:
4214 break;
4215 case 2:
4216 scaledPower -=
4217 pEepData->modalHeader[IS_CHAN_2GHZ(chan)].
4218 pwrDecreaseFor2Chain;
4219 break;
4220 case 3:
4221 scaledPower -=
4222 pEepData->modalHeader[IS_CHAN_2GHZ(chan)].
4223 pwrDecreaseFor3Chain;
4224 break;
4225 }
4226
4227 scaledPower = max(0, (int32_t) scaledPower);
4228
4229 if (IS_CHAN_2GHZ(chan)) {
4230 numCtlModes =
4231 ARRAY_SIZE(ctlModesFor11g) -
4232 SUB_NUM_CTL_MODES_AT_2G_40;
4233 pCtlMode = ctlModesFor11g;
4234
4235 ath9k_hw_get_legacy_target_powers(ah, chan,
4236 pEepData->
4237 calTargetPowerCck,
4238 AR5416_NUM_2G_CCK_TARGET_POWERS,
4239 &targetPowerCck, 4,
4240 false);
4241 ath9k_hw_get_legacy_target_powers(ah, chan,
4242 pEepData->
4243 calTargetPower2G,
4244 AR5416_NUM_2G_20_TARGET_POWERS,
4245 &targetPowerOfdm, 4,
4246 false);
4247 ath9k_hw_get_target_powers(ah, chan,
4248 pEepData->calTargetPower2GHT20,
4249 AR5416_NUM_2G_20_TARGET_POWERS,
4250 &targetPowerHt20, 8, false);
4251
4252 if (IS_CHAN_HT40(chan)) {
4253 numCtlModes = ARRAY_SIZE(ctlModesFor11g);
4254 ath9k_hw_get_target_powers(ah, chan,
4255 pEepData->
4256 calTargetPower2GHT40,
4257 AR5416_NUM_2G_40_TARGET_POWERS,
4258 &targetPowerHt40, 8,
4259 true);
4260 ath9k_hw_get_legacy_target_powers(ah, chan,
4261 pEepData->
4262 calTargetPowerCck,
4263 AR5416_NUM_2G_CCK_TARGET_POWERS,
4264 &targetPowerCckExt,
4265 4, true);
4266 ath9k_hw_get_legacy_target_powers(ah, chan,
4267 pEepData->
4268 calTargetPower2G,
4269 AR5416_NUM_2G_20_TARGET_POWERS,
4270 &targetPowerOfdmExt,
4271 4, true);
4272 }
4273 } else {
4274
4275 numCtlModes =
4276 ARRAY_SIZE(ctlModesFor11a) -
4277 SUB_NUM_CTL_MODES_AT_5G_40;
4278 pCtlMode = ctlModesFor11a;
4279
4280 ath9k_hw_get_legacy_target_powers(ah, chan,
4281 pEepData->
4282 calTargetPower5G,
4283 AR5416_NUM_5G_20_TARGET_POWERS,
4284 &targetPowerOfdm, 4,
4285 false);
4286 ath9k_hw_get_target_powers(ah, chan,
4287 pEepData->calTargetPower5GHT20,
4288 AR5416_NUM_5G_20_TARGET_POWERS,
4289 &targetPowerHt20, 8, false);
4290
4291 if (IS_CHAN_HT40(chan)) {
4292 numCtlModes = ARRAY_SIZE(ctlModesFor11a);
4293 ath9k_hw_get_target_powers(ah, chan,
4294 pEepData->
4295 calTargetPower5GHT40,
4296 AR5416_NUM_5G_40_TARGET_POWERS,
4297 &targetPowerHt40, 8,
4298 true);
4299 ath9k_hw_get_legacy_target_powers(ah, chan,
4300 pEepData->
4301 calTargetPower5G,
4302 AR5416_NUM_5G_20_TARGET_POWERS,
4303 &targetPowerOfdmExt,
4304 4, true);
4305 }
4306 }
4307
4308 for (ctlMode = 0; ctlMode < numCtlModes; ctlMode++) {
4309 bool isHt40CtlMode =
4310 (pCtlMode[ctlMode] == CTL_5GHT40)
4311 || (pCtlMode[ctlMode] == CTL_2GHT40);
4312 if (isHt40CtlMode)
4313 freq = centers.synth_center;
4314 else if (pCtlMode[ctlMode] & EXT_ADDITIVE)
4315 freq = centers.ext_center;
4316 else
4317 freq = centers.ctl_center;
4318
4319 if (ar5416_get_eep_ver(ahp) == 14
4320 && ar5416_get_eep_rev(ahp) <= 2)
4321 twiceMaxEdgePower = AR5416_MAX_RATE_POWER;
4322
4323 DPRINTF(ah->ah_sc, ATH_DBG_POWER_MGMT,
4324 "LOOP-Mode ctlMode %d < %d, isHt40CtlMode %d, "
4325 "EXT_ADDITIVE %d\n",
4326 ctlMode, numCtlModes, isHt40CtlMode,
4327 (pCtlMode[ctlMode] & EXT_ADDITIVE));
4328
4329 for (i = 0; (i < AR5416_NUM_CTLS) && pEepData->ctlIndex[i];
4330 i++) {
4331 DPRINTF(ah->ah_sc, ATH_DBG_POWER_MGMT,
4332 " LOOP-Ctlidx %d: cfgCtl 0x%2.2x "
4333 "pCtlMode 0x%2.2x ctlIndex 0x%2.2x "
4334 "chan %d\n",
4335 i, cfgCtl, pCtlMode[ctlMode],
4336 pEepData->ctlIndex[i], chan->channel);
4337
4338 if ((((cfgCtl & ~CTL_MODE_M) |
4339 (pCtlMode[ctlMode] & CTL_MODE_M)) ==
4340 pEepData->ctlIndex[i])
4341 ||
4342 (((cfgCtl & ~CTL_MODE_M) |
4343 (pCtlMode[ctlMode] & CTL_MODE_M)) ==
4344 ((pEepData->
4345 ctlIndex[i] & CTL_MODE_M) | SD_NO_CTL))) {
4346 rep = &(pEepData->ctlData[i]);
4347
4348 twiceMinEdgePower =
4349 ath9k_hw_get_max_edge_power(freq,
4350 rep->
4351 ctlEdges
4352 [ar5416_get_ntxchains
4353 (tx_chainmask)
4354 - 1],
4355 IS_CHAN_2GHZ
4356 (chan));
4357
4358 DPRINTF(ah->ah_sc, ATH_DBG_POWER_MGMT,
4359 " MATCH-EE_IDX %d: ch %d is2 %d "
4360 "2xMinEdge %d chainmask %d chains %d\n",
4361 i, freq, IS_CHAN_2GHZ(chan),
4362 twiceMinEdgePower, tx_chainmask,
4363 ar5416_get_ntxchains
4364 (tx_chainmask));
4365 if ((cfgCtl & ~CTL_MODE_M) == SD_NO_CTL) {
4366 twiceMaxEdgePower =
4367 min(twiceMaxEdgePower,
4368 twiceMinEdgePower);
4369 } else {
4370 twiceMaxEdgePower =
4371 twiceMinEdgePower;
4372 break;
4373 }
4374 }
4375 }
4376
4377 minCtlPower = min(twiceMaxEdgePower, scaledPower);
4378
4379 DPRINTF(ah->ah_sc, ATH_DBG_POWER_MGMT,
4380 " SEL-Min ctlMode %d pCtlMode %d "
4381 "2xMaxEdge %d sP %d minCtlPwr %d\n",
4382 ctlMode, pCtlMode[ctlMode], twiceMaxEdgePower,
4383 scaledPower, minCtlPower);
4384
4385 switch (pCtlMode[ctlMode]) {
4386 case CTL_11B:
4387 for (i = 0; i < ARRAY_SIZE(targetPowerCck.tPow2x);
4388 i++) {
4389 targetPowerCck.tPow2x[i] =
4390 min(targetPowerCck.tPow2x[i],
4391 minCtlPower);
4392 }
4393 break;
4394 case CTL_11A:
4395 case CTL_11G:
4396 for (i = 0; i < ARRAY_SIZE(targetPowerOfdm.tPow2x);
4397 i++) {
4398 targetPowerOfdm.tPow2x[i] =
4399 min(targetPowerOfdm.tPow2x[i],
4400 minCtlPower);
4401 }
4402 break;
4403 case CTL_5GHT20:
4404 case CTL_2GHT20:
4405 for (i = 0; i < ARRAY_SIZE(targetPowerHt20.tPow2x);
4406 i++) {
4407 targetPowerHt20.tPow2x[i] =
4408 min(targetPowerHt20.tPow2x[i],
4409 minCtlPower);
4410 }
4411 break;
4412 case CTL_11B_EXT:
4413 targetPowerCckExt.tPow2x[0] =
4414 min(targetPowerCckExt.tPow2x[0], minCtlPower);
4415 break;
4416 case CTL_11A_EXT:
4417 case CTL_11G_EXT:
4418 targetPowerOfdmExt.tPow2x[0] =
4419 min(targetPowerOfdmExt.tPow2x[0], minCtlPower);
4420 break;
4421 case CTL_5GHT40:
4422 case CTL_2GHT40:
4423 for (i = 0; i < ARRAY_SIZE(targetPowerHt40.tPow2x);
4424 i++) {
4425 targetPowerHt40.tPow2x[i] =
4426 min(targetPowerHt40.tPow2x[i],
4427 minCtlPower);
4428 }
4429 break;
4430 default:
4431 break;
4432 }
4433 }
4434
4435 ratesArray[rate6mb] = ratesArray[rate9mb] = ratesArray[rate12mb] =
4436 ratesArray[rate18mb] = ratesArray[rate24mb] =
4437 targetPowerOfdm.tPow2x[0];
4438 ratesArray[rate36mb] = targetPowerOfdm.tPow2x[1];
4439 ratesArray[rate48mb] = targetPowerOfdm.tPow2x[2];
4440 ratesArray[rate54mb] = targetPowerOfdm.tPow2x[3];
4441 ratesArray[rateXr] = targetPowerOfdm.tPow2x[0];
4442
4443 for (i = 0; i < ARRAY_SIZE(targetPowerHt20.tPow2x); i++)
4444 ratesArray[rateHt20_0 + i] = targetPowerHt20.tPow2x[i];
4445
4446 if (IS_CHAN_2GHZ(chan)) {
4447 ratesArray[rate1l] = targetPowerCck.tPow2x[0];
4448 ratesArray[rate2s] = ratesArray[rate2l] =
4449 targetPowerCck.tPow2x[1];
4450 ratesArray[rate5_5s] = ratesArray[rate5_5l] =
4451 targetPowerCck.tPow2x[2];
4452
4453 ratesArray[rate11s] = ratesArray[rate11l] =
4454 targetPowerCck.tPow2x[3];
4455
4456 }
4457 if (IS_CHAN_HT40(chan)) {
4458 for (i = 0; i < ARRAY_SIZE(targetPowerHt40.tPow2x); i++) {
4459 ratesArray[rateHt40_0 + i] =
4460 targetPowerHt40.tPow2x[i];
4461 }
4462 ratesArray[rateDupOfdm] = targetPowerHt40.tPow2x[0];
4463 ratesArray[rateDupCck] = targetPowerHt40.tPow2x[0];
4464 ratesArray[rateExtOfdm] = targetPowerOfdmExt.tPow2x[0];
4465 if (IS_CHAN_2GHZ(chan)) {
4466 ratesArray[rateExtCck] =
4467 targetPowerCckExt.tPow2x[0];
4468 }
4469 }
4470 return true;
4471}
4472
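/*
 * Top-level transmit power setup: compute the per-rate power table,
 * program the per-frequency calibration (PDADC) table, apply the
 * resulting index offset and the AR9280 power-table offset, then write
 * the AR_PHY_POWER_TX_RATE1..9 registers.  ah_maxPowerLevel ends up as
 * the base-rate power (6 Mb/s, HT20 MCS0 or HT40 MCS0), with the
 * AR9280 offset added back.
 */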
4473static int
4474ath9k_hw_set_txpower(struct ath_hal *ah,
4475 struct ar5416_eeprom *pEepData,
4476 struct ath9k_channel *chan,
4477 u16 cfgCtl,
4478 u8 twiceAntennaReduction,
4479 u8 twiceMaxRegulatoryPower,
4480 u8 powerLimit)
4481{
4482 struct modal_eep_header *pModal =
4483 &(pEepData->modalHeader[IS_CHAN_2GHZ(chan)]);
4484 int16_t ratesArray[Ar5416RateSize];
4485 int16_t txPowerIndexOffset = 0;
4486 u8 ht40PowerIncForPdadc = 2;
4487 int i;
4488
4489 memset(ratesArray, 0, sizeof(ratesArray));
4490
4491 if ((pEepData->baseEepHeader.
4492 version & AR5416_EEP_VER_MINOR_MASK) >=
4493 AR5416_EEP_MINOR_VER_2) {
4494 ht40PowerIncForPdadc = pModal->ht40PowerIncForPdadc;
4495 }
4496
4497 if (!ath9k_hw_set_power_per_rate_table(ah, pEepData, chan,
4498 &ratesArray[0], cfgCtl,
4499 twiceAntennaReduction,
4500 twiceMaxRegulatoryPower,
4501 powerLimit)) {
4502 DPRINTF(ah->ah_sc, ATH_DBG_EEPROM,
4503 "ath9k_hw_set_txpower: unable to set "
4504 "tx power per rate table\n");
4505 return -EIO;
4506 }
4507
4508 if (!ath9k_hw_set_power_cal_table
4509 (ah, pEepData, chan, &txPowerIndexOffset)) {
4510 DPRINTF(ah->ah_sc, ATH_DBG_EEPROM,
4511 "ath9k_hw_set_txpower: unable to set power table\n");
4512 return -EIO;
4513 }
4514
4515 for (i = 0; i < ARRAY_SIZE(ratesArray); i++) {
4516 ratesArray[i] =
4517 (int16_t) (txPowerIndexOffset + ratesArray[i]);
4518 if (ratesArray[i] > AR5416_MAX_RATE_POWER)
4519 ratesArray[i] = AR5416_MAX_RATE_POWER;
4520 }
4521
4522 if (AR_SREV_9280_10_OR_LATER(ah)) {
4523 for (i = 0; i < Ar5416RateSize; i++)
4524 ratesArray[i] -= AR5416_PWR_TABLE_OFFSET * 2;
4525 }
4526
4527 REG_WRITE(ah, AR_PHY_POWER_TX_RATE1,
4528 ATH9K_POW_SM(ratesArray[rate18mb], 24)
4529 | ATH9K_POW_SM(ratesArray[rate12mb], 16)
4530 | ATH9K_POW_SM(ratesArray[rate9mb], 8)
4531 | ATH9K_POW_SM(ratesArray[rate6mb], 0)
4532 );
4533 REG_WRITE(ah, AR_PHY_POWER_TX_RATE2,
4534 ATH9K_POW_SM(ratesArray[rate54mb], 24)
4535 | ATH9K_POW_SM(ratesArray[rate48mb], 16)
4536 | ATH9K_POW_SM(ratesArray[rate36mb], 8)
4537 | ATH9K_POW_SM(ratesArray[rate24mb], 0)
4538 );
4539
4540 if (IS_CHAN_2GHZ(chan)) {
4541 REG_WRITE(ah, AR_PHY_POWER_TX_RATE3,
4542 ATH9K_POW_SM(ratesArray[rate2s], 24)
4543 | ATH9K_POW_SM(ratesArray[rate2l], 16)
4544 | ATH9K_POW_SM(ratesArray[rateXr], 8)
4545 | ATH9K_POW_SM(ratesArray[rate1l], 0)
4546 );
4547 REG_WRITE(ah, AR_PHY_POWER_TX_RATE4,
4548 ATH9K_POW_SM(ratesArray[rate11s], 24)
4549 | ATH9K_POW_SM(ratesArray[rate11l], 16)
4550 | ATH9K_POW_SM(ratesArray[rate5_5s], 8)
4551 | ATH9K_POW_SM(ratesArray[rate5_5l], 0)
4552 );
4553 }
4554
4555 REG_WRITE(ah, AR_PHY_POWER_TX_RATE5,
4556 ATH9K_POW_SM(ratesArray[rateHt20_3], 24)
4557 | ATH9K_POW_SM(ratesArray[rateHt20_2], 16)
4558 | ATH9K_POW_SM(ratesArray[rateHt20_1], 8)
4559 | ATH9K_POW_SM(ratesArray[rateHt20_0], 0)
4560 );
4561 REG_WRITE(ah, AR_PHY_POWER_TX_RATE6,
4562 ATH9K_POW_SM(ratesArray[rateHt20_7], 24)
4563 | ATH9K_POW_SM(ratesArray[rateHt20_6], 16)
4564 | ATH9K_POW_SM(ratesArray[rateHt20_5], 8)
4565 | ATH9K_POW_SM(ratesArray[rateHt20_4], 0)
4566 );
4567
4568 if (IS_CHAN_HT40(chan)) {
4569 REG_WRITE(ah, AR_PHY_POWER_TX_RATE7,
4570 ATH9K_POW_SM(ratesArray[rateHt40_3] +
4571 ht40PowerIncForPdadc, 24)
4572 | ATH9K_POW_SM(ratesArray[rateHt40_2] +
4573 ht40PowerIncForPdadc, 16)
4574 | ATH9K_POW_SM(ratesArray[rateHt40_1] +
4575 ht40PowerIncForPdadc, 8)
4576 | ATH9K_POW_SM(ratesArray[rateHt40_0] +
4577 ht40PowerIncForPdadc, 0)
4578 );
4579 REG_WRITE(ah, AR_PHY_POWER_TX_RATE8,
4580 ATH9K_POW_SM(ratesArray[rateHt40_7] +
4581 ht40PowerIncForPdadc, 24)
4582 | ATH9K_POW_SM(ratesArray[rateHt40_6] +
4583 ht40PowerIncForPdadc, 16)
4584 | ATH9K_POW_SM(ratesArray[rateHt40_5] +
4585 ht40PowerIncForPdadc, 8)
4586 | ATH9K_POW_SM(ratesArray[rateHt40_4] +
4587 ht40PowerIncForPdadc, 0)
4588 );
4589
4590 REG_WRITE(ah, AR_PHY_POWER_TX_RATE9,
4591 ATH9K_POW_SM(ratesArray[rateExtOfdm], 24)
4592 | ATH9K_POW_SM(ratesArray[rateExtCck], 16)
4593 | ATH9K_POW_SM(ratesArray[rateDupOfdm], 8)
4594 | ATH9K_POW_SM(ratesArray[rateDupCck], 0)
4595 );
4596 }
4597
4598 REG_WRITE(ah, AR_PHY_POWER_TX_SUB,
4599 ATH9K_POW_SM(pModal->pwrDecreaseFor3Chain, 6)
4600 | ATH9K_POW_SM(pModal->pwrDecreaseFor2Chain, 0)
4601 );
4602
4603 i = rate6mb;
4604 if (IS_CHAN_HT40(chan))
4605 i = rateHt40_0;
4606 else if (IS_CHAN_HT20(chan))
4607 i = rateHt20_0;
4608
4609 if (AR_SREV_9280_10_OR_LATER(ah))
4610 ah->ah_maxPowerLevel =
4611 ratesArray[i] + AR5416_PWR_TABLE_OFFSET * 2;
4612 else
4613 ah->ah_maxPowerLevel = ratesArray[i];
4614
4615 return 0;
4616}
4617
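/*
 * Split the scaled delta-slope coefficient into the mantissa/exponent
 * pair expected by the PHY timing registers.  The exponent is derived
 * from the position of the most significant set bit of coef_scaled
 * relative to COEF_SCALE_S, the mantissa is coef_scaled rounded at the
 * resulting precision, and the register field takes coef_exp - 16.
 */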
4618static inline void ath9k_hw_get_delta_slope_vals(struct ath_hal *ah,
4619 u32 coef_scaled,
4620 u32 *coef_mantissa,
4621 u32 *coef_exponent)
4622{
4623 u32 coef_exp, coef_man;
4624
4625 for (coef_exp = 31; coef_exp > 0; coef_exp--)
4626 if ((coef_scaled >> coef_exp) & 0x1)
4627 break;
4628
4629 coef_exp = 14 - (coef_exp - COEF_SCALE_S);
4630
4631 coef_man = coef_scaled + (1 << (COEF_SCALE_S - coef_exp - 1));
4632
4633 *coef_mantissa = coef_man >> (COEF_SCALE_S - coef_exp);
4634 *coef_exponent = coef_exp - 16;
4635}
4636
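/*
 * Program the OFDM delta-slope coefficients for the current channel.
 * coef_scaled is 0x64000000 (100 MHz scaled by 2^24, halved or
 * quartered for half- and quarter-rate channels) divided by the synth
 * centre frequency; it is written to AR_PHY_TIMING3, and 9/10 of it to
 * the AR_PHY_HALFGI fields.
 */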
4637static void
4638ath9k_hw_set_delta_slope(struct ath_hal *ah,
4639 struct ath9k_channel *chan)
4640{
4641 u32 coef_scaled, ds_coef_exp, ds_coef_man;
4642 u32 clockMhzScaled = 0x64000000;
4643 struct chan_centers centers;
4644
4645 if (IS_CHAN_HALF_RATE(chan))
4646 clockMhzScaled = clockMhzScaled >> 1;
4647 else if (IS_CHAN_QUARTER_RATE(chan))
4648 clockMhzScaled = clockMhzScaled >> 2;
4649
4650 ath9k_hw_get_channel_centers(ah, chan, &centers);
4651 coef_scaled = clockMhzScaled / centers.synth_center;
4652
4653 ath9k_hw_get_delta_slope_vals(ah, coef_scaled, &ds_coef_man,
4654 &ds_coef_exp);
4655
4656 REG_RMW_FIELD(ah, AR_PHY_TIMING3,
4657 AR_PHY_TIMING3_DSC_MAN, ds_coef_man);
4658 REG_RMW_FIELD(ah, AR_PHY_TIMING3,
4659 AR_PHY_TIMING3_DSC_EXP, ds_coef_exp);
4660
4661 coef_scaled = (9 * coef_scaled) / 10;
4662
4663 ath9k_hw_get_delta_slope_vals(ah, coef_scaled, &ds_coef_man,
4664 &ds_coef_exp);
4665
4666 REG_RMW_FIELD(ah, AR_PHY_HALFGI,
4667 AR_PHY_HALFGI_DSC_MAN, ds_coef_man);
4668 REG_RMW_FIELD(ah, AR_PHY_HALFGI,
4669 AR_PHY_HALFGI_DSC_EXP, ds_coef_exp);
4670}
4671
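/*
 * AR9280 spur mitigation.  Each EEPROM spur channel is converted to an
 * offset from the synth centre; the first one inside the HT20/HT40
 * bound is programmed into the spur RSSI/filter logic
 * (AR_PHY_TIMING_CTRL4, AR_PHY_SPUR_REG, AR_PHY_TIMING11), and the
 * pilot, channel and Viterbi bin masks around the spur bin are built
 * and written.  A pre-AR9280 variant follows below.
 */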
4672static void ath9k_hw_9280_spur_mitigate(struct ath_hal *ah,
4673 struct ath9k_channel *chan)
4674{
4675 int bb_spur = AR_NO_SPUR;
4676 int freq;
4677 int bin, cur_bin;
4678 int bb_spur_off, spur_subchannel_sd;
4679 int spur_freq_sd;
4680 int spur_delta_phase;
4681 int denominator;
4682 int upper, lower, cur_vit_mask;
4683 int tmp, newVal;
4684 int i;
4685 int pilot_mask_reg[4] = { AR_PHY_TIMING7, AR_PHY_TIMING8,
4686 AR_PHY_PILOT_MASK_01_30, AR_PHY_PILOT_MASK_31_60
4687 };
4688 int chan_mask_reg[4] = { AR_PHY_TIMING9, AR_PHY_TIMING10,
4689 AR_PHY_CHANNEL_MASK_01_30, AR_PHY_CHANNEL_MASK_31_60
4690 };
4691 int inc[4] = { 0, 100, 0, 0 };
4692 struct chan_centers centers;
4693
4694 int8_t mask_m[123];
4695 int8_t mask_p[123];
4696 int8_t mask_amt;
4697 int tmp_mask;
4698 int cur_bb_spur;
4699 bool is2GHz = IS_CHAN_2GHZ(chan);
4700
4701 memset(&mask_m, 0, sizeof(int8_t) * 123);
4702 memset(&mask_p, 0, sizeof(int8_t) * 123);
4703
4704 ath9k_hw_get_channel_centers(ah, chan, &centers);
4705 freq = centers.synth_center;
4706
4707 ah->ah_config.spurmode = SPUR_ENABLE_EEPROM;
4708 for (i = 0; i < AR_EEPROM_MODAL_SPURS; i++) {
4709 cur_bb_spur = ath9k_hw_eeprom_get_spur_chan(ah, i, is2GHz);
4710
4711 if (is2GHz)
4712 cur_bb_spur = (cur_bb_spur / 10) + AR_BASE_FREQ_2GHZ;
4713 else
4714 cur_bb_spur = (cur_bb_spur / 10) + AR_BASE_FREQ_5GHZ;
4715
4716 if (AR_NO_SPUR == cur_bb_spur)
4717 break;
4718 cur_bb_spur = cur_bb_spur - freq;
4719
4720 if (IS_CHAN_HT40(chan)) {
4721 if ((cur_bb_spur > -AR_SPUR_FEEQ_BOUND_HT40) &&
4722 (cur_bb_spur < AR_SPUR_FEEQ_BOUND_HT40)) {
4723 bb_spur = cur_bb_spur;
4724 break;
4725 }
4726 } else if ((cur_bb_spur > -AR_SPUR_FEEQ_BOUND_HT20) &&
4727 (cur_bb_spur < AR_SPUR_FEEQ_BOUND_HT20)) {
4728 bb_spur = cur_bb_spur;
4729 break;
4730 }
4731 }
4732
4733 if (AR_NO_SPUR == bb_spur) {
4734 REG_CLR_BIT(ah, AR_PHY_FORCE_CLKEN_CCK,
4735 AR_PHY_FORCE_CLKEN_CCK_MRC_MUX);
4736 return;
4737 } else {
4738 REG_CLR_BIT(ah, AR_PHY_FORCE_CLKEN_CCK,
4739 AR_PHY_FORCE_CLKEN_CCK_MRC_MUX);
4740 }
4741
4742 bin = bb_spur * 320;
4743
4744 tmp = REG_READ(ah, AR_PHY_TIMING_CTRL4(0));
4745
4746 newVal = tmp | (AR_PHY_TIMING_CTRL4_ENABLE_SPUR_RSSI |
4747 AR_PHY_TIMING_CTRL4_ENABLE_SPUR_FILTER |
4748 AR_PHY_TIMING_CTRL4_ENABLE_CHAN_MASK |
4749 AR_PHY_TIMING_CTRL4_ENABLE_PILOT_MASK);
4750 REG_WRITE(ah, AR_PHY_TIMING_CTRL4(0), newVal);
4751
4752 newVal = (AR_PHY_SPUR_REG_MASK_RATE_CNTL |
4753 AR_PHY_SPUR_REG_ENABLE_MASK_PPM |
4754 AR_PHY_SPUR_REG_MASK_RATE_SELECT |
4755 AR_PHY_SPUR_REG_ENABLE_VIT_SPUR_RSSI |
4756 SM(SPUR_RSSI_THRESH, AR_PHY_SPUR_REG_SPUR_RSSI_THRESH));
4757 REG_WRITE(ah, AR_PHY_SPUR_REG, newVal);
4758
4759 if (IS_CHAN_HT40(chan)) {
4760 if (bb_spur < 0) {
4761 spur_subchannel_sd = 1;
4762 bb_spur_off = bb_spur + 10;
4763 } else {
4764 spur_subchannel_sd = 0;
4765 bb_spur_off = bb_spur - 10;
4766 }
4767 } else {
4768 spur_subchannel_sd = 0;
4769 bb_spur_off = bb_spur;
4770 }
4771
4772 if (IS_CHAN_HT40(chan))
4773 spur_delta_phase =
4774 ((bb_spur * 262144) /
4775 10) & AR_PHY_TIMING11_SPUR_DELTA_PHASE;
4776 else
4777 spur_delta_phase =
4778 ((bb_spur * 524288) /
4779 10) & AR_PHY_TIMING11_SPUR_DELTA_PHASE;
4780
4781 denominator = IS_CHAN_2GHZ(chan) ? 44 : 40;
4782 spur_freq_sd = ((bb_spur_off * 2048) / denominator) & 0x3ff;
4783
4784 newVal = (AR_PHY_TIMING11_USE_SPUR_IN_AGC |
4785 SM(spur_freq_sd, AR_PHY_TIMING11_SPUR_FREQ_SD) |
4786 SM(spur_delta_phase, AR_PHY_TIMING11_SPUR_DELTA_PHASE));
4787 REG_WRITE(ah, AR_PHY_TIMING11, newVal);
4788
4789 newVal = spur_subchannel_sd << AR_PHY_SFCORR_SPUR_SUBCHNL_SD_S;
4790 REG_WRITE(ah, AR_PHY_SFCORR_EXT, newVal);
4791
4792 cur_bin = -6000;
4793 upper = bin + 100;
4794 lower = bin - 100;
4795
4796 for (i = 0; i < 4; i++) {
4797 int pilot_mask = 0;
4798 int chan_mask = 0;
4799 int bp = 0;
4800 for (bp = 0; bp < 30; bp++) {
4801 if ((cur_bin > lower) && (cur_bin < upper)) {
4802 pilot_mask = pilot_mask | 0x1 << bp;
4803 chan_mask = chan_mask | 0x1 << bp;
4804 }
4805 cur_bin += 100;
4806 }
4807 cur_bin += inc[i];
4808 REG_WRITE(ah, pilot_mask_reg[i], pilot_mask);
4809 REG_WRITE(ah, chan_mask_reg[i], chan_mask);
4810 }
4811
4812 cur_vit_mask = 6100;
4813 upper = bin + 120;
4814 lower = bin - 120;
4815
4816 for (i = 0; i < 123; i++) {
4817 if ((cur_vit_mask > lower) && (cur_vit_mask < upper)) {
4818
4819 /* workaround for gcc bug #37014 */
4820 volatile int tmp = abs(cur_vit_mask - bin);
4821
4822 if (tmp < 75)
4823 mask_amt = 1;
4824 else
4825 mask_amt = 0;
4826 if (cur_vit_mask < 0)
4827 mask_m[abs(cur_vit_mask / 100)] = mask_amt;
4828 else
4829 mask_p[cur_vit_mask / 100] = mask_amt;
4830 }
4831 cur_vit_mask -= 100;
4832 }
4833
4834 tmp_mask = (mask_m[46] << 30) | (mask_m[47] << 28)
4835 | (mask_m[48] << 26) | (mask_m[49] << 24)
4836 | (mask_m[50] << 22) | (mask_m[51] << 20)
4837 | (mask_m[52] << 18) | (mask_m[53] << 16)
4838 | (mask_m[54] << 14) | (mask_m[55] << 12)
4839 | (mask_m[56] << 10) | (mask_m[57] << 8)
4840 | (mask_m[58] << 6) | (mask_m[59] << 4)
4841 | (mask_m[60] << 2) | (mask_m[61] << 0);
4842 REG_WRITE(ah, AR_PHY_BIN_MASK_1, tmp_mask);
4843 REG_WRITE(ah, AR_PHY_VIT_MASK2_M_46_61, tmp_mask);
4844
4845 tmp_mask = (mask_m[31] << 28)
4846 | (mask_m[32] << 26) | (mask_m[33] << 24)
4847 | (mask_m[34] << 22) | (mask_m[35] << 20)
4848 | (mask_m[36] << 18) | (mask_m[37] << 16)
4849 | (mask_m[48] << 14) | (mask_m[39] << 12)
4850 | (mask_m[40] << 10) | (mask_m[41] << 8)
4851 | (mask_m[42] << 6) | (mask_m[43] << 4)
4852 | (mask_m[44] << 2) | (mask_m[45] << 0);
4853 REG_WRITE(ah, AR_PHY_BIN_MASK_2, tmp_mask);
4854 REG_WRITE(ah, AR_PHY_MASK2_M_31_45, tmp_mask);
4855
4856 tmp_mask = (mask_m[16] << 30) | (mask_m[16] << 28)
4857 | (mask_m[18] << 26) | (mask_m[18] << 24)
4858 | (mask_m[20] << 22) | (mask_m[20] << 20)
4859 | (mask_m[22] << 18) | (mask_m[22] << 16)
4860 | (mask_m[24] << 14) | (mask_m[24] << 12)
4861 | (mask_m[25] << 10) | (mask_m[26] << 8)
4862 | (mask_m[27] << 6) | (mask_m[28] << 4)
4863 | (mask_m[29] << 2) | (mask_m[30] << 0);
4864 REG_WRITE(ah, AR_PHY_BIN_MASK_3, tmp_mask);
4865 REG_WRITE(ah, AR_PHY_MASK2_M_16_30, tmp_mask);
4866
4867 tmp_mask = (mask_m[0] << 30) | (mask_m[1] << 28)
4868 | (mask_m[2] << 26) | (mask_m[3] << 24)
4869 | (mask_m[4] << 22) | (mask_m[5] << 20)
4870 | (mask_m[6] << 18) | (mask_m[7] << 16)
4871 | (mask_m[8] << 14) | (mask_m[9] << 12)
4872 | (mask_m[10] << 10) | (mask_m[11] << 8)
4873 | (mask_m[12] << 6) | (mask_m[13] << 4)
4874 | (mask_m[14] << 2) | (mask_m[15] << 0);
4875 REG_WRITE(ah, AR_PHY_MASK_CTL, tmp_mask);
4876 REG_WRITE(ah, AR_PHY_MASK2_M_00_15, tmp_mask);
4877
4878 tmp_mask = (mask_p[15] << 28)
4879 | (mask_p[14] << 26) | (mask_p[13] << 24)
4880 | (mask_p[12] << 22) | (mask_p[11] << 20)
4881 | (mask_p[10] << 18) | (mask_p[9] << 16)
4882 | (mask_p[8] << 14) | (mask_p[7] << 12)
4883 | (mask_p[6] << 10) | (mask_p[5] << 8)
4884 | (mask_p[4] << 6) | (mask_p[3] << 4)
4885 | (mask_p[2] << 2) | (mask_p[1] << 0);
4886 REG_WRITE(ah, AR_PHY_BIN_MASK2_1, tmp_mask);
4887 REG_WRITE(ah, AR_PHY_MASK2_P_15_01, tmp_mask);
4888
4889 tmp_mask = (mask_p[30] << 28)
4890 | (mask_p[29] << 26) | (mask_p[28] << 24)
4891 | (mask_p[27] << 22) | (mask_p[26] << 20)
4892 | (mask_p[25] << 18) | (mask_p[24] << 16)
4893 | (mask_p[23] << 14) | (mask_p[22] << 12)
4894 | (mask_p[21] << 10) | (mask_p[20] << 8)
4895 | (mask_p[19] << 6) | (mask_p[18] << 4)
4896 | (mask_p[17] << 2) | (mask_p[16] << 0);
4897 REG_WRITE(ah, AR_PHY_BIN_MASK2_2, tmp_mask);
4898 REG_WRITE(ah, AR_PHY_MASK2_P_30_16, tmp_mask);
4899
4900 tmp_mask = (mask_p[45] << 28)
4901 | (mask_p[44] << 26) | (mask_p[43] << 24)
4902 | (mask_p[42] << 22) | (mask_p[41] << 20)
4903 | (mask_p[40] << 18) | (mask_p[39] << 16)
4904 | (mask_p[38] << 14) | (mask_p[37] << 12)
4905 | (mask_p[36] << 10) | (mask_p[35] << 8)
4906 | (mask_p[34] << 6) | (mask_p[33] << 4)
4907 | (mask_p[32] << 2) | (mask_p[31] << 0);
4908 REG_WRITE(ah, AR_PHY_BIN_MASK2_3, tmp_mask);
4909 REG_WRITE(ah, AR_PHY_MASK2_P_45_31, tmp_mask);
4910
4911 tmp_mask = (mask_p[61] << 30) | (mask_p[60] << 28)
4912 | (mask_p[59] << 26) | (mask_p[58] << 24)
4913 | (mask_p[57] << 22) | (mask_p[56] << 20)
4914 | (mask_p[55] << 18) | (mask_p[54] << 16)
4915 | (mask_p[53] << 14) | (mask_p[52] << 12)
4916 | (mask_p[51] << 10) | (mask_p[50] << 8)
4917 | (mask_p[49] << 6) | (mask_p[48] << 4)
4918 | (mask_p[47] << 2) | (mask_p[46] << 0);
4919 REG_WRITE(ah, AR_PHY_BIN_MASK2_4, tmp_mask);
4920 REG_WRITE(ah, AR_PHY_MASK2_P_61_45, tmp_mask);
4921}
4922
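/*
 * Spur mitigation for pre-AR9280 chips.  Same structure as the AR9280
 * version above, but the spur offset is taken relative to
 * chan->channel * 10 and different bin/denominator scaling is used.
 */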
4923static void ath9k_hw_spur_mitigate(struct ath_hal *ah,
4924 struct ath9k_channel *chan)
4925{
4926 int bb_spur = AR_NO_SPUR;
4927 int bin, cur_bin;
4928 int spur_freq_sd;
4929 int spur_delta_phase;
4930 int denominator;
4931 int upper, lower, cur_vit_mask;
4932 int tmp, new;
4933 int i;
4934 int pilot_mask_reg[4] = { AR_PHY_TIMING7, AR_PHY_TIMING8,
4935 AR_PHY_PILOT_MASK_01_30, AR_PHY_PILOT_MASK_31_60
4936 };
4937 int chan_mask_reg[4] = { AR_PHY_TIMING9, AR_PHY_TIMING10,
4938 AR_PHY_CHANNEL_MASK_01_30, AR_PHY_CHANNEL_MASK_31_60
4939 };
4940 int inc[4] = { 0, 100, 0, 0 };
4941
4942 int8_t mask_m[123];
4943 int8_t mask_p[123];
4944 int8_t mask_amt;
4945 int tmp_mask;
4946 int cur_bb_spur;
4947 bool is2GHz = IS_CHAN_2GHZ(chan);
4948
4949 memset(&mask_m, 0, sizeof(int8_t) * 123);
4950 memset(&mask_p, 0, sizeof(int8_t) * 123);
4951
4952 for (i = 0; i < AR_EEPROM_MODAL_SPURS; i++) {
4953 cur_bb_spur = ath9k_hw_eeprom_get_spur_chan(ah, i, is2GHz);
4954 if (AR_NO_SPUR == cur_bb_spur)
4955 break;
4956 cur_bb_spur = cur_bb_spur - (chan->channel * 10);
4957 if ((cur_bb_spur > -95) && (cur_bb_spur < 95)) {
4958 bb_spur = cur_bb_spur;
4959 break;
4960 }
4961 }
4962
4963 if (AR_NO_SPUR == bb_spur)
4964 return;
4965
4966 bin = bb_spur * 32;
4967
4968 tmp = REG_READ(ah, AR_PHY_TIMING_CTRL4(0));
4969 new = tmp | (AR_PHY_TIMING_CTRL4_ENABLE_SPUR_RSSI |
4970 AR_PHY_TIMING_CTRL4_ENABLE_SPUR_FILTER |
4971 AR_PHY_TIMING_CTRL4_ENABLE_CHAN_MASK |
4972 AR_PHY_TIMING_CTRL4_ENABLE_PILOT_MASK);
4973
4974 REG_WRITE(ah, AR_PHY_TIMING_CTRL4(0), new);
4975
4976 new = (AR_PHY_SPUR_REG_MASK_RATE_CNTL |
4977 AR_PHY_SPUR_REG_ENABLE_MASK_PPM |
4978 AR_PHY_SPUR_REG_MASK_RATE_SELECT |
4979 AR_PHY_SPUR_REG_ENABLE_VIT_SPUR_RSSI |
4980 SM(SPUR_RSSI_THRESH, AR_PHY_SPUR_REG_SPUR_RSSI_THRESH));
4981 REG_WRITE(ah, AR_PHY_SPUR_REG, new);
4982
4983 spur_delta_phase = ((bb_spur * 524288) / 100) &
4984 AR_PHY_TIMING11_SPUR_DELTA_PHASE;
4985
4986 denominator = IS_CHAN_2GHZ(chan) ? 440 : 400;
4987 spur_freq_sd = ((bb_spur * 2048) / denominator) & 0x3ff;
4988
4989 new = (AR_PHY_TIMING11_USE_SPUR_IN_AGC |
4990 SM(spur_freq_sd, AR_PHY_TIMING11_SPUR_FREQ_SD) |
4991 SM(spur_delta_phase, AR_PHY_TIMING11_SPUR_DELTA_PHASE));
4992 REG_WRITE(ah, AR_PHY_TIMING11, new);
4993
4994 cur_bin = -6000;
4995 upper = bin + 100;
4996 lower = bin - 100;
4997
4998 for (i = 0; i < 4; i++) {
4999 int pilot_mask = 0;
5000 int chan_mask = 0;
5001 int bp = 0;
5002 for (bp = 0; bp < 30; bp++) {
5003 if ((cur_bin > lower) && (cur_bin < upper)) {
5004 pilot_mask = pilot_mask | 0x1 << bp;
5005 chan_mask = chan_mask | 0x1 << bp;
5006 }
5007 cur_bin += 100;
5008 }
5009 cur_bin += inc[i];
5010 REG_WRITE(ah, pilot_mask_reg[i], pilot_mask);
5011 REG_WRITE(ah, chan_mask_reg[i], chan_mask);
5012 }
5013
5014 cur_vit_mask = 6100;
5015 upper = bin + 120;
5016 lower = bin - 120;
5017
5018 for (i = 0; i < 123; i++) {
5019 if ((cur_vit_mask > lower) && (cur_vit_mask < upper)) {
5020 if ((abs(cur_vit_mask - bin)) < 75)
5021 mask_amt = 1;
5022 else
5023 mask_amt = 0;
5024 if (cur_vit_mask < 0)
5025 mask_m[abs(cur_vit_mask / 100)] = mask_amt;
5026 else
5027 mask_p[cur_vit_mask / 100] = mask_amt;
5028 }
5029 cur_vit_mask -= 100;
5030 }
5031
5032 tmp_mask = (mask_m[46] << 30) | (mask_m[47] << 28)
5033 | (mask_m[48] << 26) | (mask_m[49] << 24)
5034 | (mask_m[50] << 22) | (mask_m[51] << 20)
5035 | (mask_m[52] << 18) | (mask_m[53] << 16)
5036 | (mask_m[54] << 14) | (mask_m[55] << 12)
5037 | (mask_m[56] << 10) | (mask_m[57] << 8)
5038 | (mask_m[58] << 6) | (mask_m[59] << 4)
5039 | (mask_m[60] << 2) | (mask_m[61] << 0);
5040 REG_WRITE(ah, AR_PHY_BIN_MASK_1, tmp_mask);
5041 REG_WRITE(ah, AR_PHY_VIT_MASK2_M_46_61, tmp_mask);
5042
5043 tmp_mask = (mask_m[31] << 28)
5044 | (mask_m[32] << 26) | (mask_m[33] << 24)
5045 | (mask_m[34] << 22) | (mask_m[35] << 20)
5046 | (mask_m[36] << 18) | (mask_m[37] << 16)
5047 | (mask_m[48] << 14) | (mask_m[39] << 12)
5048 | (mask_m[40] << 10) | (mask_m[41] << 8)
5049 | (mask_m[42] << 6) | (mask_m[43] << 4)
5050 | (mask_m[44] << 2) | (mask_m[45] << 0);
5051 REG_WRITE(ah, AR_PHY_BIN_MASK_2, tmp_mask);
5052 REG_WRITE(ah, AR_PHY_MASK2_M_31_45, tmp_mask);
5053
5054 tmp_mask = (mask_m[16] << 30) | (mask_m[16] << 28)
5055 | (mask_m[18] << 26) | (mask_m[18] << 24)
5056 | (mask_m[20] << 22) | (mask_m[20] << 20)
5057 | (mask_m[22] << 18) | (mask_m[22] << 16)
5058 | (mask_m[24] << 14) | (mask_m[24] << 12)
5059 | (mask_m[25] << 10) | (mask_m[26] << 8)
5060 | (mask_m[27] << 6) | (mask_m[28] << 4)
5061 | (mask_m[29] << 2) | (mask_m[30] << 0);
5062 REG_WRITE(ah, AR_PHY_BIN_MASK_3, tmp_mask);
5063 REG_WRITE(ah, AR_PHY_MASK2_M_16_30, tmp_mask);
5064
5065 tmp_mask = (mask_m[0] << 30) | (mask_m[1] << 28)
5066 | (mask_m[2] << 26) | (mask_m[3] << 24)
5067 | (mask_m[4] << 22) | (mask_m[5] << 20)
5068 | (mask_m[6] << 18) | (mask_m[7] << 16)
5069 | (mask_m[8] << 14) | (mask_m[9] << 12)
5070 | (mask_m[10] << 10) | (mask_m[11] << 8)
5071 | (mask_m[12] << 6) | (mask_m[13] << 4)
5072 | (mask_m[14] << 2) | (mask_m[15] << 0);
5073 REG_WRITE(ah, AR_PHY_MASK_CTL, tmp_mask);
5074 REG_WRITE(ah, AR_PHY_MASK2_M_00_15, tmp_mask);
5075
5076 tmp_mask = (mask_p[15] << 28)
5077 | (mask_p[14] << 26) | (mask_p[13] << 24)
5078 | (mask_p[12] << 22) | (mask_p[11] << 20)
5079 | (mask_p[10] << 18) | (mask_p[9] << 16)
5080 | (mask_p[8] << 14) | (mask_p[7] << 12)
5081 | (mask_p[6] << 10) | (mask_p[5] << 8)
5082 | (mask_p[4] << 6) | (mask_p[3] << 4)
5083 | (mask_p[2] << 2) | (mask_p[1] << 0);
5084 REG_WRITE(ah, AR_PHY_BIN_MASK2_1, tmp_mask);
5085 REG_WRITE(ah, AR_PHY_MASK2_P_15_01, tmp_mask);
5086
5087 tmp_mask = (mask_p[30] << 28)
5088 | (mask_p[29] << 26) | (mask_p[28] << 24)
5089 | (mask_p[27] << 22) | (mask_p[26] << 20)
5090 | (mask_p[25] << 18) | (mask_p[24] << 16)
5091 | (mask_p[23] << 14) | (mask_p[22] << 12)
5092 | (mask_p[21] << 10) | (mask_p[20] << 8)
5093 | (mask_p[19] << 6) | (mask_p[18] << 4)
5094 | (mask_p[17] << 2) | (mask_p[16] << 0);
5095 REG_WRITE(ah, AR_PHY_BIN_MASK2_2, tmp_mask);
5096 REG_WRITE(ah, AR_PHY_MASK2_P_30_16, tmp_mask);
5097
5098 tmp_mask = (mask_p[45] << 28)
5099 | (mask_p[44] << 26) | (mask_p[43] << 24)
5100 | (mask_p[42] << 22) | (mask_p[41] << 20)
5101 | (mask_p[40] << 18) | (mask_p[39] << 16)
5102 | (mask_p[38] << 14) | (mask_p[37] << 12)
5103 | (mask_p[36] << 10) | (mask_p[35] << 8)
5104 | (mask_p[34] << 6) | (mask_p[33] << 4)
5105 | (mask_p[32] << 2) | (mask_p[31] << 0);
5106 REG_WRITE(ah, AR_PHY_BIN_MASK2_3, tmp_mask);
5107 REG_WRITE(ah, AR_PHY_MASK2_P_45_31, tmp_mask);
5108
5109 tmp_mask = (mask_p[61] << 30) | (mask_p[60] << 28)
5110 | (mask_p[59] << 26) | (mask_p[58] << 24)
5111 | (mask_p[57] << 22) | (mask_p[56] << 20)
5112 | (mask_p[55] << 18) | (mask_p[54] << 16)
5113 | (mask_p[53] << 14) | (mask_p[52] << 12)
5114 | (mask_p[51] << 10) | (mask_p[50] << 8)
5115 | (mask_p[49] << 6) | (mask_p[48] << 4)
5116 | (mask_p[47] << 2) | (mask_p[46] << 0);
5117 REG_WRITE(ah, AR_PHY_BIN_MASK2_4, tmp_mask);
5118 REG_WRITE(ah, AR_PHY_MASK2_P_61_45, tmp_mask);
5119}
5120
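/*
 * Program the RX/TX chain masks.  The rx_chainmask switch relies on
 * intentional fall-through: 0x5 first sets AR_PHY_SWAP_ALT_CHAIN and
 * then behaves like 0x3; 0x3 writes a 0x7 mask on AR9160-class and
 * older MACs; 0x1/0x2 only reach the generic 0x7 handling on AR9280.
 * A TX mask of 0x5 also swaps the alternate chain, and AR9100 forces
 * bit 0 of AR_PHY_ANALOG_SWAP.
 */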
5121static inline void ath9k_hw_init_chain_masks(struct ath_hal *ah)
5122{
5123 struct ath_hal_5416 *ahp = AH5416(ah);
5124 int rx_chainmask, tx_chainmask;
5125
5126 rx_chainmask = ahp->ah_rxchainmask;
5127 tx_chainmask = ahp->ah_txchainmask;
5128
5129 switch (rx_chainmask) {
5130 case 0x5:
5131 REG_SET_BIT(ah, AR_PHY_ANALOG_SWAP,
5132 AR_PHY_SWAP_ALT_CHAIN);
5133 case 0x3:
5134		if (ah->ah_macVersion <= AR_SREV_VERSION_9160) {
5135 REG_WRITE(ah, AR_PHY_RX_CHAINMASK, 0x7);
5136 REG_WRITE(ah, AR_PHY_CAL_CHAINMASK, 0x7);
5137 break;
5138 }
5139 case 0x1:
5140 case 0x2:
5141 if (!AR_SREV_9280(ah))
5142 break;
5143 case 0x7:
5144 REG_WRITE(ah, AR_PHY_RX_CHAINMASK, rx_chainmask);
5145 REG_WRITE(ah, AR_PHY_CAL_CHAINMASK, rx_chainmask);
5146 break;
5147 default:
5148 break;
5149 }
5150
5151 REG_WRITE(ah, AR_SELFGEN_MASK, tx_chainmask);
5152 if (tx_chainmask == 0x5) {
5153 REG_SET_BIT(ah, AR_PHY_ANALOG_SWAP,
5154 AR_PHY_SWAP_ALT_CHAIN);
5155 }
5156 if (AR_SREV_9100(ah))
5157 REG_WRITE(ah, AR_PHY_ANALOG_SWAP,
5158 REG_READ(ah, AR_PHY_ANALOG_SWAP) | 0x00000001);
5159}
5160
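/*
 * Patch the XPA bias level into the ADDAC ini table (AR9160 with
 * EEPROM minor rev >= 7 only).  The bias level comes either directly
 * from the modal header or from the last xpaBiasLvlFreq entry at or
 * below the current channel, and is merged into row 7 (2 GHz) or
 * row 6 (5 GHz) of ah_iniAddac.
 */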
5161static void ath9k_hw_set_addac(struct ath_hal *ah,
5162 struct ath9k_channel *chan)
5163{
5164 struct modal_eep_header *pModal;
5165 struct ath_hal_5416 *ahp = AH5416(ah);
5166 struct ar5416_eeprom *eep = &ahp->ah_eeprom;
5167 u8 biaslevel;
5168
5169 if (ah->ah_macVersion != AR_SREV_VERSION_9160)
5170 return;
5171
5172 if (ar5416_get_eep_rev(ahp) < AR5416_EEP_MINOR_VER_7)
5173 return;
5174
5175 pModal = &(eep->modalHeader[IS_CHAN_2GHZ(chan)]);
5176
5177 if (pModal->xpaBiasLvl != 0xff) {
5178 biaslevel = pModal->xpaBiasLvl;
5179 } else {
5180
5181 u16 resetFreqBin, freqBin, freqCount = 0;
5182 struct chan_centers centers;
5183
5184 ath9k_hw_get_channel_centers(ah, chan, &centers);
5185
5186 resetFreqBin =
5187 FREQ2FBIN(centers.synth_center, IS_CHAN_2GHZ(chan));
5188 freqBin = pModal->xpaBiasLvlFreq[0] & 0xff;
5189 biaslevel = (u8) (pModal->xpaBiasLvlFreq[0] >> 14);
5190
5191 freqCount++;
5192
5193 while (freqCount < 3) {
5194 if (pModal->xpaBiasLvlFreq[freqCount] == 0x0)
5195 break;
5196
5197 freqBin = pModal->xpaBiasLvlFreq[freqCount] & 0xff;
5198 if (resetFreqBin >= freqBin) {
5199 biaslevel =
5200 (u8) (pModal->
5201 xpaBiasLvlFreq[freqCount]
5202 >> 14);
5203 } else {
5204 break;
5205 }
5206 freqCount++;
5207 }
5208 }
5209
5210 if (IS_CHAN_2GHZ(chan)) {
5211 INI_RA(&ahp->ah_iniAddac, 7, 1) =
5212 (INI_RA(&ahp->ah_iniAddac, 7, 1) & (~0x18)) | biaslevel
5213 << 3;
5214 } else {
5215 INI_RA(&ahp->ah_iniAddac, 6, 1) =
5216 (INI_RA(&ahp->ah_iniAddac, 6, 1) & (~0xc0)) | biaslevel
5217 << 6;
5218 }
5219}
5220
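/*
 * Conversions between microseconds and MAC clock ticks.  CLOCK_RATE[]
 * gives ticks per microsecond for the current channel's wireless mode
 * (11b is assumed when no channel is set), and the *_to_* variants
 * halve or double the result on HT40 channels.
 */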
5221static u32 ath9k_hw_mac_usec(struct ath_hal *ah, u32 clks)
5222{
5223 if (ah->ah_curchan != NULL)
5224 return clks /
5225 CLOCK_RATE[ath9k_hw_chan2wmode(ah, ah->ah_curchan)];
5226 else
5227 return clks / CLOCK_RATE[ATH9K_MODE_11B];
5228}
5229
5230static u32 ath9k_hw_mac_to_usec(struct ath_hal *ah, u32 clks)
5231{
5232 struct ath9k_channel *chan = ah->ah_curchan;
5233
5234 if (chan && IS_CHAN_HT40(chan))
5235 return ath9k_hw_mac_usec(ah, clks) / 2;
5236 else
5237 return ath9k_hw_mac_usec(ah, clks);
5238}
5239
5240static u32 ath9k_hw_mac_clks(struct ath_hal *ah, u32 usecs)
5241{
5242 if (ah->ah_curchan != NULL)
5243 return usecs * CLOCK_RATE[ath9k_hw_chan2wmode(ah,
5244 ah->ah_curchan)];
5245 else
5246 return usecs * CLOCK_RATE[ATH9K_MODE_11B];
5247}
5248
5249static u32 ath9k_hw_mac_to_clks(struct ath_hal *ah, u32 usecs)
5250{
5251 struct ath9k_channel *chan = ah->ah_curchan;
5252
5253 if (chan && IS_CHAN_HT40(chan))
5254 return ath9k_hw_mac_clks(ah, usecs) * 2;
5255 else
5256 return ath9k_hw_mac_clks(ah, usecs);
5257}
5258
5259static bool ath9k_hw_set_ack_timeout(struct ath_hal *ah, u32 us)
5260{
5261 struct ath_hal_5416 *ahp = AH5416(ah);
5262
5263 if (us > ath9k_hw_mac_to_usec(ah, MS(0xffffffff, AR_TIME_OUT_ACK))) {
5264 DPRINTF(ah->ah_sc, ATH_DBG_RESET, "%s: bad ack timeout %u\n",
5265 __func__, us);
5266 ahp->ah_acktimeout = (u32) -1;
5267 return false;
5268 } else {
5269 REG_RMW_FIELD(ah, AR_TIME_OUT,
5270 AR_TIME_OUT_ACK, ath9k_hw_mac_to_clks(ah, us));
5271 ahp->ah_acktimeout = us;
5272 return true;
5273 }
5274}
5275
5276static bool ath9k_hw_set_cts_timeout(struct ath_hal *ah, u32 us)
5277{
5278 struct ath_hal_5416 *ahp = AH5416(ah);
5279
5280 if (us > ath9k_hw_mac_to_usec(ah, MS(0xffffffff, AR_TIME_OUT_CTS))) {
5281 DPRINTF(ah->ah_sc, ATH_DBG_RESET, "%s: bad cts timeout %u\n",
5282 __func__, us);
5283 ahp->ah_ctstimeout = (u32) -1;
5284 return false;
5285 } else {
5286 REG_RMW_FIELD(ah, AR_TIME_OUT,
5287 AR_TIME_OUT_CTS, ath9k_hw_mac_to_clks(ah, us));
5288 ahp->ah_ctstimeout = us;
5289 return true;
5290 }
5291}
5292static bool ath9k_hw_set_global_txtimeout(struct ath_hal *ah,
5293 u32 tu)
5294{
5295 struct ath_hal_5416 *ahp = AH5416(ah);
5296
5297 if (tu > 0xFFFF) {
5298 DPRINTF(ah->ah_sc, ATH_DBG_XMIT,
5299 "%s: bad global tx timeout %u\n", __func__, tu);
5300 ahp->ah_globaltxtimeout = (u32) -1;
5301 return false;
5302 } else {
5303 REG_RMW_FIELD(ah, AR_GTXTO, AR_GTXTO_TIMEOUT_LIMIT, tu);
5304 ahp->ah_globaltxtimeout = tu;
5305 return true;
5306 }
5307}
5308
5309bool ath9k_hw_setslottime(struct ath_hal *ah, u32 us)
5310{
5311 struct ath_hal_5416 *ahp = AH5416(ah);
5312
5313 if (us < ATH9K_SLOT_TIME_9 || us > ath9k_hw_mac_to_usec(ah, 0xffff)) {
5314 DPRINTF(ah->ah_sc, ATH_DBG_RESET, "%s: bad slot time %u\n",
5315 __func__, us);
5316 ahp->ah_slottime = (u32) -1;
5317 return false;
5318 } else {
5319 REG_WRITE(ah, AR_D_GBL_IFS_SLOT, ath9k_hw_mac_to_clks(ah, us));
5320 ahp->ah_slottime = us;
5321 return true;
5322 }
5323}
5324
5325static inline void ath9k_hw_init_user_settings(struct ath_hal *ah)
5326{
5327 struct ath_hal_5416 *ahp = AH5416(ah);
5328
5329 DPRINTF(ah->ah_sc, ATH_DBG_RESET, "--AP %s ahp->ah_miscMode 0x%x\n",
5330 __func__, ahp->ah_miscMode);
5331 if (ahp->ah_miscMode != 0)
5332 REG_WRITE(ah, AR_PCU_MISC,
5333 REG_READ(ah, AR_PCU_MISC) | ahp->ah_miscMode);
5334 if (ahp->ah_slottime != (u32) -1)
5335 ath9k_hw_setslottime(ah, ahp->ah_slottime);
5336 if (ahp->ah_acktimeout != (u32) -1)
5337 ath9k_hw_set_ack_timeout(ah, ahp->ah_acktimeout);
5338 if (ahp->ah_ctstimeout != (u32) -1)
5339 ath9k_hw_set_cts_timeout(ah, ahp->ah_ctstimeout);
5340 if (ahp->ah_globaltxtimeout != (u32) -1)
5341 ath9k_hw_set_global_txtimeout(ah, ahp->ah_globaltxtimeout);
5342}
5343
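/*
 * Apply the initialization tables for a channel: pick the modes/freq
 * column from the channel mode, write the ADDAC table (with the
 * pre-2.2 fixup), the per-mode and common ini arrays and the RF bank
 * registers, then run the ini overrides, MAC/baseband register setup,
 * chain masks, transmit power and analog RF programming.
 */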
5344static inline int
5345ath9k_hw_process_ini(struct ath_hal *ah,
5346 struct ath9k_channel *chan,
5347 enum ath9k_ht_macmode macmode)
5348{
5349 int i, regWrites = 0;
5350 struct ath_hal_5416 *ahp = AH5416(ah);
5351 u32 modesIndex, freqIndex;
5352 int status;
5353
5354 switch (chan->chanmode) {
5355 case CHANNEL_A:
5356 case CHANNEL_A_HT20:
5357 modesIndex = 1;
5358 freqIndex = 1;
5359 break;
5360 case CHANNEL_A_HT40PLUS:
5361 case CHANNEL_A_HT40MINUS:
5362 modesIndex = 2;
5363 freqIndex = 1;
5364 break;
5365 case CHANNEL_G:
5366 case CHANNEL_G_HT20:
5367 case CHANNEL_B:
5368 modesIndex = 4;
5369 freqIndex = 2;
5370 break;
5371 case CHANNEL_G_HT40PLUS:
5372 case CHANNEL_G_HT40MINUS:
5373 modesIndex = 3;
5374 freqIndex = 2;
5375 break;
5376
5377 default:
5378 return -EINVAL;
5379 }
5380
5381 REG_WRITE(ah, AR_PHY(0), 0x00000007);
5382
5383 REG_WRITE(ah, AR_PHY_ADC_SERIAL_CTL, AR_PHY_SEL_EXTERNAL_RADIO);
5384
5385 ath9k_hw_set_addac(ah, chan);
5386
5387 if (AR_SREV_5416_V22_OR_LATER(ah)) {
5388 REG_WRITE_ARRAY(&ahp->ah_iniAddac, 1, regWrites);
5389 } else {
5390 struct ar5416IniArray temp;
5391 u32 addacSize =
5392 sizeof(u32) * ahp->ah_iniAddac.ia_rows *
5393 ahp->ah_iniAddac.ia_columns;
5394
5395 memcpy(ahp->ah_addac5416_21,
5396 ahp->ah_iniAddac.ia_array, addacSize);
5397
5398 (ahp->ah_addac5416_21)[31 *
5399 ahp->ah_iniAddac.ia_columns + 1] = 0;
5400
5401 temp.ia_array = ahp->ah_addac5416_21;
5402 temp.ia_columns = ahp->ah_iniAddac.ia_columns;
5403 temp.ia_rows = ahp->ah_iniAddac.ia_rows;
5404 REG_WRITE_ARRAY(&temp, 1, regWrites);
5405 }
5406 REG_WRITE(ah, AR_PHY_ADC_SERIAL_CTL, AR_PHY_SEL_INTERNAL_ADDAC);
5407
5408 for (i = 0; i < ahp->ah_iniModes.ia_rows; i++) {
5409 u32 reg = INI_RA(&ahp->ah_iniModes, i, 0);
5410 u32 val = INI_RA(&ahp->ah_iniModes, i, modesIndex);
5411
5412#ifdef CONFIG_SLOW_ANT_DIV
5413 if (ah->ah_devid == AR9280_DEVID_PCI)
5414 val = ath9k_hw_ini_fixup(ah, &ahp->ah_eeprom, reg,
5415 val);
5416#endif
5417
5418 REG_WRITE(ah, reg, val);
5419
5420 if (reg >= 0x7800 && reg < 0x78a0
5421 && ah->ah_config.analog_shiftreg) {
5422 udelay(100);
5423 }
5424
5425 DO_DELAY(regWrites);
5426 }
5427
5428 for (i = 0; i < ahp->ah_iniCommon.ia_rows; i++) {
5429 u32 reg = INI_RA(&ahp->ah_iniCommon, i, 0);
5430 u32 val = INI_RA(&ahp->ah_iniCommon, i, 1);
5431
5432 REG_WRITE(ah, reg, val);
5433
5434 if (reg >= 0x7800 && reg < 0x78a0
5435 && ah->ah_config.analog_shiftreg) {
5436 udelay(100);
5437 }
5438
5439 DO_DELAY(regWrites);
5440 }
5441
5442 ath9k_hw_write_regs(ah, modesIndex, freqIndex, regWrites);
5443
5444 if (AR_SREV_9280_20(ah) && IS_CHAN_A_5MHZ_SPACED(chan)) {
5445 REG_WRITE_ARRAY(&ahp->ah_iniModesAdditional, modesIndex,
5446 regWrites);
5447 }
5448
5449 ath9k_hw_override_ini(ah, chan);
5450 ath9k_hw_set_regs(ah, chan, macmode);
5451 ath9k_hw_init_chain_masks(ah);
5452
5453 status = ath9k_hw_set_txpower(ah, &ahp->ah_eeprom, chan,
5454 ath9k_regd_get_ctl(ah, chan),
5455 ath9k_regd_get_antenna_allowed(ah,
5456 chan),
5457 chan->maxRegTxPower * 2,
5458 min((u32) MAX_RATE_POWER,
5459 (u32) ah->ah_powerLimit));
5460 if (status != 0) {
5461 DPRINTF(ah->ah_sc, ATH_DBG_POWER_MGMT,
5462 "%s: error init'ing transmit power\n", __func__);
5463 return -EIO;
5464 }
5465
5466 if (!ath9k_hw_set_rf_regs(ah, chan, freqIndex)) {
5467 DPRINTF(ah->ah_sc, ATH_DBG_REG_IO,
5468 "%s: ar5416SetRfRegs failed\n", __func__);
5469 return -EIO;
5470 }
5471
5472 return 0;
5473}
5474
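/*
 * Arm the next periodic PHY calibration: program the IQ log count,
 * select the calibration mode (IQ mismatch, ADC gain, ADC DC or the
 * initial ADC DC run) in AR_PHY_CALMODE, then kick it off by setting
 * AR_PHY_TIMING_CTRL4_DO_CAL.
 */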
5475static inline void ath9k_hw_setup_calibration(struct ath_hal *ah,
5476 struct hal_cal_list *currCal)
5477{
5478 REG_RMW_FIELD(ah, AR_PHY_TIMING_CTRL4(0),
5479 AR_PHY_TIMING_CTRL4_IQCAL_LOG_COUNT_MAX,
5480 currCal->calData->calCountMax);
5481
5482 switch (currCal->calData->calType) {
5483 case IQ_MISMATCH_CAL:
5484 REG_WRITE(ah, AR_PHY_CALMODE, AR_PHY_CALMODE_IQ);
5485 DPRINTF(ah->ah_sc, ATH_DBG_CALIBRATE,
5486 "%s: starting IQ Mismatch Calibration\n",
5487 __func__);
5488 break;
5489 case ADC_GAIN_CAL:
5490 REG_WRITE(ah, AR_PHY_CALMODE, AR_PHY_CALMODE_ADC_GAIN);
5491 DPRINTF(ah->ah_sc, ATH_DBG_CALIBRATE,
5492 "%s: starting ADC Gain Calibration\n", __func__);
5493 break;
5494 case ADC_DC_CAL:
5495 REG_WRITE(ah, AR_PHY_CALMODE, AR_PHY_CALMODE_ADC_DC_PER);
5496 DPRINTF(ah->ah_sc, ATH_DBG_CALIBRATE,
5497 "%s: starting ADC DC Calibration\n", __func__);
5498 break;
5499 case ADC_DC_INIT_CAL:
5500 REG_WRITE(ah, AR_PHY_CALMODE, AR_PHY_CALMODE_ADC_DC_INIT);
5501 DPRINTF(ah->ah_sc, ATH_DBG_CALIBRATE,
5502 "%s: starting Init ADC DC Calibration\n",
5503 __func__);
5504 break;
5505 }
5506
5507 REG_SET_BIT(ah, AR_PHY_TIMING_CTRL4(0),
5508 AR_PHY_TIMING_CTRL4_DO_CAL);
5509}
5510
5511static inline void ath9k_hw_reset_calibration(struct ath_hal *ah,
5512 struct hal_cal_list *currCal)
5513{
5514 struct ath_hal_5416 *ahp = AH5416(ah);
5515 int i;
5516
5517 ath9k_hw_setup_calibration(ah, currCal);
5518
5519 currCal->calState = CAL_RUNNING;
5520
5521 for (i = 0; i < AR5416_MAX_CHAINS; i++) {
5522 ahp->ah_Meas0.sign[i] = 0;
5523 ahp->ah_Meas1.sign[i] = 0;
5524 ahp->ah_Meas2.sign[i] = 0;
5525 ahp->ah_Meas3.sign[i] = 0;
5526 }
5527
5528 ahp->ah_CalSamples = 0;
5529}
5530
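/*
 * Advance one periodic calibration.  While a calibration is running
 * and the DO_CAL bit has cleared, the per-type collect hook is called;
 * once enough samples are gathered the post-processing hook runs, the
 * result is marked valid for the channel and *isCalDone is set.
 * Otherwise the calibration is (re)started.
 */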
5531static inline void
5532ath9k_hw_per_calibration(struct ath_hal *ah,
5533 struct ath9k_channel *ichan,
5534 u8 rxchainmask,
5535 struct hal_cal_list *currCal,
5536 bool *isCalDone)
5537{
5538 struct ath_hal_5416 *ahp = AH5416(ah);
5539
5540 *isCalDone = false;
5541
5542 if (currCal->calState == CAL_RUNNING) {
5543 if (!(REG_READ(ah,
5544 AR_PHY_TIMING_CTRL4(0)) &
5545 AR_PHY_TIMING_CTRL4_DO_CAL)) {
5546
5547 currCal->calData->calCollect(ah);
5548
5549 ahp->ah_CalSamples++;
5550
5551 if (ahp->ah_CalSamples >=
5552 currCal->calData->calNumSamples) {
5553 int i, numChains = 0;
5554 for (i = 0; i < AR5416_MAX_CHAINS; i++) {
5555 if (rxchainmask & (1 << i))
5556 numChains++;
5557 }
5558
5559 currCal->calData->calPostProc(ah,
5560 numChains);
5561
5562 ichan->CalValid |=
5563 currCal->calData->calType;
5564 currCal->calState = CAL_DONE;
5565 *isCalDone = true;
5566 } else {
5567 ath9k_hw_setup_calibration(ah, currCal);
5568 }
5569 }
5570 } else if (!(ichan->CalValid & currCal->calData->calType)) {
5571 ath9k_hw_reset_calibration(ah, currCal);
5572 }
5573}
5574
5575static inline bool ath9k_hw_run_init_cals(struct ath_hal *ah,
5576 int init_cal_count)
5577{
5578 struct ath_hal_5416 *ahp = AH5416(ah);
5579 struct ath9k_channel ichan;
5580 bool isCalDone;
5581 struct hal_cal_list *currCal = ahp->ah_cal_list_curr;
5582	const struct hal_percal_data *calData;
5583 int i;
5584
5585 if (currCal == NULL)
5586 return false;
5587
5588	calData = currCal->calData;
5589	ichan.CalValid = 0;
5590 for (i = 0; i < init_cal_count; i++) {
5591 ath9k_hw_reset_calibration(ah, currCal);
5592
5593 if (!ath9k_hw_wait(ah, AR_PHY_TIMING_CTRL4(0),
5594 AR_PHY_TIMING_CTRL4_DO_CAL, 0)) {
5595 DPRINTF(ah->ah_sc, ATH_DBG_CALIBRATE,
5596 "%s: Cal %d failed to complete in 100ms.\n",
5597 __func__, calData->calType);
5598
5599 ahp->ah_cal_list = ahp->ah_cal_list_last =
5600 ahp->ah_cal_list_curr = NULL;
5601 return false;
5602 }
5603
5604 ath9k_hw_per_calibration(ah, &ichan, ahp->ah_rxchainmask,
5605 currCal, &isCalDone);
5606 if (!isCalDone) {
5607 DPRINTF(ah->ah_sc, ATH_DBG_CALIBRATE,
5608 "%s: Not able to run Init Cal %d.\n",
5609 __func__, calData->calType);
5610 }
5611 if (currCal->calNext) {
5612 currCal = currCal->calNext;
5613 calData = currCal->calData;
5614 }
5615 }
5616
5617 ahp->ah_cal_list = ahp->ah_cal_list_last = ahp->ah_cal_list_curr = NULL;
5618 return true;
5619}
5620
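/*
 * Fast channel change without a full chip reset.  Refuses to switch
 * while any TX queue still has pending frames, takes the RF bus,
 * retunes the synthesizer, reprograms transmit power, waits for the
 * synth to settle and releases the bus, then redoes delta-slope and
 * spur mitigation for the new channel.
 */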
5621static inline bool
5622ath9k_hw_channel_change(struct ath_hal *ah,
5623 struct ath9k_channel *chan,
5624 enum ath9k_ht_macmode macmode)
5625{
5626 u32 synthDelay, qnum;
5627 struct ath_hal_5416 *ahp = AH5416(ah);
5628
5629 for (qnum = 0; qnum < AR_NUM_QCU; qnum++) {
5630 if (ath9k_hw_numtxpending(ah, qnum)) {
5631 DPRINTF(ah->ah_sc, ATH_DBG_QUEUE,
5632 "%s: Transmit frames pending on queue %d\n",
5633 __func__, qnum);
5634 return false;
5635 }
5636 }
5637
5638 REG_WRITE(ah, AR_PHY_RFBUS_REQ, AR_PHY_RFBUS_REQ_EN);
5639 if (!ath9k_hw_wait(ah, AR_PHY_RFBUS_GRANT, AR_PHY_RFBUS_GRANT_EN,
5640 AR_PHY_RFBUS_GRANT_EN)) {
5641 DPRINTF(ah->ah_sc, ATH_DBG_PHY_IO,
5642 "%s: Could not kill baseband RX\n", __func__);
5643 return false;
5644 }
5645
5646 ath9k_hw_set_regs(ah, chan, macmode);
5647
5648 if (AR_SREV_9280_10_OR_LATER(ah)) {
5649 if (!(ath9k_hw_ar9280_set_channel(ah, chan))) {
5650 DPRINTF(ah->ah_sc, ATH_DBG_CHANNEL,
5651 "%s: failed to set channel\n", __func__);
5652 return false;
5653 }
5654 } else {
5655 if (!(ath9k_hw_set_channel(ah, chan))) {
5656 DPRINTF(ah->ah_sc, ATH_DBG_CHANNEL,
5657 "%s: failed to set channel\n", __func__);
5658 return false;
5659 }
5660 }
5661
5662 if (ath9k_hw_set_txpower(ah, &ahp->ah_eeprom, chan,
5663 ath9k_regd_get_ctl(ah, chan),
5664 ath9k_regd_get_antenna_allowed(ah, chan),
5665 chan->maxRegTxPower * 2,
5666 min((u32) MAX_RATE_POWER,
5667 (u32) ah->ah_powerLimit)) != 0) {
5668 DPRINTF(ah->ah_sc, ATH_DBG_EEPROM,
5669 "%s: error init'ing transmit power\n", __func__);
5670 return false;
5671 }
5672
5673 synthDelay = REG_READ(ah, AR_PHY_RX_DELAY) & AR_PHY_RX_DELAY_DELAY;
5674 if (IS_CHAN_CCK(chan))
5675 synthDelay = (4 * synthDelay) / 22;
5676 else
5677 synthDelay /= 10;
5678
5679 udelay(synthDelay + BASE_ACTIVATE_DELAY);
5680
5681 REG_WRITE(ah, AR_PHY_RFBUS_REQ, 0);
5682
5683 if (IS_CHAN_OFDM(chan) || IS_CHAN_HT(chan))
5684 ath9k_hw_set_delta_slope(ah, chan);
5685
5686 if (AR_SREV_9280_10_OR_LATER(ah))
5687 ath9k_hw_9280_spur_mitigate(ah, chan);
5688 else
5689 ath9k_hw_spur_mitigate(ah, chan);
5690
5691 if (!chan->oneTimeCalsDone)
5692 chan->oneTimeCalsDone = true;
5693
5694 return true;
5695}
5696
5697static bool ath9k_hw_chip_reset(struct ath_hal *ah,
5698 struct ath9k_channel *chan)
5699{
5700 struct ath_hal_5416 *ahp = AH5416(ah);
5701
5702 if (!ath9k_hw_set_reset_reg(ah, ATH9K_RESET_WARM))
5703 return false;
5704
5705 if (!ath9k_hw_setpower(ah, ATH9K_PM_AWAKE))
5706 return false;
5707
5708 ahp->ah_chipFullSleep = false;
5709
5710 ath9k_hw_init_pll(ah, chan);
5711
5712 ath9k_hw_set_rfmode(ah, chan);
5713
5714 return true;
5715}
5716
5717static inline void ath9k_hw_set_dma(struct ath_hal *ah)
5718{
5719 u32 regval;
5720
5721 regval = REG_READ(ah, AR_AHB_MODE);
5722 REG_WRITE(ah, AR_AHB_MODE, regval | AR_AHB_PREFETCH_RD_EN);
5723
5724 regval = REG_READ(ah, AR_TXCFG) & ~AR_TXCFG_DMASZ_MASK;
5725 REG_WRITE(ah, AR_TXCFG, regval | AR_TXCFG_DMASZ_128B);
5726
5727 REG_RMW_FIELD(ah, AR_TXCFG, AR_FTRIG, ah->ah_txTrigLevel);
5728
5729 regval = REG_READ(ah, AR_RXCFG) & ~AR_RXCFG_DMASZ_MASK;
5730 REG_WRITE(ah, AR_RXCFG, regval | AR_RXCFG_DMASZ_128B);
5731
5732 REG_WRITE(ah, AR_RXFIFO_CFG, 0x200);
5733
5734 if (AR_SREV_9285(ah)) {
5735 REG_WRITE(ah, AR_PCU_TXBUF_CTRL,
5736 AR_9285_PCU_TXBUF_CTRL_USABLE_SIZE);
5737 } else {
5738 REG_WRITE(ah, AR_PCU_TXBUF_CTRL,
5739 AR_PCU_TXBUF_CTRL_USABLE_SIZE);
5740 }
5741}
5742
5743bool ath9k_hw_stopdmarecv(struct ath_hal *ah)
5744{
5745 REG_WRITE(ah, AR_CR, AR_CR_RXD);
5746 if (!ath9k_hw_wait(ah, AR_CR, AR_CR_RXE, 0)) {
5747 DPRINTF(ah->ah_sc, ATH_DBG_QUEUE,
5748 "%s: dma failed to stop in 10ms\n"
5749 "AR_CR=0x%08x\nAR_DIAG_SW=0x%08x\n",
5750 __func__,
5751 REG_READ(ah, AR_CR), REG_READ(ah, AR_DIAG_SW));
5752 return false;
5753 } else {
5754 return true;
5755 }
5756}
5757
5758void ath9k_hw_startpcureceive(struct ath_hal *ah)
5759{
5760 REG_CLR_BIT(ah, AR_DIAG_SW,
5761 (AR_DIAG_RX_DIS | AR_DIAG_RX_ABORT));
5762
5763 ath9k_enable_mib_counters(ah);
5764
5765 ath9k_ani_reset(ah);
5766}
5767
5768void ath9k_hw_stoppcurecv(struct ath_hal *ah)
5769{
5770 REG_SET_BIT(ah, AR_DIAG_SW, AR_DIAG_RX_DIS);
5771
5772 ath9k_hw_disable_mib_counters(ah);
5773}
5774
5775static bool ath9k_hw_iscal_supported(struct ath_hal *ah,
5776 struct ath9k_channel *chan,
5777 enum hal_cal_types calType)
5778{
5779 struct ath_hal_5416 *ahp = AH5416(ah);
5780 bool retval = false;
5781
5782 switch (calType & ahp->ah_suppCals) {
5783 case IQ_MISMATCH_CAL:
5784 if (!IS_CHAN_B(chan))
5785 retval = true;
5786 break;
5787 case ADC_GAIN_CAL:
5788 case ADC_DC_CAL:
5789 if (!IS_CHAN_B(chan)
5790 && !(IS_CHAN_2GHZ(chan) && IS_CHAN_HT20(chan)))
5791 retval = true;
5792 break;
5793 }
5794
5795 return retval;
5796}
5797
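/*
 * Kick off the initial AGC offset calibration and enable noise-floor
 * calibration; on AR9100 and AR9160-and-later parts, additionally register
 * whichever of the ADC gain, ADC DC and IQ mismatch calibrations the channel
 * supports so that ath9k_hw_calibrate() can service them periodically.
 */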
5798static inline bool ath9k_hw_init_cal(struct ath_hal *ah,
5799 struct ath9k_channel *chan)
5800{
5801 struct ath_hal_5416 *ahp = AH5416(ah);
5802 struct ath9k_channel *ichan =
5803 ath9k_regd_check_channel(ah, chan);
5804
5805 REG_WRITE(ah, AR_PHY_AGC_CONTROL,
5806 REG_READ(ah, AR_PHY_AGC_CONTROL) |
5807 AR_PHY_AGC_CONTROL_CAL);
5808
5809 if (!ath9k_hw_wait
5810 (ah, AR_PHY_AGC_CONTROL, AR_PHY_AGC_CONTROL_CAL, 0)) {
5811 DPRINTF(ah->ah_sc, ATH_DBG_CALIBRATE,
5812 "%s: offset calibration failed to complete in 1ms; "
5813 "noisy environment?\n", __func__);
5814 return false;
5815 }
5816
5817 REG_WRITE(ah, AR_PHY_AGC_CONTROL,
5818 REG_READ(ah, AR_PHY_AGC_CONTROL) |
5819 AR_PHY_AGC_CONTROL_NF);
5820
5821 ahp->ah_cal_list = ahp->ah_cal_list_last = ahp->ah_cal_list_curr =
5822 NULL;
5823
5824 if (AR_SREV_9100(ah) || AR_SREV_9160_10_OR_LATER(ah)) {
5825 if (ath9k_hw_iscal_supported(ah, chan, ADC_GAIN_CAL)) {
5826 INIT_CAL(&ahp->ah_adcGainCalData);
5827 INSERT_CAL(ahp, &ahp->ah_adcGainCalData);
5828 DPRINTF(ah->ah_sc, ATH_DBG_CALIBRATE,
5829 "%s: enabling ADC Gain Calibration.\n",
5830 __func__);
5831 }
5832 if (ath9k_hw_iscal_supported(ah, chan, ADC_DC_CAL)) {
5833 INIT_CAL(&ahp->ah_adcDcCalData);
5834 INSERT_CAL(ahp, &ahp->ah_adcDcCalData);
5835 DPRINTF(ah->ah_sc, ATH_DBG_CALIBRATE,
5836 "%s: enabling ADC DC Calibration.\n",
5837 __func__);
5838 }
5839 if (ath9k_hw_iscal_supported(ah, chan, IQ_MISMATCH_CAL)) {
5840 INIT_CAL(&ahp->ah_iqCalData);
5841 INSERT_CAL(ahp, &ahp->ah_iqCalData);
5842 DPRINTF(ah->ah_sc, ATH_DBG_CALIBRATE,
5843 "%s: enabling IQ Calibration.\n",
5844 __func__);
5845 }
5846
5847 ahp->ah_cal_list_curr = ahp->ah_cal_list;
5848
5849 if (ahp->ah_cal_list_curr)
5850 ath9k_hw_reset_calibration(ah,
5851 ahp->ah_cal_list_curr);
5852 }
5853
5854 ichan->CalValid = 0;
5855
5856 return true;
5857}
5858
5859
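/*
 * Full chip reset and re-initialisation for the given channel and operating
 * mode.  When only the channel changes and the chip is not in full sleep, the
 * lighter ath9k_hw_channel_change() path above is tried first.
 */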
5860bool ath9k_hw_reset(struct ath_hal *ah, enum ath9k_opmode opmode,
5861 struct ath9k_channel *chan,
5862 enum ath9k_ht_macmode macmode,
5863 u8 txchainmask, u8 rxchainmask,
5864 enum ath9k_ht_extprotspacing extprotspacing,
5865 bool bChannelChange,
5866 int *status)
5867{
5868#define FAIL(_code) do { ecode = _code; goto bad; } while (0)
5869 u32 saveLedState;
5870 struct ath_hal_5416 *ahp = AH5416(ah);
5871 struct ath9k_channel *curchan = ah->ah_curchan;
5872 u32 saveDefAntenna;
5873 u32 macStaId1;
5874 int ecode;
5875 int i, rx_chainmask;
5876
5877 ahp->ah_extprotspacing = extprotspacing;
5878 ahp->ah_txchainmask = txchainmask;
5879 ahp->ah_rxchainmask = rxchainmask;
5880
5881 if (AR_SREV_9280(ah)) {
5882 ahp->ah_txchainmask &= 0x3;
5883 ahp->ah_rxchainmask &= 0x3;
5884 }
5885
5886 if (ath9k_hw_check_chan(ah, chan) == NULL) {
5887 DPRINTF(ah->ah_sc, ATH_DBG_CHANNEL,
5888 "%s: invalid channel %u/0x%x; no mapping\n",
5889 __func__, chan->channel, chan->channelFlags);
5890 FAIL(-EINVAL);
5891 }
5892
5893 if (!ath9k_hw_setpower(ah, ATH9K_PM_AWAKE))
5894 return false;
5895
5896 if (curchan)
5897 ath9k_hw_getnf(ah, curchan);
5898
5899 if (bChannelChange &&
5900 (ahp->ah_chipFullSleep != true) &&
5901 (ah->ah_curchan != NULL) &&
5902 (chan->channel != ah->ah_curchan->channel) &&
5903 ((chan->channelFlags & CHANNEL_ALL) ==
5904 (ah->ah_curchan->channelFlags & CHANNEL_ALL)) &&
5905 (!AR_SREV_9280(ah) || (!IS_CHAN_A_5MHZ_SPACED(chan) &&
5906 !IS_CHAN_A_5MHZ_SPACED(ah->
5907 ah_curchan)))) {
5908
5909 if (ath9k_hw_channel_change(ah, chan, macmode)) {
5910 ath9k_hw_loadnf(ah, ah->ah_curchan);
5911 ath9k_hw_start_nfcal(ah);
5912 return true;
5913 }
5914 }
5915
5916 saveDefAntenna = REG_READ(ah, AR_DEF_ANTENNA);
5917 if (saveDefAntenna == 0)
5918 saveDefAntenna = 1;
5919
5920 macStaId1 = REG_READ(ah, AR_STA_ID1) & AR_STA_ID1_BASE_RATE_11B;
5921
5922 saveLedState = REG_READ(ah, AR_CFG_LED) &
5923 (AR_CFG_LED_ASSOC_CTL | AR_CFG_LED_MODE_SEL |
5924 AR_CFG_LED_BLINK_THRESH_SEL | AR_CFG_LED_BLINK_SLOW);
5925
5926 ath9k_hw_mark_phy_inactive(ah);
5927
5928 if (!ath9k_hw_chip_reset(ah, chan)) {
5929 DPRINTF(ah->ah_sc, ATH_DBG_RESET, "%s: chip reset failed\n",
5930 __func__);
5931 FAIL(-EIO);
5932 }
5933
5934 if (AR_SREV_9280(ah)) {
5935 REG_SET_BIT(ah, AR_GPIO_INPUT_EN_VAL,
5936 AR_GPIO_JTAG_DISABLE);
5937
5938 if (test_bit(ATH9K_MODE_11A, ah->ah_caps.wireless_modes)) {
5939 if (IS_CHAN_5GHZ(chan))
5940 ath9k_hw_set_gpio(ah, 9, 0);
5941 else
5942 ath9k_hw_set_gpio(ah, 9, 1);
5943 }
5944 ath9k_hw_cfg_output(ah, 9, ATH9K_GPIO_OUTPUT_MUX_AS_OUTPUT);
5945 }
5946
5947 ecode = ath9k_hw_process_ini(ah, chan, macmode);
5948 if (ecode != 0)
5949 goto bad;
5950
5951 if (IS_CHAN_OFDM(chan) || IS_CHAN_HT(chan))
5952 ath9k_hw_set_delta_slope(ah, chan);
5953
5954 if (AR_SREV_9280_10_OR_LATER(ah))
5955 ath9k_hw_9280_spur_mitigate(ah, chan);
5956 else
5957 ath9k_hw_spur_mitigate(ah, chan);
5958
5959 if (!ath9k_hw_eeprom_set_board_values(ah, chan)) {
5960 DPRINTF(ah->ah_sc, ATH_DBG_EEPROM,
5961 "%s: error setting board options\n", __func__);
5962 FAIL(-EIO);
5963 }
5964
5965 ath9k_hw_decrease_chain_power(ah, chan);
5966
5967 REG_WRITE(ah, AR_STA_ID0, get_unaligned_le32(ahp->ah_macaddr));
5968 REG_WRITE(ah, AR_STA_ID1, get_unaligned_le16(ahp->ah_macaddr + 4)
5969 | macStaId1
5970 | AR_STA_ID1_RTS_USE_DEF
5971 | (ah->ah_config.ack_6mb ?
5972 AR_STA_ID1_ACKCTS_6MB : 0)
5973 | ahp->ah_staId1Defaults);
5974 ath9k_hw_set_operating_mode(ah, opmode);
5975
5976 REG_WRITE(ah, AR_BSSMSKL, get_unaligned_le32(ahp->ah_bssidmask));
5977 REG_WRITE(ah, AR_BSSMSKU, get_unaligned_le16(ahp->ah_bssidmask + 4));
5978
5979 REG_WRITE(ah, AR_DEF_ANTENNA, saveDefAntenna);
5980
5981 REG_WRITE(ah, AR_BSS_ID0, get_unaligned_le32(ahp->ah_bssid));
5982 REG_WRITE(ah, AR_BSS_ID1, get_unaligned_le16(ahp->ah_bssid + 4) |
5983 ((ahp->ah_assocId & 0x3fff) << AR_BSS_ID1_AID_S));
5984
5985 REG_WRITE(ah, AR_ISR, ~0);
5986
5987 REG_WRITE(ah, AR_RSSI_THR, INIT_RSSI_THR);
5988
5989 if (AR_SREV_9280_10_OR_LATER(ah)) {
5990 if (!(ath9k_hw_ar9280_set_channel(ah, chan)))
5991 FAIL(-EIO);
5992 } else {
5993 if (!(ath9k_hw_set_channel(ah, chan)))
5994 FAIL(-EIO);
5995 }
5996
5997 for (i = 0; i < AR_NUM_DCU; i++)
5998 REG_WRITE(ah, AR_DQCUMASK(i), 1 << i);
5999
6000 ahp->ah_intrTxqs = 0;
6001 for (i = 0; i < ah->ah_caps.total_queues; i++)
6002 ath9k_hw_resettxqueue(ah, i);
6003
6004 ath9k_hw_init_interrupt_masks(ah, opmode);
6005 ath9k_hw_init_qos(ah);
6006
6007 ath9k_hw_init_user_settings(ah);
6008
6009 ah->ah_opmode = opmode;
6010
6011 REG_WRITE(ah, AR_STA_ID1,
6012 REG_READ(ah, AR_STA_ID1) | AR_STA_ID1_PRESERVE_SEQNUM);
6013
6014 ath9k_hw_set_dma(ah);
6015
6016 REG_WRITE(ah, AR_OBS, 8);
6017
6018 if (ahp->ah_intrMitigation) {
6019
6020 REG_RMW_FIELD(ah, AR_RIMT, AR_RIMT_LAST, 500);
6021 REG_RMW_FIELD(ah, AR_RIMT, AR_RIMT_FIRST, 2000);
6022 }
6023
6024 ath9k_hw_init_bb(ah, chan);
6025
6026 if (!ath9k_hw_init_cal(ah, chan))
6027 FAIL(-ENODEV);
6028
6029 rx_chainmask = ahp->ah_rxchainmask;
6030 if ((rx_chainmask == 0x5) || (rx_chainmask == 0x3)) {
6031 REG_WRITE(ah, AR_PHY_RX_CHAINMASK, rx_chainmask);
6032 REG_WRITE(ah, AR_PHY_CAL_CHAINMASK, rx_chainmask);
6033 }
6034
6035 REG_WRITE(ah, AR_CFG_LED, saveLedState | AR_CFG_SCLK_32KHZ);
6036
6037 if (AR_SREV_9100(ah)) {
6038 u32 mask;
6039 mask = REG_READ(ah, AR_CFG);
6040 if (mask & (AR_CFG_SWRB | AR_CFG_SWTB | AR_CFG_SWRG)) {
6041 DPRINTF(ah->ah_sc, ATH_DBG_RESET,
6042 "%s CFG Byte Swap Set 0x%x\n", __func__,
6043 mask);
6044 } else {
6045 mask =
6046 INIT_CONFIG_STATUS | AR_CFG_SWRB | AR_CFG_SWTB;
6047 REG_WRITE(ah, AR_CFG, mask);
6048 DPRINTF(ah->ah_sc, ATH_DBG_RESET,
6049 "%s Setting CFG 0x%x\n", __func__,
6050 REG_READ(ah, AR_CFG));
6051 }
6052 } else {
6053#ifdef __BIG_ENDIAN
6054 REG_WRITE(ah, AR_CFG, AR_CFG_SWTD | AR_CFG_SWRD);
6055#endif
6056 }
6057
6058 return true;
6059bad:
6060 if (status)
6061 *status = ecode;
6062 return false;
6063#undef FAIL
6064}
6065
6066bool ath9k_hw_phy_disable(struct ath_hal *ah)
6067{
6068 return ath9k_hw_set_reset_reg(ah, ATH9K_RESET_WARM);
6069}
6070
6071bool ath9k_hw_disable(struct ath_hal *ah)
6072{
6073 if (!ath9k_hw_setpower(ah, ATH9K_PM_AWAKE))
6074 return false;
6075
6076 return ath9k_hw_set_reset_reg(ah, ATH9K_RESET_COLD);
6077}
6078
6079bool
6080ath9k_hw_calibrate(struct ath_hal *ah, struct ath9k_channel *chan,
6081 u8 rxchainmask, bool longcal,
6082 bool *isCalDone)
6083{
6084 struct ath_hal_5416 *ahp = AH5416(ah);
6085 struct hal_cal_list *currCal = ahp->ah_cal_list_curr;
6086 struct ath9k_channel *ichan =
6087 ath9k_regd_check_channel(ah, chan);
6088
6089 *isCalDone = true;
6090
6091 if (ichan == NULL) {
6092 DPRINTF(ah->ah_sc, ATH_DBG_CHANNEL,
6093 "%s: invalid channel %u/0x%x; no mapping\n",
6094 __func__, chan->channel, chan->channelFlags);
6095 return false;
6096 }
6097
6098 if (currCal &&
6099 (currCal->calState == CAL_RUNNING ||
6100 currCal->calState == CAL_WAITING)) {
6101 ath9k_hw_per_calibration(ah, ichan, rxchainmask, currCal,
6102 isCalDone);
6103 if (*isCalDone) {
6104 ahp->ah_cal_list_curr = currCal = currCal->calNext;
6105
6106 if (currCal->calState == CAL_WAITING) {
6107 *isCalDone = false;
6108 ath9k_hw_reset_calibration(ah, currCal);
6109 }
6110 }
6111 }
6112
6113 if (longcal) {
6114 ath9k_hw_getnf(ah, ichan);
6115 ath9k_hw_loadnf(ah, ah->ah_curchan);
6116 ath9k_hw_start_nfcal(ah);
6117
6118 if ((ichan->channelFlags & CHANNEL_CW_INT) != 0) {
6119
6120 chan->channelFlags |= CHANNEL_CW_INT;
6121 ichan->channelFlags &= ~CHANNEL_CW_INT;
6122 }
6123 }
6124
6125 return true;
6126}
6127
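/*
 * The calCollect callbacks below accumulate the raw per-chain measurement
 * registers (AR_PHY_CAL_MEAS_*) into running totals; the matching
 * *_calibrate() routines turn those totals into correction coefficients once
 * all samples have been collected.
 */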
6128static void ath9k_hw_iqcal_collect(struct ath_hal *ah)
6129{
6130 struct ath_hal_5416 *ahp = AH5416(ah);
6131 int i;
6132
6133 for (i = 0; i < AR5416_MAX_CHAINS; i++) {
6134 ahp->ah_totalPowerMeasI[i] +=
6135 REG_READ(ah, AR_PHY_CAL_MEAS_0(i));
6136 ahp->ah_totalPowerMeasQ[i] +=
6137 REG_READ(ah, AR_PHY_CAL_MEAS_1(i));
6138 ahp->ah_totalIqCorrMeas[i] +=
6139 (int32_t) REG_READ(ah, AR_PHY_CAL_MEAS_2(i));
6140 DPRINTF(ah->ah_sc, ATH_DBG_CALIBRATE,
6141 "%d: Chn %d pmi=0x%08x;pmq=0x%08x;iqcm=0x%08x;\n",
6142 ahp->ah_CalSamples, i, ahp->ah_totalPowerMeasI[i],
6143 ahp->ah_totalPowerMeasQ[i],
6144 ahp->ah_totalIqCorrMeas[i]);
6145 }
6146}
6147
6148static void ath9k_hw_adc_gaincal_collect(struct ath_hal *ah)
6149{
6150 struct ath_hal_5416 *ahp = AH5416(ah);
6151 int i;
6152
6153 for (i = 0; i < AR5416_MAX_CHAINS; i++) {
6154 ahp->ah_totalAdcIOddPhase[i] +=
6155 REG_READ(ah, AR_PHY_CAL_MEAS_0(i));
6156 ahp->ah_totalAdcIEvenPhase[i] +=
6157 REG_READ(ah, AR_PHY_CAL_MEAS_1(i));
6158 ahp->ah_totalAdcQOddPhase[i] +=
6159 REG_READ(ah, AR_PHY_CAL_MEAS_2(i));
6160 ahp->ah_totalAdcQEvenPhase[i] +=
6161 REG_READ(ah, AR_PHY_CAL_MEAS_3(i));
6162
6163 DPRINTF(ah->ah_sc, ATH_DBG_CALIBRATE,
6164 "%d: Chn %d oddi=0x%08x; eveni=0x%08x; "
6165 "oddq=0x%08x; evenq=0x%08x;\n",
6166 ahp->ah_CalSamples, i,
6167 ahp->ah_totalAdcIOddPhase[i],
6168 ahp->ah_totalAdcIEvenPhase[i],
6169 ahp->ah_totalAdcQOddPhase[i],
6170 ahp->ah_totalAdcQEvenPhase[i]);
6171 }
6172}
6173
6174static void ath9k_hw_adc_dccal_collect(struct ath_hal *ah)
6175{
6176 struct ath_hal_5416 *ahp = AH5416(ah);
6177 int i;
6178
6179 for (i = 0; i < AR5416_MAX_CHAINS; i++) {
6180 ahp->ah_totalAdcDcOffsetIOddPhase[i] +=
6181 (int32_t) REG_READ(ah, AR_PHY_CAL_MEAS_0(i));
6182 ahp->ah_totalAdcDcOffsetIEvenPhase[i] +=
6183 (int32_t) REG_READ(ah, AR_PHY_CAL_MEAS_1(i));
6184 ahp->ah_totalAdcDcOffsetQOddPhase[i] +=
6185 (int32_t) REG_READ(ah, AR_PHY_CAL_MEAS_2(i));
6186 ahp->ah_totalAdcDcOffsetQEvenPhase[i] +=
6187 (int32_t) REG_READ(ah, AR_PHY_CAL_MEAS_3(i));
6188
6189 DPRINTF(ah->ah_sc, ATH_DBG_CALIBRATE,
6190 "%d: Chn %d oddi=0x%08x; eveni=0x%08x; "
6191 "oddq=0x%08x; evenq=0x%08x;\n",
6192 ahp->ah_CalSamples, i,
6193 ahp->ah_totalAdcDcOffsetIOddPhase[i],
6194 ahp->ah_totalAdcDcOffsetIEvenPhase[i],
6195 ahp->ah_totalAdcDcOffsetQOddPhase[i],
6196 ahp->ah_totalAdcDcOffsetQEvenPhase[i]);
6197 }
6198}
6199
6200static void ath9k_hw_iqcalibrate(struct ath_hal *ah, u8 numChains)
6201{
6202 struct ath_hal_5416 *ahp = AH5416(ah);
6203 u32 powerMeasQ, powerMeasI, iqCorrMeas;
6204 u32 qCoffDenom, iCoffDenom;
6205 int32_t qCoff, iCoff;
6206 int iqCorrNeg, i;
6207
6208 for (i = 0; i < numChains; i++) {
6209 powerMeasI = ahp->ah_totalPowerMeasI[i];
6210 powerMeasQ = ahp->ah_totalPowerMeasQ[i];
6211 iqCorrMeas = ahp->ah_totalIqCorrMeas[i];
6212
6213 DPRINTF(ah->ah_sc, ATH_DBG_CALIBRATE,
6214 "Starting IQ Cal and Correction for Chain %d\n",
6215 i);
6216
6217 DPRINTF(ah->ah_sc, ATH_DBG_CALIBRATE,
6218 "Orignal: Chn %diq_corr_meas = 0x%08x\n",
6219 i, ahp->ah_totalIqCorrMeas[i]);
6220
6221 iqCorrNeg = 0;
6222
6223
6224 if (iqCorrMeas > 0x80000000) {
6225 iqCorrMeas = (0xffffffff - iqCorrMeas) + 1;
6226 iqCorrNeg = 1;
6227 }
6228
6229 DPRINTF(ah->ah_sc, ATH_DBG_CALIBRATE,
6230 "Chn %d pwr_meas_i = 0x%08x\n", i, powerMeasI);
6231 DPRINTF(ah->ah_sc, ATH_DBG_CALIBRATE,
6232 "Chn %d pwr_meas_q = 0x%08x\n", i, powerMeasQ);
6233 DPRINTF(ah->ah_sc, ATH_DBG_CALIBRATE, "iqCorrNeg is 0x%08x\n",
6234 iqCorrNeg);
6235
6236 iCoffDenom = (powerMeasI / 2 + powerMeasQ / 2) / 128;
6237 qCoffDenom = powerMeasQ / 64;
6238
6239 if (iCoffDenom != 0 && qCoffDenom != 0) {
6240
6241 iCoff = iqCorrMeas / iCoffDenom;
6242 qCoff = powerMeasI / qCoffDenom - 64;
6243 DPRINTF(ah->ah_sc, ATH_DBG_CALIBRATE,
6244 "Chn %d iCoff = 0x%08x\n", i, iCoff);
6245 DPRINTF(ah->ah_sc, ATH_DBG_CALIBRATE,
6246 "Chn %d qCoff = 0x%08x\n", i, qCoff);
6247
6248
6249 iCoff = iCoff & 0x3f;
6250 DPRINTF(ah->ah_sc, ATH_DBG_CALIBRATE,
6251 "New: Chn %d iCoff = 0x%08x\n", i, iCoff);
6252 if (iqCorrNeg == 0x0)
6253 iCoff = 0x40 - iCoff;
6254
6255 if (qCoff > 15)
6256 qCoff = 15;
6257 else if (qCoff <= -16)
6258 qCoff = -16;
6259
6260 DPRINTF(ah->ah_sc, ATH_DBG_CALIBRATE,
6261 "Chn %d : iCoff = 0x%x qCoff = 0x%x\n",
6262 i, iCoff, qCoff);
6263
6264 REG_RMW_FIELD(ah, AR_PHY_TIMING_CTRL4(i),
6265 AR_PHY_TIMING_CTRL4_IQCORR_Q_I_COFF,
6266 iCoff);
6267 REG_RMW_FIELD(ah, AR_PHY_TIMING_CTRL4(i),
6268 AR_PHY_TIMING_CTRL4_IQCORR_Q_Q_COFF,
6269 qCoff);
6270 DPRINTF(ah->ah_sc, ATH_DBG_CALIBRATE,
6271 "IQ Cal and Correction done for Chain %d\n",
6272 i);
6273 }
6274 }
6275
6276 REG_SET_BIT(ah, AR_PHY_TIMING_CTRL4(0),
6277 AR_PHY_TIMING_CTRL4_IQCORR_ENABLE);
6278}
6279
6280static void
6281ath9k_hw_adc_gaincal_calibrate(struct ath_hal *ah, u8 numChains)
6282{
6283 struct ath_hal_5416 *ahp = AH5416(ah);
6284 u32 iOddMeasOffset, iEvenMeasOffset, qOddMeasOffset,
6285 qEvenMeasOffset;
6286 u32 qGainMismatch, iGainMismatch, val, i;
6287
6288 for (i = 0; i < numChains; i++) {
6289 iOddMeasOffset = ahp->ah_totalAdcIOddPhase[i];
6290 iEvenMeasOffset = ahp->ah_totalAdcIEvenPhase[i];
6291 qOddMeasOffset = ahp->ah_totalAdcQOddPhase[i];
6292 qEvenMeasOffset = ahp->ah_totalAdcQEvenPhase[i];
6293
6294 DPRINTF(ah->ah_sc, ATH_DBG_CALIBRATE,
6295 "Starting ADC Gain Cal for Chain %d\n", i);
6296
6297 DPRINTF(ah->ah_sc, ATH_DBG_CALIBRATE,
6298 "Chn %d pwr_meas_odd_i = 0x%08x\n", i,
6299 iOddMeasOffset);
6300 DPRINTF(ah->ah_sc, ATH_DBG_CALIBRATE,
6301 "Chn %d pwr_meas_even_i = 0x%08x\n", i,
6302 iEvenMeasOffset);
6303 DPRINTF(ah->ah_sc, ATH_DBG_CALIBRATE,
6304 "Chn %d pwr_meas_odd_q = 0x%08x\n", i,
6305 qOddMeasOffset);
6306 DPRINTF(ah->ah_sc, ATH_DBG_CALIBRATE,
6307 "Chn %d pwr_meas_even_q = 0x%08x\n", i,
6308 qEvenMeasOffset);
6309
6310 if (iOddMeasOffset != 0 && qEvenMeasOffset != 0) {
6311 iGainMismatch =
6312 ((iEvenMeasOffset * 32) /
6313 iOddMeasOffset) & 0x3f;
6314 qGainMismatch =
6315 ((qOddMeasOffset * 32) /
6316 qEvenMeasOffset) & 0x3f;
6317
6318 DPRINTF(ah->ah_sc, ATH_DBG_CALIBRATE,
6319 "Chn %d gain_mismatch_i = 0x%08x\n", i,
6320 iGainMismatch);
6321 DPRINTF(ah->ah_sc, ATH_DBG_CALIBRATE,
6322 "Chn %d gain_mismatch_q = 0x%08x\n", i,
6323 qGainMismatch);
6324
6325 val = REG_READ(ah, AR_PHY_NEW_ADC_DC_GAIN_CORR(i));
6326 val &= 0xfffff000;
6327 val |= (qGainMismatch) | (iGainMismatch << 6);
6328 REG_WRITE(ah, AR_PHY_NEW_ADC_DC_GAIN_CORR(i), val);
6329
6330 DPRINTF(ah->ah_sc, ATH_DBG_CALIBRATE,
6331 "ADC Gain Cal done for Chain %d\n", i);
6332 }
6333 }
6334
6335 REG_WRITE(ah, AR_PHY_NEW_ADC_DC_GAIN_CORR(0),
6336 REG_READ(ah, AR_PHY_NEW_ADC_DC_GAIN_CORR(0)) |
6337 AR_PHY_NEW_ADC_GAIN_CORR_ENABLE);
6338}
6339
6340static void
6341ath9k_hw_adc_dccal_calibrate(struct ath_hal *ah, u8 numChains)
6342{
6343 struct ath_hal_5416 *ahp = AH5416(ah);
6344 u32 iOddMeasOffset, iEvenMeasOffset, val, i;
6345 int32_t qOddMeasOffset, qEvenMeasOffset, qDcMismatch, iDcMismatch;
6346 const struct hal_percal_data *calData =
6347 ahp->ah_cal_list_curr->calData;
6348 u32 numSamples =
6349 (1 << (calData->calCountMax + 5)) * calData->calNumSamples;
6350
6351 for (i = 0; i < numChains; i++) {
6352 iOddMeasOffset = ahp->ah_totalAdcDcOffsetIOddPhase[i];
6353 iEvenMeasOffset = ahp->ah_totalAdcDcOffsetIEvenPhase[i];
6354 qOddMeasOffset = ahp->ah_totalAdcDcOffsetQOddPhase[i];
6355 qEvenMeasOffset = ahp->ah_totalAdcDcOffsetQEvenPhase[i];
6356
6357 DPRINTF(ah->ah_sc, ATH_DBG_CALIBRATE,
6358 "Starting ADC DC Offset Cal for Chain %d\n", i);
6359
6360 DPRINTF(ah->ah_sc, ATH_DBG_CALIBRATE,
6361 "Chn %d pwr_meas_odd_i = %d\n", i,
6362 iOddMeasOffset);
6363 DPRINTF(ah->ah_sc, ATH_DBG_CALIBRATE,
6364 "Chn %d pwr_meas_even_i = %d\n", i,
6365 iEvenMeasOffset);
6366 DPRINTF(ah->ah_sc, ATH_DBG_CALIBRATE,
6367 "Chn %d pwr_meas_odd_q = %d\n", i,
6368 qOddMeasOffset);
6369 DPRINTF(ah->ah_sc, ATH_DBG_CALIBRATE,
6370 "Chn %d pwr_meas_even_q = %d\n", i,
6371 qEvenMeasOffset);
6372
6373 iDcMismatch = (((iEvenMeasOffset - iOddMeasOffset) * 2) /
6374 numSamples) & 0x1ff;
6375 qDcMismatch = (((qOddMeasOffset - qEvenMeasOffset) * 2) /
6376 numSamples) & 0x1ff;
6377
6378 DPRINTF(ah->ah_sc, ATH_DBG_CALIBRATE,
6379 "Chn %d dc_offset_mismatch_i = 0x%08x\n", i,
6380 iDcMismatch);
6381 DPRINTF(ah->ah_sc, ATH_DBG_CALIBRATE,
6382 "Chn %d dc_offset_mismatch_q = 0x%08x\n", i,
6383 qDcMismatch);
6384
6385 val = REG_READ(ah, AR_PHY_NEW_ADC_DC_GAIN_CORR(i));
6386 val &= 0xc0000fff;
6387 val |= (qDcMismatch << 12) | (iDcMismatch << 21);
6388 REG_WRITE(ah, AR_PHY_NEW_ADC_DC_GAIN_CORR(i), val);
6389
6390 DPRINTF(ah->ah_sc, ATH_DBG_CALIBRATE,
6391 "ADC DC Offset Cal done for Chain %d\n", i);
6392 }
6393
6394 REG_WRITE(ah, AR_PHY_NEW_ADC_DC_GAIN_CORR(0),
6395 REG_READ(ah, AR_PHY_NEW_ADC_DC_GAIN_CORR(0)) |
6396 AR_PHY_NEW_ADC_DC_OFFSET_CORR_ENABLE);
6397}
6398
6399bool ath9k_hw_set_txpowerlimit(struct ath_hal *ah, u32 limit)
6400{
6401 struct ath_hal_5416 *ahp = AH5416(ah);
6402 struct ath9k_channel *chan = ah->ah_curchan;
6403
6404 ah->ah_powerLimit = min(limit, (u32) MAX_RATE_POWER);
6405
6406 if (ath9k_hw_set_txpower(ah, &ahp->ah_eeprom, chan,
6407 ath9k_regd_get_ctl(ah, chan),
6408 ath9k_regd_get_antenna_allowed(ah,
6409 chan),
6410 chan->maxRegTxPower * 2,
6411 min((u32) MAX_RATE_POWER,
6412 (u32) ah->ah_powerLimit)) != 0)
6413 return false;
6414
6415 return true;
6416}
6417
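/*
 * For HT40 channels the synthesizer is programmed HT40_CHANNEL_CENTER_SHIFT
 * above (HT40+) or below (HT40-) the control channel, and the control and
 * extension channel centers are derived from that.  For non-HT40 channels all
 * three centers are simply the channel frequency itself.
 */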
6418void
6419ath9k_hw_get_channel_centers(struct ath_hal *ah,
6420 struct ath9k_channel *chan,
6421 struct chan_centers *centers)
6422{
6423 int8_t extoff;
6424 struct ath_hal_5416 *ahp = AH5416(ah);
6425
6426 if (!IS_CHAN_HT40(chan)) {
6427 centers->ctl_center = centers->ext_center =
6428 centers->synth_center = chan->channel;
6429 return;
6430 }
6431
6432 if ((chan->chanmode == CHANNEL_A_HT40PLUS) ||
6433 (chan->chanmode == CHANNEL_G_HT40PLUS)) {
6434 centers->synth_center =
6435 chan->channel + HT40_CHANNEL_CENTER_SHIFT;
6436 extoff = 1;
6437 } else {
6438 centers->synth_center =
6439 chan->channel - HT40_CHANNEL_CENTER_SHIFT;
6440 extoff = -1;
6441 }
6442
6443 centers->ctl_center = centers->synth_center - (extoff *
6444 HT40_CHANNEL_CENTER_SHIFT);
6445 centers->ext_center =
6446 centers->synth_center +
6447 (extoff *
6448 ((ahp->ah_extprotspacing ==
6449 ATH9K_HT_EXTPROTSPACING_20)
6450 ? HT40_CHANNEL_CENTER_SHIFT
6451 : 15));
6452
6453
6454}
6455
6456void
6457ath9k_hw_reset_calvalid(struct ath_hal *ah, struct ath9k_channel *chan,
6458 bool *isCalDone)
6459{
6460 struct ath_hal_5416 *ahp = AH5416(ah);
6461 struct ath9k_channel *ichan =
6462 ath9k_regd_check_channel(ah, chan);
6463 struct hal_cal_list *currCal = ahp->ah_cal_list_curr;
6464
6465 *isCalDone = true;
6466
6467 if (!AR_SREV_9100(ah) && !AR_SREV_9160_10_OR_LATER(ah))
6468 return;
6469
6470 if (currCal == NULL)
6471 return;
6472
6473 if (ichan == NULL) {
6474 DPRINTF(ah->ah_sc, ATH_DBG_CALIBRATE,
6475 "%s: invalid channel %u/0x%x; no mapping\n",
6476 __func__, chan->channel, chan->channelFlags);
6477 return;
6478 }
6479
6480
6481 if (currCal->calState != CAL_DONE) {
6482 DPRINTF(ah->ah_sc, ATH_DBG_CALIBRATE,
6483 "%s: Calibration state incorrect, %d\n",
6484 __func__, currCal->calState);
6485 return;
6486 }
6487
6488
6489 if (!ath9k_hw_iscal_supported(ah, chan, currCal->calData->calType))
6490 return;
6491
6492 DPRINTF(ah->ah_sc, ATH_DBG_CALIBRATE,
6493 "%s: Resetting Cal %d state for channel %u/0x%x\n",
6494 __func__, currCal->calData->calType, chan->channel,
6495 chan->channelFlags);
6496
6497 ichan->CalValid &= ~currCal->calData->calType;
6498 currCal->calState = CAL_WAITING;
6499
6500 *isCalDone = false;
6501}
6502
6503void ath9k_hw_getmac(struct ath_hal *ah, u8 *mac)
6504{
6505 struct ath_hal_5416 *ahp = AH5416(ah);
6506
6507 memcpy(mac, ahp->ah_macaddr, ETH_ALEN);
6508}
6509
6510bool ath9k_hw_setmac(struct ath_hal *ah, const u8 *mac)
6511{
6512 struct ath_hal_5416 *ahp = AH5416(ah);
6513
6514 memcpy(ahp->ah_macaddr, mac, ETH_ALEN);
6515 return true;
6516}
6517
6518void ath9k_hw_getbssidmask(struct ath_hal *ah, u8 *mask)
6519{
6520 struct ath_hal_5416 *ahp = AH5416(ah);
6521
6522 memcpy(mask, ahp->ah_bssidmask, ETH_ALEN);
6523}
6524
6525bool
6526ath9k_hw_setbssidmask(struct ath_hal *ah, const u8 *mask)
6527{
6528 struct ath_hal_5416 *ahp = AH5416(ah);
6529
6530 memcpy(ahp->ah_bssidmask, mask, ETH_ALEN);
6531
6532 REG_WRITE(ah, AR_BSSMSKL, get_unaligned_le32(ahp->ah_bssidmask));
6533 REG_WRITE(ah, AR_BSSMSKU, get_unaligned_le16(ahp->ah_bssidmask + 4));
6534
6535 return true;
6536}
6537
6538#ifdef CONFIG_ATH9K_RFKILL
6539static void ath9k_enable_rfkill(struct ath_hal *ah)
6540{
6541 struct ath_hal_5416 *ahp = AH5416(ah);
6542
6543 REG_SET_BIT(ah, AR_GPIO_INPUT_EN_VAL,
6544 AR_GPIO_INPUT_EN_VAL_RFSILENT_BB);
6545
6546 REG_CLR_BIT(ah, AR_GPIO_INPUT_MUX2,
6547 AR_GPIO_INPUT_MUX2_RFSILENT);
6548
6549 ath9k_hw_cfg_gpio_input(ah, ahp->ah_gpioSelect);
6550 REG_SET_BIT(ah, AR_PHY_TEST, RFSILENT_BB);
6551
6552 if (ahp->ah_gpioBit == ath9k_hw_gpio_get(ah, ahp->ah_gpioSelect)) {
6553
6554 ath9k_hw_set_gpio_intr(ah, ahp->ah_gpioSelect,
6555 !ahp->ah_gpioBit);
6556 } else {
6557 ath9k_hw_set_gpio_intr(ah, ahp->ah_gpioSelect,
6558 ahp->ah_gpioBit);
6559 }
6560}
6561#endif
6562
6563void
6564ath9k_hw_write_associd(struct ath_hal *ah, const u8 *bssid,
6565 u16 assocId)
6566{
6567 struct ath_hal_5416 *ahp = AH5416(ah);
6568
6569 memcpy(ahp->ah_bssid, bssid, ETH_ALEN);
6570 ahp->ah_assocId = assocId;
6571
6572 REG_WRITE(ah, AR_BSS_ID0, get_unaligned_le32(ahp->ah_bssid));
6573 REG_WRITE(ah, AR_BSS_ID1, get_unaligned_le16(ahp->ah_bssid + 4) |
6574 ((assocId & 0x3fff) << AR_BSS_ID1_AID_S));
6575}
6576
6577u64 ath9k_hw_gettsf64(struct ath_hal *ah)
6578{
6579 u64 tsf;
6580
6581 tsf = REG_READ(ah, AR_TSF_U32);
6582 tsf = (tsf << 32) | REG_READ(ah, AR_TSF_L32);
6583 return tsf;
6584}
6585
6586void ath9k_hw_reset_tsf(struct ath_hal *ah)
6587{
6588 int count;
6589
6590 count = 0;
6591 while (REG_READ(ah, AR_SLP32_MODE) & AR_SLP32_TSF_WRITE_STATUS) {
6592 count++;
6593 if (count > 10) {
6594 DPRINTF(ah->ah_sc, ATH_DBG_RESET,
6595 "%s: AR_SLP32_TSF_WRITE_STATUS limit exceeded\n",
6596 __func__);
6597 break;
6598 }
6599 udelay(10);
6600 }
6601 REG_WRITE(ah, AR_RESET_TSF, AR_RESET_TSF_ONCE);
6602}
6603
6604u32 ath9k_hw_getdefantenna(struct ath_hal *ah)
6605{
6606 return REG_READ(ah, AR_DEF_ANTENNA) & 0x7;
6607}
6608
6609void ath9k_hw_setantenna(struct ath_hal *ah, u32 antenna)
6610{
6611 REG_WRITE(ah, AR_DEF_ANTENNA, (antenna & 0x7));
6612}
6613
6614bool
6615ath9k_hw_setantennaswitch(struct ath_hal *ah,
6616 enum ath9k_ant_setting settings,
6617 struct ath9k_channel *chan,
6618 u8 *tx_chainmask,
6619 u8 *rx_chainmask,
6620 u8 *antenna_cfgd)
6621{
6622 struct ath_hal_5416 *ahp = AH5416(ah);
6623 static u8 tx_chainmask_cfg, rx_chainmask_cfg;
6624
6625 if (AR_SREV_9280(ah)) {
6626 if (!tx_chainmask_cfg) {
6627
6628 tx_chainmask_cfg = *tx_chainmask;
6629 rx_chainmask_cfg = *rx_chainmask;
6630 }
6631
6632 switch (settings) {
6633 case ATH9K_ANT_FIXED_A:
6634 *tx_chainmask = ATH9K_ANTENNA0_CHAINMASK;
6635 *rx_chainmask = ATH9K_ANTENNA0_CHAINMASK;
6636 *antenna_cfgd = true;
6637 break;
6638 case ATH9K_ANT_FIXED_B:
6639 if (ah->ah_caps.tx_chainmask >
6640 ATH9K_ANTENNA1_CHAINMASK) {
6641 *tx_chainmask = ATH9K_ANTENNA1_CHAINMASK;
6642 }
6643 *rx_chainmask = ATH9K_ANTENNA1_CHAINMASK;
6644 *antenna_cfgd = true;
6645 break;
6646 case ATH9K_ANT_VARIABLE:
6647 *tx_chainmask = tx_chainmask_cfg;
6648 *rx_chainmask = rx_chainmask_cfg;
6649 *antenna_cfgd = true;
6650 break;
6651 default:
6652 break;
6653 }
6654 } else {
6655 ahp->ah_diversityControl = settings;
6656 }
6657
6658 return true;
6659}
6660
6661void ath9k_hw_setopmode(struct ath_hal *ah)
6662{
6663 ath9k_hw_set_operating_mode(ah, ah->ah_opmode);
6664}
6665
6666bool
6667ath9k_hw_getcapability(struct ath_hal *ah, enum ath9k_capability_type type,
6668 u32 capability, u32 *result)
6669{
6670 struct ath_hal_5416 *ahp = AH5416(ah);
6671 const struct ath9k_hw_capabilities *pCap = &ah->ah_caps;
6672
6673 switch (type) {
6674 case ATH9K_CAP_CIPHER:
6675 switch (capability) {
6676 case ATH9K_CIPHER_AES_CCM:
6677 case ATH9K_CIPHER_AES_OCB:
6678 case ATH9K_CIPHER_TKIP:
6679 case ATH9K_CIPHER_WEP:
6680 case ATH9K_CIPHER_MIC:
6681 case ATH9K_CIPHER_CLR:
6682 return true;
6683 default:
6684 return false;
6685 }
6686 case ATH9K_CAP_TKIP_MIC:
6687 switch (capability) {
6688 case 0:
6689 return true;
6690 case 1:
6691 return (ahp->ah_staId1Defaults &
6692 AR_STA_ID1_CRPT_MIC_ENABLE) ? true :
6693 false;
6694 }
6695 case ATH9K_CAP_TKIP_SPLIT:
6696 return (ahp->ah_miscMode & AR_PCU_MIC_NEW_LOC_ENA) ?
6697 false : true;
6698 case ATH9K_CAP_WME_TKIPMIC:
6699 return 0;
6700 case ATH9K_CAP_PHYCOUNTERS:
6701 return ahp->ah_hasHwPhyCounters ? 0 : -ENXIO;
6702 case ATH9K_CAP_DIVERSITY:
6703 return (REG_READ(ah, AR_PHY_CCK_DETECT) &
6704 AR_PHY_CCK_DETECT_BB_ENABLE_ANT_FAST_DIV) ?
6705 true : false;
6706 case ATH9K_CAP_PHYDIAG:
6707 return true;
6708 case ATH9K_CAP_MCAST_KEYSRCH:
6709 switch (capability) {
6710 case 0:
6711 return true;
6712 case 1:
6713 if (REG_READ(ah, AR_STA_ID1) & AR_STA_ID1_ADHOC) {
6714 return false;
6715 } else {
6716 return (ahp->ah_staId1Defaults &
6717 AR_STA_ID1_MCAST_KSRCH) ? true :
6718 false;
6719 }
6720 }
6721 return false;
6722 case ATH9K_CAP_TSF_ADJUST:
6723 return (ahp->ah_miscMode & AR_PCU_TX_ADD_TSF) ?
6724 true : false;
6725 case ATH9K_CAP_RFSILENT:
6726 if (capability == 3)
6727 return false;
6728 case ATH9K_CAP_ANT_CFG_2GHZ:
6729 *result = pCap->num_antcfg_2ghz;
6730 return true;
6731 case ATH9K_CAP_ANT_CFG_5GHZ:
6732 *result = pCap->num_antcfg_5ghz;
6733 return true;
6734 case ATH9K_CAP_TXPOW:
6735 switch (capability) {
6736 case 0:
6737 return 0;
6738 case 1:
6739 *result = ah->ah_powerLimit;
6740 return 0;
6741 case 2:
6742 *result = ah->ah_maxPowerLevel;
6743 return 0;
6744 case 3:
6745 *result = ah->ah_tpScale;
6746 return 0;
6747 }
6748 return false;
6749 default:
6750 return false;
6751 }
6752}
6753
6754int
6755ath9k_hw_select_antconfig(struct ath_hal *ah, u32 cfg)
6756{
6757 struct ath_hal_5416 *ahp = AH5416(ah);
6758 struct ath9k_channel *chan = ah->ah_curchan;
6759 const struct ath9k_hw_capabilities *pCap = &ah->ah_caps;
6760 u16 ant_config;
6761 u32 halNumAntConfig;
6762
6763 halNumAntConfig =
6764 IS_CHAN_2GHZ(chan) ? pCap->num_antcfg_2ghz :
6765 pCap->num_antcfg_5ghz;
6766
6767 if (cfg < halNumAntConfig) {
6768 if (!ath9k_hw_get_eeprom_antenna_cfg(ahp, chan,
6769 cfg, &ant_config)) {
6770 REG_WRITE(ah, AR_PHY_SWITCH_COM, ant_config);
6771 return 0;
6772 }
6773 }
6774
6775 return -EINVAL;
6776}
6777
6778bool ath9k_hw_intrpend(struct ath_hal *ah)
6779{
6780 u32 host_isr;
6781
6782 if (AR_SREV_9100(ah))
6783 return true;
6784
6785 host_isr = REG_READ(ah, AR_INTR_ASYNC_CAUSE);
6786 if ((host_isr & AR_INTR_MAC_IRQ) && (host_isr != AR_INTR_SPURIOUS))
6787 return true;
6788
6789 host_isr = REG_READ(ah, AR_INTR_SYNC_CAUSE);
6790 if ((host_isr & AR_INTR_SYNC_DEFAULT)
6791 && (host_isr != AR_INTR_SPURIOUS))
6792 return true;
6793
6794 return false;
6795}
6796
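/*
 * Fetch and decode the pending interrupt causes into ATH9K_INT_* bits (the
 * primary ISR is read-and-cleared via AR_ISR_RAC).  Sync causes such as PCI
 * fatal/parity errors and timeouts are logged and cleared, and a RADM
 * completion timeout is additionally reported as ATH9K_INT_FATAL.
 */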
6797bool ath9k_hw_getisr(struct ath_hal *ah, enum ath9k_int *masked)
6798{
6799 u32 isr = 0;
6800 u32 mask2 = 0;
6801 struct ath9k_hw_capabilities *pCap = &ah->ah_caps;
6802 u32 sync_cause = 0;
6803 bool fatal_int = false;
6804
6805 if (!AR_SREV_9100(ah)) {
6806 if (REG_READ(ah, AR_INTR_ASYNC_CAUSE) & AR_INTR_MAC_IRQ) {
6807 if ((REG_READ(ah, AR_RTC_STATUS) & AR_RTC_STATUS_M)
6808 == AR_RTC_STATUS_ON) {
6809 isr = REG_READ(ah, AR_ISR);
6810 }
6811 }
6812
6813 sync_cause =
6814 REG_READ(ah,
6815 AR_INTR_SYNC_CAUSE) & AR_INTR_SYNC_DEFAULT;
6816
6817 *masked = 0;
6818
6819 if (!isr && !sync_cause)
6820 return false;
6821 } else {
6822 *masked = 0;
6823 isr = REG_READ(ah, AR_ISR);
6824 }
6825
6826 if (isr) {
6827 struct ath_hal_5416 *ahp = AH5416(ah);
6828
6829 if (isr & AR_ISR_BCNMISC) {
6830 u32 isr2;
6831 isr2 = REG_READ(ah, AR_ISR_S2);
6832 if (isr2 & AR_ISR_S2_TIM)
6833 mask2 |= ATH9K_INT_TIM;
6834 if (isr2 & AR_ISR_S2_DTIM)
6835 mask2 |= ATH9K_INT_DTIM;
6836 if (isr2 & AR_ISR_S2_DTIMSYNC)
6837 mask2 |= ATH9K_INT_DTIMSYNC;
6838 if (isr2 & (AR_ISR_S2_CABEND))
6839 mask2 |= ATH9K_INT_CABEND;
6840 if (isr2 & AR_ISR_S2_GTT)
6841 mask2 |= ATH9K_INT_GTT;
6842 if (isr2 & AR_ISR_S2_CST)
6843 mask2 |= ATH9K_INT_CST;
6844 }
6845
6846 isr = REG_READ(ah, AR_ISR_RAC);
6847 if (isr == 0xffffffff) {
6848 *masked = 0;
6849 return false;
6850 }
6851
6852 *masked = isr & ATH9K_INT_COMMON;
6853
6854 if (ahp->ah_intrMitigation) {
6855
6856 if (isr & (AR_ISR_RXMINTR | AR_ISR_RXINTM))
6857 *masked |= ATH9K_INT_RX;
6858 }
6859
6860 if (isr & (AR_ISR_RXOK | AR_ISR_RXERR))
6861 *masked |= ATH9K_INT_RX;
6862 if (isr &
6863 (AR_ISR_TXOK | AR_ISR_TXDESC | AR_ISR_TXERR |
6864 AR_ISR_TXEOL)) {
6865 u32 s0_s, s1_s;
6866
6867 *masked |= ATH9K_INT_TX;
6868
6869 s0_s = REG_READ(ah, AR_ISR_S0_S);
6870 ahp->ah_intrTxqs |= MS(s0_s, AR_ISR_S0_QCU_TXOK);
6871 ahp->ah_intrTxqs |= MS(s0_s, AR_ISR_S0_QCU_TXDESC);
6872
6873 s1_s = REG_READ(ah, AR_ISR_S1_S);
6874 ahp->ah_intrTxqs |= MS(s1_s, AR_ISR_S1_QCU_TXERR);
6875 ahp->ah_intrTxqs |= MS(s1_s, AR_ISR_S1_QCU_TXEOL);
6876 }
6877
6878 if (isr & AR_ISR_RXORN) {
6879 DPRINTF(ah->ah_sc, ATH_DBG_INTERRUPT,
6880 "%s: receive FIFO overrun interrupt\n",
6881 __func__);
6882 }
6883
6884 if (!AR_SREV_9100(ah)) {
6885 if (!(pCap->hw_caps & ATH9K_HW_CAP_AUTOSLEEP)) {
6886 u32 isr5 = REG_READ(ah, AR_ISR_S5_S);
6887 if (isr5 & AR_ISR_S5_TIM_TIMER)
6888 *masked |= ATH9K_INT_TIM_TIMER;
6889 }
6890 }
6891
6892 *masked |= mask2;
6893 }
6894 if (AR_SREV_9100(ah))
6895 return true;
6896 if (sync_cause) {
6897 fatal_int =
6898 (sync_cause &
6899 (AR_INTR_SYNC_HOST1_FATAL | AR_INTR_SYNC_HOST1_PERR))
6900 ? true : false;
6901
6902 if (fatal_int) {
6903 if (sync_cause & AR_INTR_SYNC_HOST1_FATAL) {
6904 DPRINTF(ah->ah_sc, ATH_DBG_ANY,
6905 "%s: received PCI FATAL interrupt\n",
6906 __func__);
6907 }
6908 if (sync_cause & AR_INTR_SYNC_HOST1_PERR) {
6909 DPRINTF(ah->ah_sc, ATH_DBG_ANY,
6910 "%s: received PCI PERR interrupt\n",
6911 __func__);
6912 }
6913 }
6914 if (sync_cause & AR_INTR_SYNC_RADM_CPL_TIMEOUT) {
6915 DPRINTF(ah->ah_sc, ATH_DBG_INTERRUPT,
6916 "%s: AR_INTR_SYNC_RADM_CPL_TIMEOUT\n",
6917 __func__);
6918 REG_WRITE(ah, AR_RC, AR_RC_HOSTIF);
6919 REG_WRITE(ah, AR_RC, 0);
6920 *masked |= ATH9K_INT_FATAL;
6921 }
6922 if (sync_cause & AR_INTR_SYNC_LOCAL_TIMEOUT) {
6923 DPRINTF(ah->ah_sc, ATH_DBG_INTERRUPT,
6924 "%s: AR_INTR_SYNC_LOCAL_TIMEOUT\n",
6925 __func__);
6926 }
6927
6928 REG_WRITE(ah, AR_INTR_SYNC_CAUSE_CLR, sync_cause);
6929 (void) REG_READ(ah, AR_INTR_SYNC_CAUSE_CLR);
6930 }
6931 return true;
6932}
6933
6934enum ath9k_int ath9k_hw_intrget(struct ath_hal *ah)
6935{
6936 return AH5416(ah)->ah_maskReg;
6937}
6938
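/*
 * Re-program the interrupt mask registers from the requested ATH9K_INT_* bits.
 * The global IER is disabled while AR_IMR/AR_IMR_S2 are rewritten and
 * re-enabled afterwards; the previous mask is returned so callers can restore
 * it.
 */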
6939enum ath9k_int ath9k_hw_set_interrupts(struct ath_hal *ah, enum ath9k_int ints)
6940{
6941 struct ath_hal_5416 *ahp = AH5416(ah);
6942 u32 omask = ahp->ah_maskReg;
6943 u32 mask, mask2;
6944 struct ath9k_hw_capabilities *pCap = &ah->ah_caps;
6945
6946 DPRINTF(ah->ah_sc, ATH_DBG_INTERRUPT, "%s: 0x%x => 0x%x\n", __func__,
6947 omask, ints);
6948
6949 if (omask & ATH9K_INT_GLOBAL) {
6950 DPRINTF(ah->ah_sc, ATH_DBG_INTERRUPT, "%s: disable IER\n",
6951 __func__);
6952 REG_WRITE(ah, AR_IER, AR_IER_DISABLE);
6953 (void) REG_READ(ah, AR_IER);
6954 if (!AR_SREV_9100(ah)) {
6955 REG_WRITE(ah, AR_INTR_ASYNC_ENABLE, 0);
6956 (void) REG_READ(ah, AR_INTR_ASYNC_ENABLE);
6957
6958 REG_WRITE(ah, AR_INTR_SYNC_ENABLE, 0);
6959 (void) REG_READ(ah, AR_INTR_SYNC_ENABLE);
6960 }
6961 }
6962
6963 mask = ints & ATH9K_INT_COMMON;
6964 mask2 = 0;
6965
6966 if (ints & ATH9K_INT_TX) {
6967 if (ahp->ah_txOkInterruptMask)
6968 mask |= AR_IMR_TXOK;
6969 if (ahp->ah_txDescInterruptMask)
6970 mask |= AR_IMR_TXDESC;
6971 if (ahp->ah_txErrInterruptMask)
6972 mask |= AR_IMR_TXERR;
6973 if (ahp->ah_txEolInterruptMask)
6974 mask |= AR_IMR_TXEOL;
6975 }
6976 if (ints & ATH9K_INT_RX) {
6977 mask |= AR_IMR_RXERR;
6978 if (ahp->ah_intrMitigation)
6979 mask |= AR_IMR_RXMINTR | AR_IMR_RXINTM;
6980 else
6981 mask |= AR_IMR_RXOK | AR_IMR_RXDESC;
6982 if (!(pCap->hw_caps & ATH9K_HW_CAP_AUTOSLEEP))
6983 mask |= AR_IMR_GENTMR;
6984 }
6985
6986 if (ints & (ATH9K_INT_BMISC)) {
6987 mask |= AR_IMR_BCNMISC;
6988 if (ints & ATH9K_INT_TIM)
6989 mask2 |= AR_IMR_S2_TIM;
6990 if (ints & ATH9K_INT_DTIM)
6991 mask2 |= AR_IMR_S2_DTIM;
6992 if (ints & ATH9K_INT_DTIMSYNC)
6993 mask2 |= AR_IMR_S2_DTIMSYNC;
6994 if (ints & ATH9K_INT_CABEND)
6995 mask2 |= (AR_IMR_S2_CABEND);
6996 }
6997
6998 if (ints & (ATH9K_INT_GTT | ATH9K_INT_CST)) {
6999 mask |= AR_IMR_BCNMISC;
7000 if (ints & ATH9K_INT_GTT)
7001 mask2 |= AR_IMR_S2_GTT;
7002 if (ints & ATH9K_INT_CST)
7003 mask2 |= AR_IMR_S2_CST;
7004 }
7005
7006 DPRINTF(ah->ah_sc, ATH_DBG_INTERRUPT, "%s: new IMR 0x%x\n", __func__,
7007 mask);
7008 REG_WRITE(ah, AR_IMR, mask);
7009 mask = REG_READ(ah, AR_IMR_S2) & ~(AR_IMR_S2_TIM |
7010 AR_IMR_S2_DTIM |
7011 AR_IMR_S2_DTIMSYNC |
7012 AR_IMR_S2_CABEND |
7013 AR_IMR_S2_CABTO |
7014 AR_IMR_S2_TSFOOR |
7015 AR_IMR_S2_GTT | AR_IMR_S2_CST);
7016 REG_WRITE(ah, AR_IMR_S2, mask | mask2);
7017 ahp->ah_maskReg = ints;
7018
7019 if (!(pCap->hw_caps & ATH9K_HW_CAP_AUTOSLEEP)) {
7020 if (ints & ATH9K_INT_TIM_TIMER)
7021 REG_SET_BIT(ah, AR_IMR_S5, AR_IMR_S5_TIM_TIMER);
7022 else
7023 REG_CLR_BIT(ah, AR_IMR_S5, AR_IMR_S5_TIM_TIMER);
7024 }
7025
7026 if (ints & ATH9K_INT_GLOBAL) {
7027 DPRINTF(ah->ah_sc, ATH_DBG_INTERRUPT, "%s: enable IER\n",
7028 __func__);
7029 REG_WRITE(ah, AR_IER, AR_IER_ENABLE);
7030 if (!AR_SREV_9100(ah)) {
7031 REG_WRITE(ah, AR_INTR_ASYNC_ENABLE,
7032 AR_INTR_MAC_IRQ);
7033 REG_WRITE(ah, AR_INTR_ASYNC_MASK, AR_INTR_MAC_IRQ);
7034
7035
7036 REG_WRITE(ah, AR_INTR_SYNC_ENABLE,
7037 AR_INTR_SYNC_DEFAULT);
7038 REG_WRITE(ah, AR_INTR_SYNC_MASK,
7039 AR_INTR_SYNC_DEFAULT);
7040 }
7041 DPRINTF(ah->ah_sc, ATH_DBG_INTERRUPT, "AR_IMR 0x%x IER 0x%x\n",
7042 REG_READ(ah, AR_IMR), REG_READ(ah, AR_IER));
7043 }
7044
7045 return omask;
7046}
7047
7048void
7049ath9k_hw_beaconinit(struct ath_hal *ah,
7050 u32 next_beacon, u32 beacon_period)
7051{
7052 struct ath_hal_5416 *ahp = AH5416(ah);
7053 int flags = 0;
7054
7055 ahp->ah_beaconInterval = beacon_period;
7056
7057 switch (ah->ah_opmode) {
7058 case ATH9K_M_STA:
7059 case ATH9K_M_MONITOR:
7060 REG_WRITE(ah, AR_NEXT_TBTT_TIMER, TU_TO_USEC(next_beacon));
7061 REG_WRITE(ah, AR_NEXT_DMA_BEACON_ALERT, 0xffff);
7062 REG_WRITE(ah, AR_NEXT_SWBA, 0x7ffff);
7063 flags |= AR_TBTT_TIMER_EN;
7064 break;
7065 case ATH9K_M_IBSS:
7066 REG_SET_BIT(ah, AR_TXCFG,
7067 AR_TXCFG_ADHOC_BEACON_ATIM_TX_POLICY);
7068 REG_WRITE(ah, AR_NEXT_NDP_TIMER,
7069 TU_TO_USEC(next_beacon +
7070 (ahp->ah_atimWindow ?
7071 ahp->ah_atimWindow : 1)));
7072 flags |= AR_NDP_TIMER_EN;
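 /* no break: IBSS continues into the HOSTAP case to also program the TBTT, DMA-beacon and SWBA timers */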
7073 case ATH9K_M_HOSTAP:
7074 REG_WRITE(ah, AR_NEXT_TBTT_TIMER, TU_TO_USEC(next_beacon));
7075 REG_WRITE(ah, AR_NEXT_DMA_BEACON_ALERT,
7076 TU_TO_USEC(next_beacon -
7077 ah->ah_config.
7078 dma_beacon_response_time));
7079 REG_WRITE(ah, AR_NEXT_SWBA,
7080 TU_TO_USEC(next_beacon -
7081 ah->ah_config.
7082 sw_beacon_response_time));
7083 flags |=
7084 AR_TBTT_TIMER_EN | AR_DBA_TIMER_EN | AR_SWBA_TIMER_EN;
7085 break;
7086 }
7087
7088 REG_WRITE(ah, AR_BEACON_PERIOD, TU_TO_USEC(beacon_period));
7089 REG_WRITE(ah, AR_DMA_BEACON_PERIOD, TU_TO_USEC(beacon_period));
7090 REG_WRITE(ah, AR_SWBA_PERIOD, TU_TO_USEC(beacon_period));
7091 REG_WRITE(ah, AR_NDP_PERIOD, TU_TO_USEC(beacon_period));
7092
7093 beacon_period &= ~ATH9K_BEACON_ENA;
7094 if (beacon_period & ATH9K_BEACON_RESET_TSF) {
7095 beacon_period &= ~ATH9K_BEACON_RESET_TSF;
7096 ath9k_hw_reset_tsf(ah);
7097 }
7098
7099 REG_SET_BIT(ah, AR_TIMER_MODE, flags);
7100}
7101
7102void
7103ath9k_hw_set_sta_beacon_timers(struct ath_hal *ah,
7104 const struct ath9k_beacon_state *bs)
7105{
7106 u32 nextTbtt, beaconintval, dtimperiod, beacontimeout;
7107 struct ath9k_hw_capabilities *pCap = &ah->ah_caps;
7108
7109 REG_WRITE(ah, AR_NEXT_TBTT_TIMER, TU_TO_USEC(bs->bs_nexttbtt));
7110
7111 REG_WRITE(ah, AR_BEACON_PERIOD,
7112 TU_TO_USEC(bs->bs_intval & ATH9K_BEACON_PERIOD));
7113 REG_WRITE(ah, AR_DMA_BEACON_PERIOD,
7114 TU_TO_USEC(bs->bs_intval & ATH9K_BEACON_PERIOD));
7115
7116 REG_RMW_FIELD(ah, AR_RSSI_THR,
7117 AR_RSSI_THR_BM_THR, bs->bs_bmissthreshold);
7118
7119 beaconintval = bs->bs_intval & ATH9K_BEACON_PERIOD;
7120
7121 if (bs->bs_sleepduration > beaconintval)
7122 beaconintval = bs->bs_sleepduration;
7123
7124 dtimperiod = bs->bs_dtimperiod;
7125 if (bs->bs_sleepduration > dtimperiod)
7126 dtimperiod = bs->bs_sleepduration;
7127
7128 if (beaconintval == dtimperiod)
7129 nextTbtt = bs->bs_nextdtim;
7130 else
7131 nextTbtt = bs->bs_nexttbtt;
7132
7133 DPRINTF(ah->ah_sc, ATH_DBG_BEACON, "%s: next DTIM %d\n", __func__,
7134 bs->bs_nextdtim);
7135 DPRINTF(ah->ah_sc, ATH_DBG_BEACON, "%s: next beacon %d\n", __func__,
7136 nextTbtt);
7137 DPRINTF(ah->ah_sc, ATH_DBG_BEACON, "%s: beacon period %d\n", __func__,
7138 beaconintval);
7139 DPRINTF(ah->ah_sc, ATH_DBG_BEACON, "%s: DTIM period %d\n", __func__,
7140 dtimperiod);
7141
7142 REG_WRITE(ah, AR_NEXT_DTIM,
7143 TU_TO_USEC(bs->bs_nextdtim - SLEEP_SLOP));
7144 REG_WRITE(ah, AR_NEXT_TIM, TU_TO_USEC(nextTbtt - SLEEP_SLOP));
7145
7146 REG_WRITE(ah, AR_SLEEP1,
7147 SM((CAB_TIMEOUT_VAL << 3), AR_SLEEP1_CAB_TIMEOUT)
7148 | AR_SLEEP1_ASSUME_DTIM);
7149
7150 if (pCap->hw_caps & ATH9K_HW_CAP_AUTOSLEEP)
7151 beacontimeout = (BEACON_TIMEOUT_VAL << 3);
7152 else
7153 beacontimeout = MIN_BEACON_TIMEOUT_VAL;
7154
7155 REG_WRITE(ah, AR_SLEEP2,
7156 SM(beacontimeout, AR_SLEEP2_BEACON_TIMEOUT));
7157
7158 REG_WRITE(ah, AR_TIM_PERIOD, TU_TO_USEC(beaconintval));
7159 REG_WRITE(ah, AR_DTIM_PERIOD, TU_TO_USEC(dtimperiod));
7160
7161 REG_SET_BIT(ah, AR_TIMER_MODE,
7162 AR_TBTT_TIMER_EN | AR_TIM_TIMER_EN |
7163 AR_DTIM_TIMER_EN);
7164
7165}
7166
7167bool ath9k_hw_keyisvalid(struct ath_hal *ah, u16 entry)
7168{
7169 if (entry < ah->ah_caps.keycache_size) {
7170 u32 val = REG_READ(ah, AR_KEYTABLE_MAC1(entry));
7171 if (val & AR_KEYTABLE_VALID)
7172 return true;
7173 }
7174 return false;
7175}
7176
7177bool ath9k_hw_keyreset(struct ath_hal *ah, u16 entry)
7178{
7179 u32 keyType;
7180
7181 if (entry >= ah->ah_caps.keycache_size) {
7182 DPRINTF(ah->ah_sc, ATH_DBG_KEYCACHE,
7183 "%s: entry %u out of range\n", __func__, entry);
7184 return false;
7185 }
7186 keyType = REG_READ(ah, AR_KEYTABLE_TYPE(entry));
7187
7188 REG_WRITE(ah, AR_KEYTABLE_KEY0(entry), 0);
7189 REG_WRITE(ah, AR_KEYTABLE_KEY1(entry), 0);
7190 REG_WRITE(ah, AR_KEYTABLE_KEY2(entry), 0);
7191 REG_WRITE(ah, AR_KEYTABLE_KEY3(entry), 0);
7192 REG_WRITE(ah, AR_KEYTABLE_KEY4(entry), 0);
7193 REG_WRITE(ah, AR_KEYTABLE_TYPE(entry), AR_KEYTABLE_TYPE_CLR);
7194 REG_WRITE(ah, AR_KEYTABLE_MAC0(entry), 0);
7195 REG_WRITE(ah, AR_KEYTABLE_MAC1(entry), 0);
7196
7197 if (keyType == AR_KEYTABLE_TYPE_TKIP && ATH9K_IS_MIC_ENABLED(ah)) {
7198 u16 micentry = entry + 64;
7199
7200 REG_WRITE(ah, AR_KEYTABLE_KEY0(micentry), 0);
7201 REG_WRITE(ah, AR_KEYTABLE_KEY1(micentry), 0);
7202 REG_WRITE(ah, AR_KEYTABLE_KEY2(micentry), 0);
7203 REG_WRITE(ah, AR_KEYTABLE_KEY3(micentry), 0);
7204
7205 }
7206
7207 if (ah->ah_curchan == NULL)
7208 return true;
7209
7210 return true;
7211}
7212
7213bool
7214ath9k_hw_keysetmac(struct ath_hal *ah, u16 entry,
7215 const u8 *mac)
7216{
7217 u32 macHi, macLo;
7218
7219 if (entry >= ah->ah_caps.keycache_size) {
7220 DPRINTF(ah->ah_sc, ATH_DBG_KEYCACHE,
7221 "%s: entry %u out of range\n", __func__, entry);
7222 return false;
7223 }
7224
7225 if (mac != NULL) {
7226 macHi = (mac[5] << 8) | mac[4];
7227 macLo = (mac[3] << 24) | (mac[2] << 16)
7228 | (mac[1] << 8) | mac[0];
7229 macLo >>= 1;
7230 macLo |= (macHi & 1) << 31;
7231 macHi >>= 1;
7232 } else {
7233 macLo = macHi = 0;
7234 }
7235 REG_WRITE(ah, AR_KEYTABLE_MAC0(entry), macLo);
7236 REG_WRITE(ah, AR_KEYTABLE_MAC1(entry), macHi | AR_KEYTABLE_VALID);
7237
7238 return true;
7239}
7240
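/*
 * Program one key cache slot.  For TKIP keys with hardware MIC enabled the
 * companion slot at (entry + 64) is also written with the MIC key material,
 * which is why such entries must leave room for the offset slot.
 */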
7241bool
7242ath9k_hw_set_keycache_entry(struct ath_hal *ah, u16 entry,
7243 const struct ath9k_keyval *k,
7244 const u8 *mac, int xorKey)
7245{
7246 const struct ath9k_hw_capabilities *pCap = &ah->ah_caps;
7247 u32 key0, key1, key2, key3, key4;
7248 u32 keyType;
7249 u32 xorMask = xorKey ?
7250 (ATH9K_KEY_XOR << 24 | ATH9K_KEY_XOR << 16 | ATH9K_KEY_XOR << 8
7251 | ATH9K_KEY_XOR) : 0;
7252 struct ath_hal_5416 *ahp = AH5416(ah);
7253
7254 if (entry >= pCap->keycache_size) {
7255 DPRINTF(ah->ah_sc, ATH_DBG_KEYCACHE,
7256 "%s: entry %u out of range\n", __func__, entry);
7257 return false;
7258 }
7259 switch (k->kv_type) {
7260 case ATH9K_CIPHER_AES_OCB:
7261 keyType = AR_KEYTABLE_TYPE_AES;
7262 break;
7263 case ATH9K_CIPHER_AES_CCM:
7264 if (!(pCap->hw_caps & ATH9K_HW_CAP_CIPHER_AESCCM)) {
7265 DPRINTF(ah->ah_sc, ATH_DBG_KEYCACHE,
7266 "%s: AES-CCM not supported by "
7267 "mac rev 0x%x\n", __func__,
7268 ah->ah_macRev);
7269 return false;
7270 }
7271 keyType = AR_KEYTABLE_TYPE_CCM;
7272 break;
7273 case ATH9K_CIPHER_TKIP:
7274 keyType = AR_KEYTABLE_TYPE_TKIP;
7275 if (ATH9K_IS_MIC_ENABLED(ah)
7276 && entry + 64 >= pCap->keycache_size) {
7277 DPRINTF(ah->ah_sc, ATH_DBG_KEYCACHE,
7278 "%s: entry %u inappropriate for TKIP\n",
7279 __func__, entry);
7280 return false;
7281 }
7282 break;
7283 case ATH9K_CIPHER_WEP:
7284 if (k->kv_len < 40 / NBBY) {
7285 DPRINTF(ah->ah_sc, ATH_DBG_KEYCACHE,
7286 "%s: WEP key length %u too small\n",
7287 __func__, k->kv_len);
7288 return false;
7289 }
7290 if (k->kv_len <= 40 / NBBY)
7291 keyType = AR_KEYTABLE_TYPE_40;
7292 else if (k->kv_len <= 104 / NBBY)
7293 keyType = AR_KEYTABLE_TYPE_104;
7294 else
7295 keyType = AR_KEYTABLE_TYPE_128;
7296 break;
7297 case ATH9K_CIPHER_CLR:
7298 keyType = AR_KEYTABLE_TYPE_CLR;
7299 break;
7300 default:
7301 DPRINTF(ah->ah_sc, ATH_DBG_KEYCACHE,
7302 "%s: cipher %u not supported\n", __func__,
7303 k->kv_type);
7304 return false;
7305 }
7306
7307 key0 = get_unaligned_le32(k->kv_val + 0) ^ xorMask;
7308 key1 = (get_unaligned_le16(k->kv_val + 4) ^ xorMask) & 0xffff;
7309 key2 = get_unaligned_le32(k->kv_val + 6) ^ xorMask;
7310 key3 = (get_unaligned_le16(k->kv_val + 10) ^ xorMask) & 0xffff;
7311 key4 = get_unaligned_le32(k->kv_val + 12) ^ xorMask;
7312 if (k->kv_len <= 104 / NBBY)
7313 key4 &= 0xff;
7314
7315 if (keyType == AR_KEYTABLE_TYPE_TKIP && ATH9K_IS_MIC_ENABLED(ah)) {
7316 u16 micentry = entry + 64;
7317
7318 REG_WRITE(ah, AR_KEYTABLE_KEY0(entry), ~key0);
7319 REG_WRITE(ah, AR_KEYTABLE_KEY1(entry), ~key1);
7320 REG_WRITE(ah, AR_KEYTABLE_KEY2(entry), key2);
7321 REG_WRITE(ah, AR_KEYTABLE_KEY3(entry), key3);
7322 REG_WRITE(ah, AR_KEYTABLE_KEY4(entry), key4);
7323 REG_WRITE(ah, AR_KEYTABLE_TYPE(entry), keyType);
7324 (void) ath9k_hw_keysetmac(ah, entry, mac);
7325
7326 if (ahp->ah_miscMode & AR_PCU_MIC_NEW_LOC_ENA) {
7327 u32 mic0, mic1, mic2, mic3, mic4;
7328
7329 mic0 = get_unaligned_le32(k->kv_mic + 0);
7330 mic2 = get_unaligned_le32(k->kv_mic + 4);
7331 mic1 = get_unaligned_le16(k->kv_txmic + 2) & 0xffff;
7332 mic3 = get_unaligned_le16(k->kv_txmic + 0) & 0xffff;
7333 mic4 = get_unaligned_le32(k->kv_txmic + 4);
7334 REG_WRITE(ah, AR_KEYTABLE_KEY0(micentry), mic0);
7335 REG_WRITE(ah, AR_KEYTABLE_KEY1(micentry), mic1);
7336 REG_WRITE(ah, AR_KEYTABLE_KEY2(micentry), mic2);
7337 REG_WRITE(ah, AR_KEYTABLE_KEY3(micentry), mic3);
7338 REG_WRITE(ah, AR_KEYTABLE_KEY4(micentry), mic4);
7339 REG_WRITE(ah, AR_KEYTABLE_TYPE(micentry),
7340 AR_KEYTABLE_TYPE_CLR);
7341
7342 } else {
7343 u32 mic0, mic2;
7344
7345 mic0 = get_unaligned_le32(k->kv_mic + 0);
7346 mic2 = get_unaligned_le32(k->kv_mic + 4);
7347 REG_WRITE(ah, AR_KEYTABLE_KEY0(micentry), mic0);
7348 REG_WRITE(ah, AR_KEYTABLE_KEY1(micentry), 0);
7349 REG_WRITE(ah, AR_KEYTABLE_KEY2(micentry), mic2);
7350 REG_WRITE(ah, AR_KEYTABLE_KEY3(micentry), 0);
7351 REG_WRITE(ah, AR_KEYTABLE_KEY4(micentry), 0);
7352 REG_WRITE(ah, AR_KEYTABLE_TYPE(micentry),
7353 AR_KEYTABLE_TYPE_CLR);
7354 }
7355 REG_WRITE(ah, AR_KEYTABLE_MAC0(micentry), 0);
7356 REG_WRITE(ah, AR_KEYTABLE_MAC1(micentry), 0);
7357 REG_WRITE(ah, AR_KEYTABLE_KEY0(entry), key0);
7358 REG_WRITE(ah, AR_KEYTABLE_KEY1(entry), key1);
7359 } else {
7360 REG_WRITE(ah, AR_KEYTABLE_KEY0(entry), key0);
7361 REG_WRITE(ah, AR_KEYTABLE_KEY1(entry), key1);
7362 REG_WRITE(ah, AR_KEYTABLE_KEY2(entry), key2);
7363 REG_WRITE(ah, AR_KEYTABLE_KEY3(entry), key3);
7364 REG_WRITE(ah, AR_KEYTABLE_KEY4(entry), key4);
7365 REG_WRITE(ah, AR_KEYTABLE_TYPE(entry), keyType);
7366
7367 (void) ath9k_hw_keysetmac(ah, entry, mac);
7368 }
7369
7370 if (ah->ah_curchan == NULL)
7371 return true;
7372
7373 return true;
7374}
7375
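/*
 * Raise or lower the TX FIFO trigger level by one step.  Interrupts are
 * masked while AR_TXCFG is rewritten, and the function returns true only if
 * the level actually changed.
 */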
7376bool
7377ath9k_hw_updatetxtriglevel(struct ath_hal *ah, bool bIncTrigLevel)
7378{
7379 struct ath_hal_5416 *ahp = AH5416(ah);
7380 u32 txcfg, curLevel, newLevel;
7381 enum ath9k_int omask;
7382
7383 if (ah->ah_txTrigLevel >= MAX_TX_FIFO_THRESHOLD)
7384 return false;
7385
7386 omask = ath9k_hw_set_interrupts(ah,
7387 ahp->ah_maskReg & ~ATH9K_INT_GLOBAL);
7388
7389 txcfg = REG_READ(ah, AR_TXCFG);
7390 curLevel = MS(txcfg, AR_FTRIG);
7391 newLevel = curLevel;
7392 if (bIncTrigLevel) {
7393 if (curLevel < MAX_TX_FIFO_THRESHOLD)
7394 newLevel++;
7395 } else if (curLevel > MIN_TX_FIFO_THRESHOLD)
7396 newLevel--;
7397 if (newLevel != curLevel)
7398 REG_WRITE(ah, AR_TXCFG,
7399 (txcfg & ~AR_FTRIG) | SM(newLevel, AR_FTRIG));
7400
7401 ath9k_hw_set_interrupts(ah, omask);
7402
7403 ah->ah_txTrigLevel = newLevel;
7404
7405 return newLevel != curLevel;
7406}
7407
7408bool ath9k_hw_set_txq_props(struct ath_hal *ah, int q,
7409 const struct ath9k_tx_queue_info *qinfo)
7410{
7411 u32 cw;
7412 struct ath_hal_5416 *ahp = AH5416(ah);
7413 struct ath9k_hw_capabilities *pCap = &ah->ah_caps;
7414 struct ath9k_tx_queue_info *qi;
7415
7416 if (q >= pCap->total_queues) {
7417 DPRINTF(ah->ah_sc, ATH_DBG_QUEUE, "%s: invalid queue num %u\n",
7418 __func__, q);
7419 return false;
7420 }
7421
7422 qi = &ahp->ah_txq[q];
7423 if (qi->tqi_type == ATH9K_TX_QUEUE_INACTIVE) {
7424 DPRINTF(ah->ah_sc, ATH_DBG_QUEUE, "%s: inactive queue\n",
7425 __func__);
7426 return false;
7427 }
7428
7429 DPRINTF(ah->ah_sc, ATH_DBG_QUEUE, "%s: queue %p\n", __func__, qi);
7430
7431 qi->tqi_ver = qinfo->tqi_ver;
7432 qi->tqi_subtype = qinfo->tqi_subtype;
7433 qi->tqi_qflags = qinfo->tqi_qflags;
7434 qi->tqi_priority = qinfo->tqi_priority;
7435 if (qinfo->tqi_aifs != ATH9K_TXQ_USEDEFAULT)
7436 qi->tqi_aifs = min(qinfo->tqi_aifs, 255U);
7437 else
7438 qi->tqi_aifs = INIT_AIFS;
7439 if (qinfo->tqi_cwmin != ATH9K_TXQ_USEDEFAULT) {
7440 cw = min(qinfo->tqi_cwmin, 1024U);
7441 qi->tqi_cwmin = 1;
7442 while (qi->tqi_cwmin < cw)
7443 qi->tqi_cwmin = (qi->tqi_cwmin << 1) | 1;
7444 } else
7445 qi->tqi_cwmin = qinfo->tqi_cwmin;
7446 if (qinfo->tqi_cwmax != ATH9K_TXQ_USEDEFAULT) {
7447 cw = min(qinfo->tqi_cwmax, 1024U);
7448 qi->tqi_cwmax = 1;
7449 while (qi->tqi_cwmax < cw)
7450 qi->tqi_cwmax = (qi->tqi_cwmax << 1) | 1;
7451 } else
7452 qi->tqi_cwmax = INIT_CWMAX;
7453
7454 if (qinfo->tqi_shretry != 0)
7455 qi->tqi_shretry = min((u32) qinfo->tqi_shretry, 15U);
7456 else
7457 qi->tqi_shretry = INIT_SH_RETRY;
7458 if (qinfo->tqi_lgretry != 0)
7459 qi->tqi_lgretry = min((u32) qinfo->tqi_lgretry, 15U);
7460 else
7461 qi->tqi_lgretry = INIT_LG_RETRY;
7462 qi->tqi_cbrPeriod = qinfo->tqi_cbrPeriod;
7463 qi->tqi_cbrOverflowLimit = qinfo->tqi_cbrOverflowLimit;
7464 qi->tqi_burstTime = qinfo->tqi_burstTime;
7465 qi->tqi_readyTime = qinfo->tqi_readyTime;
7466
7467 switch (qinfo->tqi_subtype) {
7468 case ATH9K_WME_UPSD:
7469 if (qi->tqi_type == ATH9K_TX_QUEUE_DATA)
7470 qi->tqi_intFlags = ATH9K_TXQ_USE_LOCKOUT_BKOFF_DIS;
7471 break;
7472 default:
7473 break;
7474 }
7475 return true;
7476}
7477
7478bool ath9k_hw_get_txq_props(struct ath_hal *ah, int q,
7479 struct ath9k_tx_queue_info *qinfo)
7480{
7481 struct ath_hal_5416 *ahp = AH5416(ah);
7482 struct ath9k_hw_capabilities *pCap = &ah->ah_caps;
7483 struct ath9k_tx_queue_info *qi;
7484
7485 if (q >= pCap->total_queues) {
7486 DPRINTF(ah->ah_sc, ATH_DBG_QUEUE, "%s: invalid queue num %u\n",
7487 __func__, q);
7488 return false;
7489 }
7490
7491 qi = &ahp->ah_txq[q];
7492 if (qi->tqi_type == ATH9K_TX_QUEUE_INACTIVE) {
7493 DPRINTF(ah->ah_sc, ATH_DBG_QUEUE, "%s: inactive queue\n",
7494 __func__);
7495 return false;
7496 }
7497
7498 qinfo->tqi_qflags = qi->tqi_qflags;
7499 qinfo->tqi_ver = qi->tqi_ver;
7500 qinfo->tqi_subtype = qi->tqi_subtype;
7501 qinfo->tqi_qflags = qi->tqi_qflags;
7502 qinfo->tqi_priority = qi->tqi_priority;
7503 qinfo->tqi_aifs = qi->tqi_aifs;
7504 qinfo->tqi_cwmin = qi->tqi_cwmin;
7505 qinfo->tqi_cwmax = qi->tqi_cwmax;
7506 qinfo->tqi_shretry = qi->tqi_shretry;
7507 qinfo->tqi_lgretry = qi->tqi_lgretry;
7508 qinfo->tqi_cbrPeriod = qi->tqi_cbrPeriod;
7509 qinfo->tqi_cbrOverflowLimit = qi->tqi_cbrOverflowLimit;
7510 qinfo->tqi_burstTime = qi->tqi_burstTime;
7511 qinfo->tqi_readyTime = qi->tqi_readyTime;
7512
7513 return true;
7514}
7515
7516int
7517ath9k_hw_setuptxqueue(struct ath_hal *ah, enum ath9k_tx_queue type,
7518 const struct ath9k_tx_queue_info *qinfo)
7519{
7520 struct ath_hal_5416 *ahp = AH5416(ah);
7521 struct ath9k_tx_queue_info *qi;
7522 struct ath9k_hw_capabilities *pCap = &ah->ah_caps;
7523 int q;
7524
7525 switch (type) {
7526 case ATH9K_TX_QUEUE_BEACON:
7527 q = pCap->total_queues - 1;
7528 break;
7529 case ATH9K_TX_QUEUE_CAB:
7530 q = pCap->total_queues - 2;
7531 break;
7532 case ATH9K_TX_QUEUE_PSPOLL:
7533 q = 1;
7534 break;
7535 case ATH9K_TX_QUEUE_UAPSD:
7536 q = pCap->total_queues - 3;
7537 break;
7538 case ATH9K_TX_QUEUE_DATA:
7539 for (q = 0; q < pCap->total_queues; q++)
7540 if (ahp->ah_txq[q].tqi_type ==
7541 ATH9K_TX_QUEUE_INACTIVE)
7542 break;
7543 if (q == pCap->total_queues) {
7544 DPRINTF(ah->ah_sc, ATH_DBG_QUEUE,
7545 "%s: no available tx queue\n", __func__);
7546 return -1;
7547 }
7548 break;
7549 default:
7550 DPRINTF(ah->ah_sc, ATH_DBG_QUEUE, "%s: bad tx queue type %u\n",
7551 __func__, type);
7552 return -1;
7553 }
7554
7555 DPRINTF(ah->ah_sc, ATH_DBG_QUEUE, "%s: queue %u\n", __func__, q);
7556
7557 qi = &ahp->ah_txq[q];
7558 if (qi->tqi_type != ATH9K_TX_QUEUE_INACTIVE) {
7559 DPRINTF(ah->ah_sc, ATH_DBG_QUEUE,
7560 "%s: tx queue %u already active\n", __func__, q);
7561 return -1;
7562 }
7563 memset(qi, 0, sizeof(struct ath9k_tx_queue_info));
7564 qi->tqi_type = type;
7565 if (qinfo == NULL) {
7566 qi->tqi_qflags =
7567 TXQ_FLAG_TXOKINT_ENABLE
7568 | TXQ_FLAG_TXERRINT_ENABLE
7569 | TXQ_FLAG_TXDESCINT_ENABLE | TXQ_FLAG_TXURNINT_ENABLE;
7570 qi->tqi_aifs = INIT_AIFS;
7571 qi->tqi_cwmin = ATH9K_TXQ_USEDEFAULT;
7572 qi->tqi_cwmax = INIT_CWMAX;
7573 qi->tqi_shretry = INIT_SH_RETRY;
7574 qi->tqi_lgretry = INIT_LG_RETRY;
7575 qi->tqi_physCompBuf = 0;
7576 } else {
7577 qi->tqi_physCompBuf = qinfo->tqi_physCompBuf;
7578 (void) ath9k_hw_set_txq_props(ah, q, qinfo);
7579 }
7580
7581 return q;
7582}
7583
7584static void
7585ath9k_hw_set_txq_interrupts(struct ath_hal *ah,
7586 struct ath9k_tx_queue_info *qi)
7587{
7588 struct ath_hal_5416 *ahp = AH5416(ah);
7589
7590 DPRINTF(ah->ah_sc, ATH_DBG_INTERRUPT,
7591 "%s: tx ok 0x%x err 0x%x desc 0x%x eol 0x%x urn 0x%x\n",
7592 __func__, ahp->ah_txOkInterruptMask,
7593 ahp->ah_txErrInterruptMask, ahp->ah_txDescInterruptMask,
7594 ahp->ah_txEolInterruptMask, ahp->ah_txUrnInterruptMask);
7595
7596 REG_WRITE(ah, AR_IMR_S0,
7597 SM(ahp->ah_txOkInterruptMask, AR_IMR_S0_QCU_TXOK)
7598 | SM(ahp->ah_txDescInterruptMask, AR_IMR_S0_QCU_TXDESC));
7599 REG_WRITE(ah, AR_IMR_S1,
7600 SM(ahp->ah_txErrInterruptMask, AR_IMR_S1_QCU_TXERR)
7601 | SM(ahp->ah_txEolInterruptMask, AR_IMR_S1_QCU_TXEOL));
7602 REG_RMW_FIELD(ah, AR_IMR_S2,
7603 AR_IMR_S2_QCU_TXURN, ahp->ah_txUrnInterruptMask);
7604}
7605
7606bool ath9k_hw_releasetxqueue(struct ath_hal *ah, u32 q)
7607{
7608 struct ath_hal_5416 *ahp = AH5416(ah);
7609 struct ath9k_hw_capabilities *pCap = &ah->ah_caps;
7610 struct ath9k_tx_queue_info *qi;
7611
7612 if (q >= pCap->total_queues) {
7613 DPRINTF(ah->ah_sc, ATH_DBG_QUEUE, "%s: invalid queue num %u\n",
7614 __func__, q);
7615 return false;
7616 }
7617 qi = &ahp->ah_txq[q];
7618 if (qi->tqi_type == ATH9K_TX_QUEUE_INACTIVE) {
7619 DPRINTF(ah->ah_sc, ATH_DBG_QUEUE, "%s: inactive queue %u\n",
7620 __func__, q);
7621 return false;
7622 }
7623
7624 DPRINTF(ah->ah_sc, ATH_DBG_QUEUE, "%s: release queue %u\n",
7625 __func__, q);
7626
7627 qi->tqi_type = ATH9K_TX_QUEUE_INACTIVE;
7628 ahp->ah_txOkInterruptMask &= ~(1 << q);
7629 ahp->ah_txErrInterruptMask &= ~(1 << q);
7630 ahp->ah_txDescInterruptMask &= ~(1 << q);
7631 ahp->ah_txEolInterruptMask &= ~(1 << q);
7632 ahp->ah_txUrnInterruptMask &= ~(1 << q);
7633 ath9k_hw_set_txq_interrupts(ah, qi);
7634
7635 return true;
7636}
7637
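/*
 * Reprogram the DCU/QCU registers of a queue from its cached
 * ath9k_tx_queue_info: contention window, AIFS, retry limits, CBR,
 * ready time and burst time, plus per-queue-type policy bits, then
 * refresh the TX interrupt masks.
 */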
7638bool ath9k_hw_resettxqueue(struct ath_hal *ah, u32 q)
7639{
7640 struct ath_hal_5416 *ahp = AH5416(ah);
7641 struct ath9k_hw_capabilities *pCap = &ah->ah_caps;
7642 struct ath9k_channel *chan = ah->ah_curchan;
7643 struct ath9k_tx_queue_info *qi;
7644 u32 cwMin, chanCwMin, value;
7645
7646 if (q >= pCap->total_queues) {
7647 DPRINTF(ah->ah_sc, ATH_DBG_QUEUE, "%s: invalid queue num %u\n",
7648 __func__, q);
7649 return false;
7650 }
7651 qi = &ahp->ah_txq[q];
7652 if (qi->tqi_type == ATH9K_TX_QUEUE_INACTIVE) {
7653 DPRINTF(ah->ah_sc, ATH_DBG_QUEUE, "%s: inactive queue %u\n",
7654 __func__, q);
7655 return true;
7656 }
7657
7658 DPRINTF(ah->ah_sc, ATH_DBG_QUEUE, "%s: reset queue %u\n", __func__, q);
7659
7660 if (qi->tqi_cwmin == ATH9K_TXQ_USEDEFAULT) {
7661 if (chan && IS_CHAN_B(chan))
7662 chanCwMin = INIT_CWMIN_11B;
7663 else
7664 chanCwMin = INIT_CWMIN;
7665
7666 for (cwMin = 1; cwMin < chanCwMin; cwMin = (cwMin << 1) | 1);
7667 } else
7668 cwMin = qi->tqi_cwmin;
7669
7670 REG_WRITE(ah, AR_DLCL_IFS(q), SM(cwMin, AR_D_LCL_IFS_CWMIN)
7671 | SM(qi->tqi_cwmax, AR_D_LCL_IFS_CWMAX)
7672 | SM(qi->tqi_aifs, AR_D_LCL_IFS_AIFS));
7673
7674 REG_WRITE(ah, AR_DRETRY_LIMIT(q),
7675 SM(INIT_SSH_RETRY, AR_D_RETRY_LIMIT_STA_SH)
7676 | SM(INIT_SLG_RETRY, AR_D_RETRY_LIMIT_STA_LG)
7677 | SM(qi->tqi_shretry, AR_D_RETRY_LIMIT_FR_SH)
7678 );
7679
7680 REG_WRITE(ah, AR_QMISC(q), AR_Q_MISC_DCU_EARLY_TERM_REQ);
7681 REG_WRITE(ah, AR_DMISC(q),
7682 AR_D_MISC_CW_BKOFF_EN | AR_D_MISC_FRAG_WAIT_EN | 0x2);
7683
7684 if (qi->tqi_cbrPeriod) {
7685 REG_WRITE(ah, AR_QCBRCFG(q),
7686 SM(qi->tqi_cbrPeriod, AR_Q_CBRCFG_INTERVAL)
7687 | SM(qi->tqi_cbrOverflowLimit,
7688 AR_Q_CBRCFG_OVF_THRESH));
 7689		REG_WRITE(ah, AR_QMISC(q),
 7690			  REG_READ(ah, AR_QMISC(q)) |
 7691			  AR_Q_MISC_FSP_CBR |
 7692			  (qi->tqi_cbrOverflowLimit ?
 7693			   AR_Q_MISC_CBR_EXP_CNTR_LIMIT_EN : 0));
7697 }
7698 if (qi->tqi_readyTime && (qi->tqi_type != ATH9K_TX_QUEUE_CAB)) {
7699 REG_WRITE(ah, AR_QRDYTIMECFG(q),
7700 SM(qi->tqi_readyTime, AR_Q_RDYTIMECFG_DURATION) |
7701 AR_Q_RDYTIMECFG_EN);
7702 }
7703
7704 REG_WRITE(ah, AR_DCHNTIME(q),
7705 SM(qi->tqi_burstTime, AR_D_CHNTIME_DUR) |
7706 (qi->tqi_burstTime ? AR_D_CHNTIME_EN : 0));
7707
7708 if (qi->tqi_burstTime
7709 && (qi->tqi_qflags & TXQ_FLAG_RDYTIME_EXP_POLICY_ENABLE)) {
 7710		REG_WRITE(ah, AR_QMISC(q),
 7711			  REG_READ(ah, AR_QMISC(q)) |
 7712			  AR_Q_MISC_RDYTIME_EXP_POLICY);
7715 }
7716
7717 if (qi->tqi_qflags & TXQ_FLAG_BACKOFF_DISABLE) {
7718 REG_WRITE(ah, AR_DMISC(q),
7719 REG_READ(ah, AR_DMISC(q)) |
7720 AR_D_MISC_POST_FR_BKOFF_DIS);
7721 }
7722 if (qi->tqi_qflags & TXQ_FLAG_FRAG_BURST_BACKOFF_ENABLE) {
7723 REG_WRITE(ah, AR_DMISC(q),
7724 REG_READ(ah, AR_DMISC(q)) |
7725 AR_D_MISC_FRAG_BKOFF_EN);
7726 }
7727 switch (qi->tqi_type) {
7728 case ATH9K_TX_QUEUE_BEACON:
7729 REG_WRITE(ah, AR_QMISC(q), REG_READ(ah, AR_QMISC(q))
7730 | AR_Q_MISC_FSP_DBA_GATED
7731 | AR_Q_MISC_BEACON_USE
7732 | AR_Q_MISC_CBR_INCR_DIS1);
7733
7734 REG_WRITE(ah, AR_DMISC(q), REG_READ(ah, AR_DMISC(q))
7735 | (AR_D_MISC_ARB_LOCKOUT_CNTRL_GLOBAL <<
7736 AR_D_MISC_ARB_LOCKOUT_CNTRL_S)
7737 | AR_D_MISC_BEACON_USE
7738 | AR_D_MISC_POST_FR_BKOFF_DIS);
7739 break;
7740 case ATH9K_TX_QUEUE_CAB:
7741 REG_WRITE(ah, AR_QMISC(q), REG_READ(ah, AR_QMISC(q))
7742 | AR_Q_MISC_FSP_DBA_GATED
7743 | AR_Q_MISC_CBR_INCR_DIS1
7744 | AR_Q_MISC_CBR_INCR_DIS0);
 7745		value = (qi->tqi_readyTime -
 7746			 (ah->ah_config.sw_beacon_response_time -
 7747			  ah->ah_config.dma_beacon_response_time) -
 7748			 ah->ah_config.additional_swba_backoff) * 1024;
7751 REG_WRITE(ah, AR_QRDYTIMECFG(q),
7752 value | AR_Q_RDYTIMECFG_EN);
7753 REG_WRITE(ah, AR_DMISC(q), REG_READ(ah, AR_DMISC(q))
7754 | (AR_D_MISC_ARB_LOCKOUT_CNTRL_GLOBAL <<
7755 AR_D_MISC_ARB_LOCKOUT_CNTRL_S));
7756 break;
7757 case ATH9K_TX_QUEUE_PSPOLL:
 7758		REG_WRITE(ah, AR_QMISC(q),
 7759			  REG_READ(ah, AR_QMISC(q)) | AR_Q_MISC_CBR_INCR_DIS1);
7761 break;
7762 case ATH9K_TX_QUEUE_UAPSD:
7763 REG_WRITE(ah, AR_DMISC(q), REG_READ(ah, AR_DMISC(q))
7764 | AR_D_MISC_POST_FR_BKOFF_DIS);
7765 break;
7766 default:
7767 break;
7768 }
7769
7770 if (qi->tqi_intFlags & ATH9K_TXQ_USE_LOCKOUT_BKOFF_DIS) {
7771 REG_WRITE(ah, AR_DMISC(q),
7772 REG_READ(ah, AR_DMISC(q)) |
7773 SM(AR_D_MISC_ARB_LOCKOUT_CNTRL_GLOBAL,
7774 AR_D_MISC_ARB_LOCKOUT_CNTRL) |
7775 AR_D_MISC_POST_FR_BKOFF_DIS);
7776 }
7777
7778 if (qi->tqi_qflags & TXQ_FLAG_TXOKINT_ENABLE)
7779 ahp->ah_txOkInterruptMask |= 1 << q;
7780 else
7781 ahp->ah_txOkInterruptMask &= ~(1 << q);
7782 if (qi->tqi_qflags & TXQ_FLAG_TXERRINT_ENABLE)
7783 ahp->ah_txErrInterruptMask |= 1 << q;
7784 else
7785 ahp->ah_txErrInterruptMask &= ~(1 << q);
7786 if (qi->tqi_qflags & TXQ_FLAG_TXDESCINT_ENABLE)
7787 ahp->ah_txDescInterruptMask |= 1 << q;
7788 else
7789 ahp->ah_txDescInterruptMask &= ~(1 << q);
7790 if (qi->tqi_qflags & TXQ_FLAG_TXEOLINT_ENABLE)
7791 ahp->ah_txEolInterruptMask |= 1 << q;
7792 else
7793 ahp->ah_txEolInterruptMask &= ~(1 << q);
7794 if (qi->tqi_qflags & TXQ_FLAG_TXURNINT_ENABLE)
7795 ahp->ah_txUrnInterruptMask |= 1 << q;
7796 else
7797 ahp->ah_txUrnInterruptMask &= ~(1 << q);
7798 ath9k_hw_set_txq_interrupts(ah, qi);
7799
7800 return true;
7801}
7802
7803void ath9k_hw_gettxintrtxqs(struct ath_hal *ah, u32 *txqs)
7804{
7805 struct ath_hal_5416 *ahp = AH5416(ah);
7806 *txqs &= ahp->ah_intrTxqs;
7807 ahp->ah_intrTxqs &= ~(*txqs);
7808}
7809
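/*
 * Fill the buffer-length and chaining fields of a TX descriptor and
 * clear its status words; ctl2/ctl3 are copied from the first
 * descriptor for the final segment of a multi-segment frame.
 */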
7810bool
7811ath9k_hw_filltxdesc(struct ath_hal *ah, struct ath_desc *ds,
7812 u32 segLen, bool firstSeg,
7813 bool lastSeg, const struct ath_desc *ds0)
7814{
7815 struct ar5416_desc *ads = AR5416DESC(ds);
7816
7817 if (firstSeg) {
7818 ads->ds_ctl1 |= segLen | (lastSeg ? 0 : AR_TxMore);
7819 } else if (lastSeg) {
7820 ads->ds_ctl0 = 0;
7821 ads->ds_ctl1 = segLen;
7822 ads->ds_ctl2 = AR5416DESC_CONST(ds0)->ds_ctl2;
7823 ads->ds_ctl3 = AR5416DESC_CONST(ds0)->ds_ctl3;
7824 } else {
7825 ads->ds_ctl0 = 0;
7826 ads->ds_ctl1 = segLen | AR_TxMore;
7827 ads->ds_ctl2 = 0;
7828 ads->ds_ctl3 = 0;
7829 }
7830 ads->ds_txstatus0 = ads->ds_txstatus1 = 0;
7831 ads->ds_txstatus2 = ads->ds_txstatus3 = 0;
7832 ads->ds_txstatus4 = ads->ds_txstatus5 = 0;
7833 ads->ds_txstatus6 = ads->ds_txstatus7 = 0;
7834 ads->ds_txstatus8 = ads->ds_txstatus9 = 0;
7835 return true;
7836}
7837
7838void ath9k_hw_cleartxdesc(struct ath_hal *ah, struct ath_desc *ds)
7839{
7840 struct ar5416_desc *ads = AR5416DESC(ds);
7841
7842 ads->ds_txstatus0 = ads->ds_txstatus1 = 0;
7843 ads->ds_txstatus2 = ads->ds_txstatus3 = 0;
7844 ads->ds_txstatus4 = ads->ds_txstatus5 = 0;
7845 ads->ds_txstatus6 = ads->ds_txstatus7 = 0;
7846 ads->ds_txstatus8 = ads->ds_txstatus9 = 0;
7847}
7848
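/*
 * Decode a completed TX descriptor into ds_txstat.  Returns
 * -EINPROGRESS until the hardware has set AR_TxDone.
 */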
7849int
7850ath9k_hw_txprocdesc(struct ath_hal *ah, struct ath_desc *ds)
7851{
7852 struct ar5416_desc *ads = AR5416DESC(ds);
7853
7854 if ((ads->ds_txstatus9 & AR_TxDone) == 0)
7855 return -EINPROGRESS;
7856
7857 ds->ds_txstat.ts_seqnum = MS(ads->ds_txstatus9, AR_SeqNum);
7858 ds->ds_txstat.ts_tstamp = ads->AR_SendTimestamp;
7859 ds->ds_txstat.ts_status = 0;
7860 ds->ds_txstat.ts_flags = 0;
7861
7862 if (ads->ds_txstatus1 & AR_ExcessiveRetries)
7863 ds->ds_txstat.ts_status |= ATH9K_TXERR_XRETRY;
7864 if (ads->ds_txstatus1 & AR_Filtered)
7865 ds->ds_txstat.ts_status |= ATH9K_TXERR_FILT;
7866 if (ads->ds_txstatus1 & AR_FIFOUnderrun)
7867 ds->ds_txstat.ts_status |= ATH9K_TXERR_FIFO;
7868 if (ads->ds_txstatus9 & AR_TxOpExceeded)
7869 ds->ds_txstat.ts_status |= ATH9K_TXERR_XTXOP;
7870 if (ads->ds_txstatus1 & AR_TxTimerExpired)
7871 ds->ds_txstat.ts_status |= ATH9K_TXERR_TIMER_EXPIRED;
7872
7873 if (ads->ds_txstatus1 & AR_DescCfgErr)
7874 ds->ds_txstat.ts_flags |= ATH9K_TX_DESC_CFG_ERR;
7875 if (ads->ds_txstatus1 & AR_TxDataUnderrun) {
7876 ds->ds_txstat.ts_flags |= ATH9K_TX_DATA_UNDERRUN;
7877 ath9k_hw_updatetxtriglevel(ah, true);
7878 }
7879 if (ads->ds_txstatus1 & AR_TxDelimUnderrun) {
7880 ds->ds_txstat.ts_flags |= ATH9K_TX_DELIM_UNDERRUN;
7881 ath9k_hw_updatetxtriglevel(ah, true);
7882 }
7883 if (ads->ds_txstatus0 & AR_TxBaStatus) {
7884 ds->ds_txstat.ts_flags |= ATH9K_TX_BA;
7885 ds->ds_txstat.ba_low = ads->AR_BaBitmapLow;
7886 ds->ds_txstat.ba_high = ads->AR_BaBitmapHigh;
7887 }
7888
7889 ds->ds_txstat.ts_rateindex = MS(ads->ds_txstatus9, AR_FinalTxIdx);
7890 switch (ds->ds_txstat.ts_rateindex) {
7891 case 0:
7892 ds->ds_txstat.ts_ratecode = MS(ads->ds_ctl3, AR_XmitRate0);
7893 break;
7894 case 1:
7895 ds->ds_txstat.ts_ratecode = MS(ads->ds_ctl3, AR_XmitRate1);
7896 break;
7897 case 2:
7898 ds->ds_txstat.ts_ratecode = MS(ads->ds_ctl3, AR_XmitRate2);
7899 break;
7900 case 3:
7901 ds->ds_txstat.ts_ratecode = MS(ads->ds_ctl3, AR_XmitRate3);
7902 break;
7903 }
7904
7905 ds->ds_txstat.ts_rssi = MS(ads->ds_txstatus5, AR_TxRSSICombined);
7906 ds->ds_txstat.ts_rssi_ctl0 = MS(ads->ds_txstatus0, AR_TxRSSIAnt00);
7907 ds->ds_txstat.ts_rssi_ctl1 = MS(ads->ds_txstatus0, AR_TxRSSIAnt01);
7908 ds->ds_txstat.ts_rssi_ctl2 = MS(ads->ds_txstatus0, AR_TxRSSIAnt02);
7909 ds->ds_txstat.ts_rssi_ext0 = MS(ads->ds_txstatus5, AR_TxRSSIAnt10);
7910 ds->ds_txstat.ts_rssi_ext1 = MS(ads->ds_txstatus5, AR_TxRSSIAnt11);
7911 ds->ds_txstat.ts_rssi_ext2 = MS(ads->ds_txstatus5, AR_TxRSSIAnt12);
7912 ds->ds_txstat.evm0 = ads->AR_TxEVM0;
7913 ds->ds_txstat.evm1 = ads->AR_TxEVM1;
7914 ds->ds_txstat.evm2 = ads->AR_TxEVM2;
7915 ds->ds_txstat.ts_shortretry = MS(ads->ds_txstatus1, AR_RTSFailCnt);
7916 ds->ds_txstat.ts_longretry = MS(ads->ds_txstatus1, AR_DataFailCnt);
7917 ds->ds_txstat.ts_virtcol = MS(ads->ds_txstatus1, AR_VirtRetryCnt);
7918 ds->ds_txstat.ts_antenna = 1;
7919
7920 return 0;
7921}
7922
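/*
 * Build the basic TX control words for a frame: frame length, clamped
 * transmit power, key index/type and the per-frame flag bits.
 */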
7923void
7924ath9k_hw_set11n_txdesc(struct ath_hal *ah, struct ath_desc *ds,
7925 u32 pktLen, enum ath9k_pkt_type type, u32 txPower,
7926 u32 keyIx, enum ath9k_key_type keyType, u32 flags)
7927{
7928 struct ar5416_desc *ads = AR5416DESC(ds);
7929 struct ath_hal_5416 *ahp = AH5416(ah);
7930
7931 txPower += ahp->ah_txPowerIndexOffset;
7932 if (txPower > 63)
7933 txPower = 63;
7934
7935 ads->ds_ctl0 = (pktLen & AR_FrameLen)
7936 | (flags & ATH9K_TXDESC_VMF ? AR_VirtMoreFrag : 0)
7937 | SM(txPower, AR_XmitPower)
7938 | (flags & ATH9K_TXDESC_VEOL ? AR_VEOL : 0)
7939 | (flags & ATH9K_TXDESC_CLRDMASK ? AR_ClrDestMask : 0)
7940 | (flags & ATH9K_TXDESC_INTREQ ? AR_TxIntrReq : 0)
7941 | (keyIx != ATH9K_TXKEYIX_INVALID ? AR_DestIdxValid : 0);
7942
7943 ads->ds_ctl1 =
7944 (keyIx != ATH9K_TXKEYIX_INVALID ? SM(keyIx, AR_DestIdx) : 0)
7945 | SM(type, AR_FrameType)
7946 | (flags & ATH9K_TXDESC_NOACK ? AR_NoAck : 0)
7947 | (flags & ATH9K_TXDESC_EXT_ONLY ? AR_ExtOnly : 0)
7948 | (flags & ATH9K_TXDESC_EXT_AND_CTL ? AR_ExtAndCtl : 0);
7949
7950 ads->ds_ctl6 = SM(keyType, AR_EncrType);
7951
7952 if (AR_SREV_9285(ah)) {
7953
7954 ads->ds_ctl8 = 0;
7955 ads->ds_ctl9 = 0;
7956 ads->ds_ctl10 = 0;
7957 ads->ds_ctl11 = 0;
7958 }
7959}
7960
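/*
 * Program the four-entry multi-rate retry series (tries, rates,
 * packet durations, RTS/CTS handling) into the control words of the
 * first descriptor, and mirror ctl2/ctl3 into the last descriptor of
 * the frame.
 */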
7961void
7962ath9k_hw_set11n_ratescenario(struct ath_hal *ah, struct ath_desc *ds,
7963 struct ath_desc *lastds,
7964 u32 durUpdateEn, u32 rtsctsRate,
7965 u32 rtsctsDuration,
7966 struct ath9k_11n_rate_series series[],
7967 u32 nseries, u32 flags)
7968{
7969 struct ar5416_desc *ads = AR5416DESC(ds);
7970 struct ar5416_desc *last_ads = AR5416DESC(lastds);
7971 u32 ds_ctl0;
7972
7973 (void) nseries;
7974 (void) rtsctsDuration;
7975
7976 if (flags & (ATH9K_TXDESC_RTSENA | ATH9K_TXDESC_CTSENA)) {
7977 ds_ctl0 = ads->ds_ctl0;
7978
7979 if (flags & ATH9K_TXDESC_RTSENA) {
7980 ds_ctl0 &= ~AR_CTSEnable;
7981 ds_ctl0 |= AR_RTSEnable;
7982 } else {
7983 ds_ctl0 &= ~AR_RTSEnable;
7984 ds_ctl0 |= AR_CTSEnable;
7985 }
7986
7987 ads->ds_ctl0 = ds_ctl0;
7988 } else {
7989 ads->ds_ctl0 =
7990 (ads->ds_ctl0 & ~(AR_RTSEnable | AR_CTSEnable));
7991 }
7992
7993 ads->ds_ctl2 = set11nTries(series, 0)
7994 | set11nTries(series, 1)
7995 | set11nTries(series, 2)
7996 | set11nTries(series, 3)
7997 | (durUpdateEn ? AR_DurUpdateEna : 0)
7998 | SM(0, AR_BurstDur);
7999
8000 ads->ds_ctl3 = set11nRate(series, 0)
8001 | set11nRate(series, 1)
8002 | set11nRate(series, 2)
8003 | set11nRate(series, 3);
8004
8005 ads->ds_ctl4 = set11nPktDurRTSCTS(series, 0)
8006 | set11nPktDurRTSCTS(series, 1);
8007
8008 ads->ds_ctl5 = set11nPktDurRTSCTS(series, 2)
8009 | set11nPktDurRTSCTS(series, 3);
8010
8011 ads->ds_ctl7 = set11nRateFlags(series, 0)
8012 | set11nRateFlags(series, 1)
8013 | set11nRateFlags(series, 2)
8014 | set11nRateFlags(series, 3)
8015 | SM(rtsctsRate, AR_RTSCTSRate);
8016 last_ads->ds_ctl2 = ads->ds_ctl2;
8017 last_ads->ds_ctl3 = ads->ds_ctl3;
8018}
8019
8020void
8021ath9k_hw_set11n_aggr_first(struct ath_hal *ah, struct ath_desc *ds,
8022 u32 aggrLen)
8023{
8024 struct ar5416_desc *ads = AR5416DESC(ds);
8025
8026 ads->ds_ctl1 |= (AR_IsAggr | AR_MoreAggr);
8027
8028 ads->ds_ctl6 &= ~AR_AggrLen;
8029 ads->ds_ctl6 |= SM(aggrLen, AR_AggrLen);
8030}
8031
8032void
8033ath9k_hw_set11n_aggr_middle(struct ath_hal *ah, struct ath_desc *ds,
8034 u32 numDelims)
8035{
8036 struct ar5416_desc *ads = AR5416DESC(ds);
8037 unsigned int ctl6;
8038
8039 ads->ds_ctl1 |= (AR_IsAggr | AR_MoreAggr);
8040
8041 ctl6 = ads->ds_ctl6;
8042 ctl6 &= ~AR_PadDelim;
8043 ctl6 |= SM(numDelims, AR_PadDelim);
8044 ads->ds_ctl6 = ctl6;
8045}
8046
8047void ath9k_hw_set11n_aggr_last(struct ath_hal *ah, struct ath_desc *ds)
8048{
8049 struct ar5416_desc *ads = AR5416DESC(ds);
8050
8051 ads->ds_ctl1 |= AR_IsAggr;
8052 ads->ds_ctl1 &= ~AR_MoreAggr;
8053 ads->ds_ctl6 &= ~AR_PadDelim;
8054}
8055
8056void ath9k_hw_clr11n_aggr(struct ath_hal *ah, struct ath_desc *ds)
8057{
8058 struct ar5416_desc *ads = AR5416DESC(ds);
8059
8060 ads->ds_ctl1 &= (~AR_IsAggr & ~AR_MoreAggr);
8061}
8062
8063void
8064ath9k_hw_set11n_burstduration(struct ath_hal *ah, struct ath_desc *ds,
8065 u32 burstDuration)
8066{
8067 struct ar5416_desc *ads = AR5416DESC(ds);
8068
8069 ads->ds_ctl2 &= ~AR_BurstDur;
8070 ads->ds_ctl2 |= SM(burstDuration, AR_BurstDur);
8071}
8072
8073void
8074ath9k_hw_set11n_virtualmorefrag(struct ath_hal *ah, struct ath_desc *ds,
8075 u32 vmf)
8076{
8077 struct ar5416_desc *ads = AR5416DESC(ds);
8078
8079 if (vmf)
8080 ads->ds_ctl0 |= AR_VirtMoreFrag;
8081 else
8082 ads->ds_ctl0 &= ~AR_VirtMoreFrag;
8083}
8084
8085void ath9k_hw_putrxbuf(struct ath_hal *ah, u32 rxdp)
8086{
8087 REG_WRITE(ah, AR_RXDP, rxdp);
8088}
8089
8090void ath9k_hw_rxena(struct ath_hal *ah)
8091{
8092 REG_WRITE(ah, AR_CR, AR_CR_RXE);
8093}
8094
8095bool ath9k_hw_setrxabort(struct ath_hal *ah, bool set)
8096{
8097 if (set) {
8098
8099 REG_SET_BIT(ah, AR_DIAG_SW,
8100 (AR_DIAG_RX_DIS | AR_DIAG_RX_ABORT));
8101
 8102		if (!ath9k_hw_wait(ah, AR_OBS_BUS_1,
 8103				   AR_OBS_BUS_1_RX_STATE, 0)) {
8104 u32 reg;
8105
8106 REG_CLR_BIT(ah, AR_DIAG_SW,
8107 (AR_DIAG_RX_DIS |
8108 AR_DIAG_RX_ABORT));
8109
8110 reg = REG_READ(ah, AR_OBS_BUS_1);
8111 DPRINTF(ah->ah_sc, ATH_DBG_FATAL,
8112 "%s: rx failed to go idle in 10 ms RXSM=0x%x\n",
8113 __func__, reg);
8114
8115 return false;
8116 }
8117 } else {
8118 REG_CLR_BIT(ah, AR_DIAG_SW,
8119 (AR_DIAG_RX_DIS | AR_DIAG_RX_ABORT));
8120 }
8121
8122 return true;
8123}
8124
8125void
8126ath9k_hw_setmcastfilter(struct ath_hal *ah, u32 filter0,
8127 u32 filter1)
8128{
8129 REG_WRITE(ah, AR_MCAST_FIL0, filter0);
8130 REG_WRITE(ah, AR_MCAST_FIL1, filter1);
8131}
8132
8133bool
8134ath9k_hw_setuprxdesc(struct ath_hal *ah, struct ath_desc *ds,
8135 u32 size, u32 flags)
8136{
8137 struct ar5416_desc *ads = AR5416DESC(ds);
8138 struct ath9k_hw_capabilities *pCap = &ah->ah_caps;
8139
8140 ads->ds_ctl1 = size & AR_BufLen;
8141 if (flags & ATH9K_RXDESC_INTREQ)
8142 ads->ds_ctl1 |= AR_RxIntrReq;
8143
8144 ads->ds_rxstatus8 &= ~AR_RxDone;
8145 if (!(pCap->hw_caps & ATH9K_HW_CAP_AUTOSLEEP))
8146 memset(&(ads->u), 0, sizeof(ads->u));
8147 return true;
8148}
8149
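/*
 * Decode a completed RX descriptor into ds_rxstat (length, timestamp,
 * RSSI, rate, aggregation and error flags).  Returns -EINPROGRESS
 * until the hardware has set AR_RxDone.
 */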
8150int
8151ath9k_hw_rxprocdesc(struct ath_hal *ah, struct ath_desc *ds,
8152 u32 pa, struct ath_desc *nds, u64 tsf)
8153{
8154 struct ar5416_desc ads;
8155 struct ar5416_desc *adsp = AR5416DESC(ds);
8156
8157 if ((adsp->ds_rxstatus8 & AR_RxDone) == 0)
8158 return -EINPROGRESS;
8159
8160 ads.u.rx = adsp->u.rx;
8161
8162 ds->ds_rxstat.rs_status = 0;
8163 ds->ds_rxstat.rs_flags = 0;
8164
8165 ds->ds_rxstat.rs_datalen = ads.ds_rxstatus1 & AR_DataLen;
8166 ds->ds_rxstat.rs_tstamp = ads.AR_RcvTimestamp;
8167
8168 ds->ds_rxstat.rs_rssi = MS(ads.ds_rxstatus4, AR_RxRSSICombined);
8169 ds->ds_rxstat.rs_rssi_ctl0 = MS(ads.ds_rxstatus0, AR_RxRSSIAnt00);
8170 ds->ds_rxstat.rs_rssi_ctl1 = MS(ads.ds_rxstatus0, AR_RxRSSIAnt01);
8171 ds->ds_rxstat.rs_rssi_ctl2 = MS(ads.ds_rxstatus0, AR_RxRSSIAnt02);
8172 ds->ds_rxstat.rs_rssi_ext0 = MS(ads.ds_rxstatus4, AR_RxRSSIAnt10);
8173 ds->ds_rxstat.rs_rssi_ext1 = MS(ads.ds_rxstatus4, AR_RxRSSIAnt11);
8174 ds->ds_rxstat.rs_rssi_ext2 = MS(ads.ds_rxstatus4, AR_RxRSSIAnt12);
8175 if (ads.ds_rxstatus8 & AR_RxKeyIdxValid)
8176 ds->ds_rxstat.rs_keyix = MS(ads.ds_rxstatus8, AR_KeyIdx);
8177 else
8178 ds->ds_rxstat.rs_keyix = ATH9K_RXKEYIX_INVALID;
8179
8180 ds->ds_rxstat.rs_rate = RXSTATUS_RATE(ah, (&ads));
8181 ds->ds_rxstat.rs_more = (ads.ds_rxstatus1 & AR_RxMore) ? 1 : 0;
8182
8183 ds->ds_rxstat.rs_isaggr = (ads.ds_rxstatus8 & AR_RxAggr) ? 1 : 0;
8184 ds->ds_rxstat.rs_moreaggr =
8185 (ads.ds_rxstatus8 & AR_RxMoreAggr) ? 1 : 0;
8186 ds->ds_rxstat.rs_antenna = MS(ads.ds_rxstatus3, AR_RxAntenna);
8187 ds->ds_rxstat.rs_flags =
8188 (ads.ds_rxstatus3 & AR_GI) ? ATH9K_RX_GI : 0;
8189 ds->ds_rxstat.rs_flags |=
8190 (ads.ds_rxstatus3 & AR_2040) ? ATH9K_RX_2040 : 0;
8191
8192 if (ads.ds_rxstatus8 & AR_PreDelimCRCErr)
8193 ds->ds_rxstat.rs_flags |= ATH9K_RX_DELIM_CRC_PRE;
8194 if (ads.ds_rxstatus8 & AR_PostDelimCRCErr)
8195 ds->ds_rxstat.rs_flags |= ATH9K_RX_DELIM_CRC_POST;
8196 if (ads.ds_rxstatus8 & AR_DecryptBusyErr)
8197 ds->ds_rxstat.rs_flags |= ATH9K_RX_DECRYPT_BUSY;
8198
8199 if ((ads.ds_rxstatus8 & AR_RxFrameOK) == 0) {
8200
8201 if (ads.ds_rxstatus8 & AR_CRCErr)
8202 ds->ds_rxstat.rs_status |= ATH9K_RXERR_CRC;
8203 else if (ads.ds_rxstatus8 & AR_PHYErr) {
8204 u32 phyerr;
8205
8206 ds->ds_rxstat.rs_status |= ATH9K_RXERR_PHY;
8207 phyerr = MS(ads.ds_rxstatus8, AR_PHYErrCode);
8208 ds->ds_rxstat.rs_phyerr = phyerr;
8209 } else if (ads.ds_rxstatus8 & AR_DecryptCRCErr)
8210 ds->ds_rxstat.rs_status |= ATH9K_RXERR_DECRYPT;
8211 else if (ads.ds_rxstatus8 & AR_MichaelErr)
8212 ds->ds_rxstat.rs_status |= ATH9K_RXERR_MIC;
8213 }
8214
8215 return 0;
8216}
8217
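/*
 * Lazily build the rate-code -> index lookup table and precompute the
 * long- and short-preamble ACK durations for each entry of a rate
 * table.
 */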
8218static void ath9k_hw_setup_rate_table(struct ath_hal *ah,
8219 struct ath9k_rate_table *rt)
8220{
8221 int i;
8222
8223 if (rt->rateCodeToIndex[0] != 0)
8224 return;
8225 for (i = 0; i < 256; i++)
8226 rt->rateCodeToIndex[i] = (u8) -1;
8227 for (i = 0; i < rt->rateCount; i++) {
8228 u8 code = rt->info[i].rateCode;
8229 u8 cix = rt->info[i].controlRate;
8230
8231 rt->rateCodeToIndex[code] = i;
8232 rt->rateCodeToIndex[code | rt->info[i].shortPreamble] = i;
8233
8234 rt->info[i].lpAckDuration =
8235 ath9k_hw_computetxtime(ah, rt,
8236 WLAN_CTRL_FRAME_SIZE,
8237 cix,
8238 false);
8239 rt->info[i].spAckDuration =
8240 ath9k_hw_computetxtime(ah, rt,
8241 WLAN_CTRL_FRAME_SIZE,
8242 cix,
8243 true);
8244 }
8245}
8246
8247const struct ath9k_rate_table *ath9k_hw_getratetable(struct ath_hal *ah,
8248 u32 mode)
8249{
8250 struct ath9k_rate_table *rt;
8251 switch (mode) {
8252 case ATH9K_MODE_11A:
8253 rt = &ar5416_11a_table;
8254 break;
8255 case ATH9K_MODE_11B:
8256 rt = &ar5416_11b_table;
8257 break;
8258 case ATH9K_MODE_11G:
8259 rt = &ar5416_11g_table;
8260 break;
8261 case ATH9K_MODE_11NG_HT20:
8262 case ATH9K_MODE_11NG_HT40PLUS:
8263 case ATH9K_MODE_11NG_HT40MINUS:
8264 rt = &ar5416_11ng_table;
8265 break;
8266 case ATH9K_MODE_11NA_HT20:
8267 case ATH9K_MODE_11NA_HT40PLUS:
8268 case ATH9K_MODE_11NA_HT40MINUS:
8269 rt = &ar5416_11na_table;
8270 break;
8271 default:
8272 DPRINTF(ah->ah_sc, ATH_DBG_CHANNEL, "%s: invalid mode 0x%x\n",
8273 __func__, mode);
8274 return NULL;
8275 }
8276 ath9k_hw_setup_rate_table(ah, rt);
8277 return rt;
8278}
8279
8280static const char *ath9k_hw_devname(u16 devid)
8281{
8282 switch (devid) {
8283 case AR5416_DEVID_PCI:
8284 case AR5416_DEVID_PCIE:
8285 return "Atheros 5416";
8286 case AR9160_DEVID_PCI:
8287 return "Atheros 9160";
8288 case AR9280_DEVID_PCI:
8289 case AR9280_DEVID_PCIE:
8290 return "Atheros 9280";
8291 }
8292 return NULL;
8293}
8294
8295const char *ath9k_hw_probe(u16 vendorid, u16 devid)
8296{
8297 return vendorid == ATHEROS_VENDOR_ID ?
8298 ath9k_hw_devname(devid) : NULL;
8299}
8300
8301struct ath_hal *ath9k_hw_attach(u16 devid,
8302 struct ath_softc *sc,
8303 void __iomem *mem,
8304 int *error)
8305{
8306 struct ath_hal *ah = NULL;
8307
8308 switch (devid) {
8309 case AR5416_DEVID_PCI:
8310 case AR5416_DEVID_PCIE:
8311 case AR9160_DEVID_PCI:
8312 case AR9280_DEVID_PCI:
8313 case AR9280_DEVID_PCIE:
8314 ah = ath9k_hw_do_attach(devid, sc, mem, error);
8315 break;
8316 default:
 8317		/* ah is still NULL here, so DPRINTF(ah->ah_sc, ...) would
 8318		 * dereference a NULL pointer; report the error directly. */
		printk(KERN_ERR "ath9k: devid=0x%x not supported.\n", devid);
8319 ah = NULL;
8320 *error = -ENXIO;
8321 break;
8322 }
 8323	/*
 8324	 * ath9k_hw_do_attach() has already filled in ah_devid,
 8325	 * ah_subvendorid and the MAC/PHY/analog revision fields, so
 8326	 * there is nothing further to copy here.
 8327	 */
8332 return ah;
8333}
8334
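/*
 * Compute the airtime in microseconds for a frame of frameLen bytes
 * at the given rate index, covering CCK and OFDM on full-, half- and
 * quarter-rate channels.  Returns 0 for an unknown rate or PHY type.
 */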
8335u16
8336ath9k_hw_computetxtime(struct ath_hal *ah,
8337 const struct ath9k_rate_table *rates,
8338 u32 frameLen, u16 rateix,
8339 bool shortPreamble)
8340{
8341 u32 bitsPerSymbol, numBits, numSymbols, phyTime, txTime;
8342 u32 kbps;
8343
8344 kbps = rates->info[rateix].rateKbps;
8345
8346 if (kbps == 0)
8347 return 0;
8348 switch (rates->info[rateix].phy) {
8349
8350 case PHY_CCK:
8351 phyTime = CCK_PREAMBLE_BITS + CCK_PLCP_BITS;
8352 if (shortPreamble && rates->info[rateix].shortPreamble)
8353 phyTime >>= 1;
8354 numBits = frameLen << 3;
8355 txTime = CCK_SIFS_TIME + phyTime
8356 + ((numBits * 1000) / kbps);
8357 break;
8358 case PHY_OFDM:
8359 if (ah->ah_curchan && IS_CHAN_QUARTER_RATE(ah->ah_curchan)) {
8360 bitsPerSymbol =
8361 (kbps * OFDM_SYMBOL_TIME_QUARTER) / 1000;
8362
8363 numBits = OFDM_PLCP_BITS + (frameLen << 3);
8364 numSymbols = DIV_ROUND_UP(numBits, bitsPerSymbol);
8365 txTime = OFDM_SIFS_TIME_QUARTER
8366 + OFDM_PREAMBLE_TIME_QUARTER
8367 + (numSymbols * OFDM_SYMBOL_TIME_QUARTER);
8368 } else if (ah->ah_curchan &&
8369 IS_CHAN_HALF_RATE(ah->ah_curchan)) {
8370 bitsPerSymbol =
8371 (kbps * OFDM_SYMBOL_TIME_HALF) / 1000;
8372
8373 numBits = OFDM_PLCP_BITS + (frameLen << 3);
8374 numSymbols = DIV_ROUND_UP(numBits, bitsPerSymbol);
8375 txTime = OFDM_SIFS_TIME_HALF +
8376 OFDM_PREAMBLE_TIME_HALF
8377 + (numSymbols * OFDM_SYMBOL_TIME_HALF);
8378 } else {
8379 bitsPerSymbol = (kbps * OFDM_SYMBOL_TIME) / 1000;
8380
8381 numBits = OFDM_PLCP_BITS + (frameLen << 3);
8382 numSymbols = DIV_ROUND_UP(numBits, bitsPerSymbol);
8383 txTime = OFDM_SIFS_TIME + OFDM_PREAMBLE_TIME
8384 + (numSymbols * OFDM_SYMBOL_TIME);
8385 }
8386 break;
8387
8388 default:
8389 DPRINTF(ah->ah_sc, ATH_DBG_PHY_IO,
8390 "%s: unknown phy %u (rate ix %u)\n", __func__,
8391 rates->info[rateix].phy, rateix);
8392 txTime = 0;
8393 break;
8394 }
8395 return txTime;
8396}
8397
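/*
 * Map a channel centre frequency in MHz to an IEEE channel number,
 * honouring the 2 GHz/5 GHz channel flags and the public-safety band
 * special case.
 */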
8398u32 ath9k_hw_mhz2ieee(struct ath_hal *ah, u32 freq, u32 flags)
8399{
8400 if (flags & CHANNEL_2GHZ) {
8401 if (freq == 2484)
8402 return 14;
8403 if (freq < 2484)
8404 return (freq - 2407) / 5;
8405 else
8406 return 15 + ((freq - 2512) / 20);
8407 } else if (flags & CHANNEL_5GHZ) {
8408 if (ath9k_regd_is_public_safety_sku(ah) &&
8409 IS_CHAN_IN_PUBLIC_SAFETY_BAND(freq)) {
8410 return ((freq * 10) +
8411 (((freq % 5) == 2) ? 5 : 0) - 49400) / 5;
8412 } else if ((flags & CHANNEL_A) && (freq <= 5000)) {
8413 return (freq - 4000) / 5;
8414 } else {
8415 return (freq - 5000) / 5;
8416 }
8417 } else {
8418 if (freq == 2484)
8419 return 14;
8420 if (freq < 2484)
8421 return (freq - 2407) / 5;
8422 if (freq < 5000) {
8423 if (ath9k_regd_is_public_safety_sku(ah)
8424 && IS_CHAN_IN_PUBLIC_SAFETY_BAND(freq)) {
8425 return ((freq * 10) +
8426 (((freq % 5) ==
8427 2) ? 5 : 0) - 49400) / 5;
8428 } else if (freq > 4900) {
8429 return (freq - 4000) / 5;
8430 } else {
8431 return 15 + ((freq - 2512) / 20);
8432 }
8433 }
8434 return (freq - 5000) / 5;
8435 }
8436}
8437
8438int16_t
8439ath9k_hw_getchan_noise(struct ath_hal *ah, struct ath9k_channel *chan)
8440{
8441 struct ath9k_channel *ichan;
8442
8443 ichan = ath9k_regd_check_channel(ah, chan);
8444 if (ichan == NULL) {
8445 DPRINTF(ah->ah_sc, ATH_DBG_NF_CAL,
8446 "%s: invalid channel %u/0x%x; no mapping\n",
8447 __func__, chan->channel, chan->channelFlags);
8448 return 0;
8449 }
8450 if (ichan->rawNoiseFloor == 0) {
8451 enum wireless_mode mode = ath9k_hw_chan2wmode(ah, chan);
8452 return NOISE_FLOOR[mode];
8453 } else
8454 return ichan->rawNoiseFloor;
8455}
8456
8457bool ath9k_hw_set_tsfadjust(struct ath_hal *ah, u32 setting)
8458{
8459 struct ath_hal_5416 *ahp = AH5416(ah);
8460
8461 if (setting)
8462 ahp->ah_miscMode |= AR_PCU_TX_ADD_TSF;
8463 else
8464 ahp->ah_miscMode &= ~AR_PCU_TX_ADD_TSF;
8465 return true;
8466}
8467
8468bool ath9k_hw_phycounters(struct ath_hal *ah)
8469{
8470 struct ath_hal_5416 *ahp = AH5416(ah);
8471
8472 return ahp->ah_hasHwPhyCounters ? true : false;
8473}
8474
8475u32 ath9k_hw_gettxbuf(struct ath_hal *ah, u32 q)
8476{
8477 return REG_READ(ah, AR_QTXDP(q));
8478}
8479
8480bool ath9k_hw_puttxbuf(struct ath_hal *ah, u32 q,
8481 u32 txdp)
8482{
8483 REG_WRITE(ah, AR_QTXDP(q), txdp);
8484
8485 return true;
8486}
8487
8488bool ath9k_hw_txstart(struct ath_hal *ah, u32 q)
8489{
8490 DPRINTF(ah->ah_sc, ATH_DBG_QUEUE, "%s: queue %u\n", __func__, q);
8491
8492 REG_WRITE(ah, AR_Q_TXE, 1 << q);
8493
8494 return true;
8495}
8496
8497u32 ath9k_hw_numtxpending(struct ath_hal *ah, u32 q)
8498{
8499 u32 npend;
8500
8501 npend = REG_READ(ah, AR_QSTS(q)) & AR_Q_STS_PEND_FR_CNT;
8502 if (npend == 0) {
8503
8504 if (REG_READ(ah, AR_Q_TXE) & (1 << q))
8505 npend = 1;
8506 }
8507 return npend;
8508}
8509
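/*
 * Stop TX DMA on a queue: request the stop via AR_Q_TXD, poll for up
 * to 100 ms, and if frames are still pending force the channel idle
 * with a short quiet-time period before polling again.  Returns true
 * once the queue has drained.
 */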
8510bool ath9k_hw_stoptxdma(struct ath_hal *ah, u32 q)
8511{
8512 u32 wait;
8513
8514 REG_WRITE(ah, AR_Q_TXD, 1 << q);
8515
8516 for (wait = 1000; wait != 0; wait--) {
8517 if (ath9k_hw_numtxpending(ah, q) == 0)
8518 break;
8519 udelay(100);
8520 }
8521
8522 if (ath9k_hw_numtxpending(ah, q)) {
8523 u32 tsfLow, j;
8524
8525 DPRINTF(ah->ah_sc, ATH_DBG_QUEUE,
8526 "%s: Num of pending TX Frames %d on Q %d\n",
8527 __func__, ath9k_hw_numtxpending(ah, q), q);
8528
8529 for (j = 0; j < 2; j++) {
8530 tsfLow = REG_READ(ah, AR_TSF_L32);
8531 REG_WRITE(ah, AR_QUIET2,
8532 SM(10, AR_QUIET2_QUIET_DUR));
8533 REG_WRITE(ah, AR_QUIET_PERIOD, 100);
8534 REG_WRITE(ah, AR_NEXT_QUIET_TIMER, tsfLow >> 10);
8535 REG_SET_BIT(ah, AR_TIMER_MODE,
8536 AR_QUIET_TIMER_EN);
8537
8538 if ((REG_READ(ah, AR_TSF_L32) >> 10) ==
8539 (tsfLow >> 10)) {
8540 break;
8541 }
8542 DPRINTF(ah->ah_sc, ATH_DBG_QUEUE,
8543 "%s: TSF have moved while trying to set "
8544 "quiet time TSF: 0x%08x\n",
8545 __func__, tsfLow);
8546 }
8547
8548 REG_SET_BIT(ah, AR_DIAG_SW, AR_DIAG_FORCE_CH_IDLE_HIGH);
8549
8550 udelay(200);
8551 REG_CLR_BIT(ah, AR_TIMER_MODE, AR_QUIET_TIMER_EN);
8552
8553 wait = 1000;
8554
8555 while (ath9k_hw_numtxpending(ah, q)) {
8556 if ((--wait) == 0) {
8557 DPRINTF(ah->ah_sc, ATH_DBG_XMIT,
8558 "%s: Failed to stop Tx DMA in 100 "
8559 "msec after killing last frame\n",
8560 __func__);
8561 break;
8562 }
8563 udelay(100);
8564 }
8565
8566 REG_CLR_BIT(ah, AR_DIAG_SW, AR_DIAG_FORCE_CH_IDLE_HIGH);
8567 }
8568
8569 REG_WRITE(ah, AR_Q_TXD, 0);
8570 return wait != 0;
8571}
diff --git a/drivers/net/wireless/ath9k/hw.h b/drivers/net/wireless/ath9k/hw.h
new file mode 100644
index 000000000000..ae680f21ba7e
--- /dev/null
+++ b/drivers/net/wireless/ath9k/hw.h
@@ -0,0 +1,969 @@
1/*
2 * Copyright (c) 2008 Atheros Communications Inc.
3 *
4 * Permission to use, copy, modify, and/or distribute this software for any
5 * purpose with or without fee is hereby granted, provided that the above
6 * copyright notice and this permission notice appear in all copies.
7 *
8 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
9 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
10 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
11 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
12 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
13 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
14 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
15 */
16
17#ifndef HW_H
18#define HW_H
19
20#include <linux/if_ether.h>
21#include <linux/delay.h>
22
23struct ar5416_desc {
24 u32 ds_link;
25 u32 ds_data;
26 u32 ds_ctl0;
27 u32 ds_ctl1;
28 union {
29 struct {
30 u32 ctl2;
31 u32 ctl3;
32 u32 ctl4;
33 u32 ctl5;
34 u32 ctl6;
35 u32 ctl7;
36 u32 ctl8;
37 u32 ctl9;
38 u32 ctl10;
39 u32 ctl11;
40 u32 status0;
41 u32 status1;
42 u32 status2;
43 u32 status3;
44 u32 status4;
45 u32 status5;
46 u32 status6;
47 u32 status7;
48 u32 status8;
49 u32 status9;
50 } tx;
51 struct {
52 u32 status0;
53 u32 status1;
54 u32 status2;
55 u32 status3;
56 u32 status4;
57 u32 status5;
58 u32 status6;
59 u32 status7;
60 u32 status8;
61 } rx;
62 } u;
63} __packed;
64
65#define AR5416DESC(_ds) ((struct ar5416_desc *)(_ds))
66#define AR5416DESC_CONST(_ds) ((const struct ar5416_desc *)(_ds))
67
68#define ds_ctl2 u.tx.ctl2
69#define ds_ctl3 u.tx.ctl3
70#define ds_ctl4 u.tx.ctl4
71#define ds_ctl5 u.tx.ctl5
72#define ds_ctl6 u.tx.ctl6
73#define ds_ctl7 u.tx.ctl7
74#define ds_ctl8 u.tx.ctl8
75#define ds_ctl9 u.tx.ctl9
76#define ds_ctl10 u.tx.ctl10
77#define ds_ctl11 u.tx.ctl11
78
79#define ds_txstatus0 u.tx.status0
80#define ds_txstatus1 u.tx.status1
81#define ds_txstatus2 u.tx.status2
82#define ds_txstatus3 u.tx.status3
83#define ds_txstatus4 u.tx.status4
84#define ds_txstatus5 u.tx.status5
85#define ds_txstatus6 u.tx.status6
86#define ds_txstatus7 u.tx.status7
87#define ds_txstatus8 u.tx.status8
88#define ds_txstatus9 u.tx.status9
89
90#define ds_rxstatus0 u.rx.status0
91#define ds_rxstatus1 u.rx.status1
92#define ds_rxstatus2 u.rx.status2
93#define ds_rxstatus3 u.rx.status3
94#define ds_rxstatus4 u.rx.status4
95#define ds_rxstatus5 u.rx.status5
96#define ds_rxstatus6 u.rx.status6
97#define ds_rxstatus7 u.rx.status7
98#define ds_rxstatus8 u.rx.status8
99
100#define AR_FrameLen 0x00000fff
101#define AR_VirtMoreFrag 0x00001000
102#define AR_TxCtlRsvd00 0x0000e000
103#define AR_XmitPower 0x003f0000
104#define AR_XmitPower_S 16
105#define AR_RTSEnable 0x00400000
106#define AR_VEOL 0x00800000
107#define AR_ClrDestMask 0x01000000
108#define AR_TxCtlRsvd01 0x1e000000
109#define AR_TxIntrReq 0x20000000
110#define AR_DestIdxValid 0x40000000
111#define AR_CTSEnable 0x80000000
112
113#define AR_BufLen 0x00000fff
114#define AR_TxMore 0x00001000
115#define AR_DestIdx 0x000fe000
116#define AR_DestIdx_S 13
117#define AR_FrameType 0x00f00000
118#define AR_FrameType_S 20
119#define AR_NoAck 0x01000000
120#define AR_InsertTS 0x02000000
121#define AR_CorruptFCS 0x04000000
122#define AR_ExtOnly 0x08000000
123#define AR_ExtAndCtl 0x10000000
124#define AR_MoreAggr 0x20000000
125#define AR_IsAggr 0x40000000
126
127#define AR_BurstDur 0x00007fff
128#define AR_BurstDur_S 0
129#define AR_DurUpdateEna 0x00008000
130#define AR_XmitDataTries0 0x000f0000
131#define AR_XmitDataTries0_S 16
132#define AR_XmitDataTries1 0x00f00000
133#define AR_XmitDataTries1_S 20
134#define AR_XmitDataTries2 0x0f000000
135#define AR_XmitDataTries2_S 24
136#define AR_XmitDataTries3 0xf0000000
137#define AR_XmitDataTries3_S 28
138
139#define AR_XmitRate0 0x000000ff
140#define AR_XmitRate0_S 0
141#define AR_XmitRate1 0x0000ff00
142#define AR_XmitRate1_S 8
143#define AR_XmitRate2 0x00ff0000
144#define AR_XmitRate2_S 16
145#define AR_XmitRate3 0xff000000
146#define AR_XmitRate3_S 24
147
148#define AR_PacketDur0 0x00007fff
149#define AR_PacketDur0_S 0
150#define AR_RTSCTSQual0 0x00008000
151#define AR_PacketDur1 0x7fff0000
152#define AR_PacketDur1_S 16
153#define AR_RTSCTSQual1 0x80000000
154
155#define AR_PacketDur2 0x00007fff
156#define AR_PacketDur2_S 0
157#define AR_RTSCTSQual2 0x00008000
158#define AR_PacketDur3 0x7fff0000
159#define AR_PacketDur3_S 16
160#define AR_RTSCTSQual3 0x80000000
161
162#define AR_AggrLen 0x0000ffff
163#define AR_AggrLen_S 0
164#define AR_TxCtlRsvd60 0x00030000
165#define AR_PadDelim 0x03fc0000
166#define AR_PadDelim_S 18
167#define AR_EncrType 0x0c000000
168#define AR_EncrType_S 26
169#define AR_TxCtlRsvd61 0xf0000000
170
171#define AR_2040_0 0x00000001
172#define AR_GI0 0x00000002
173#define AR_ChainSel0 0x0000001c
174#define AR_ChainSel0_S 2
175#define AR_2040_1 0x00000020
176#define AR_GI1 0x00000040
177#define AR_ChainSel1 0x00000380
178#define AR_ChainSel1_S 7
179#define AR_2040_2 0x00000400
180#define AR_GI2 0x00000800
181#define AR_ChainSel2 0x00007000
182#define AR_ChainSel2_S 12
183#define AR_2040_3 0x00008000
184#define AR_GI3 0x00010000
185#define AR_ChainSel3 0x000e0000
186#define AR_ChainSel3_S 17
187#define AR_RTSCTSRate 0x0ff00000
188#define AR_RTSCTSRate_S 20
189#define AR_TxCtlRsvd70 0xf0000000
190
191#define AR_TxRSSIAnt00 0x000000ff
192#define AR_TxRSSIAnt00_S 0
193#define AR_TxRSSIAnt01 0x0000ff00
194#define AR_TxRSSIAnt01_S 8
195#define AR_TxRSSIAnt02 0x00ff0000
196#define AR_TxRSSIAnt02_S 16
197#define AR_TxStatusRsvd00 0x3f000000
198#define AR_TxBaStatus 0x40000000
199#define AR_TxStatusRsvd01 0x80000000
200
201#define AR_FrmXmitOK 0x00000001
202#define AR_ExcessiveRetries 0x00000002
203#define AR_FIFOUnderrun 0x00000004
204#define AR_Filtered 0x00000008
205#define AR_RTSFailCnt 0x000000f0
206#define AR_RTSFailCnt_S 4
207#define AR_DataFailCnt 0x00000f00
208#define AR_DataFailCnt_S 8
209#define AR_VirtRetryCnt 0x0000f000
210#define AR_VirtRetryCnt_S 12
211#define AR_TxDelimUnderrun 0x00010000
212#define AR_TxDataUnderrun 0x00020000
213#define AR_DescCfgErr 0x00040000
214#define AR_TxTimerExpired 0x00080000
215#define AR_TxStatusRsvd10 0xfff00000
216
217#define AR_SendTimestamp ds_txstatus2
218#define AR_BaBitmapLow ds_txstatus3
219#define AR_BaBitmapHigh ds_txstatus4
220
221#define AR_TxRSSIAnt10 0x000000ff
222#define AR_TxRSSIAnt10_S 0
223#define AR_TxRSSIAnt11 0x0000ff00
224#define AR_TxRSSIAnt11_S 8
225#define AR_TxRSSIAnt12 0x00ff0000
226#define AR_TxRSSIAnt12_S 16
227#define AR_TxRSSICombined 0xff000000
228#define AR_TxRSSICombined_S 24
229
230#define AR_TxEVM0 ds_txstatus5
231#define AR_TxEVM1 ds_txstatus6
232#define AR_TxEVM2 ds_txstatus7
233
234#define AR_TxDone 0x00000001
235#define AR_SeqNum 0x00001ffe
236#define AR_SeqNum_S 1
237#define AR_TxStatusRsvd80 0x0001e000
238#define AR_TxOpExceeded 0x00020000
239#define AR_TxStatusRsvd81 0x001c0000
240#define AR_FinalTxIdx 0x00600000
241#define AR_FinalTxIdx_S 21
242#define AR_TxStatusRsvd82 0x01800000
243#define AR_PowerMgmt 0x02000000
244#define AR_TxStatusRsvd83 0xfc000000
245
246#define AR_RxCTLRsvd00 0xffffffff
247
248#define AR_BufLen 0x00000fff
249#define AR_RxCtlRsvd00 0x00001000
250#define AR_RxIntrReq 0x00002000
251#define AR_RxCtlRsvd01 0xffffc000
252
253#define AR_RxRSSIAnt00 0x000000ff
254#define AR_RxRSSIAnt00_S 0
255#define AR_RxRSSIAnt01 0x0000ff00
256#define AR_RxRSSIAnt01_S 8
257#define AR_RxRSSIAnt02 0x00ff0000
258#define AR_RxRSSIAnt02_S 16
259#define AR_RxRate 0xff000000
260#define AR_RxRate_S 24
261#define AR_RxStatusRsvd00 0xff000000
262
263#define AR_DataLen 0x00000fff
264#define AR_RxMore 0x00001000
265#define AR_NumDelim 0x003fc000
266#define AR_NumDelim_S 14
267#define AR_RxStatusRsvd10 0xff800000
268
269#define AR_RcvTimestamp ds_rxstatus2
270
271#define AR_GI 0x00000001
272#define AR_2040 0x00000002
273#define AR_Parallel40 0x00000004
274#define AR_Parallel40_S 2
275#define AR_RxStatusRsvd30 0x000000f8
276#define AR_RxAntenna 0xffffff00
277#define AR_RxAntenna_S 8
278
279#define AR_RxRSSIAnt10 0x000000ff
280#define AR_RxRSSIAnt10_S 0
281#define AR_RxRSSIAnt11 0x0000ff00
282#define AR_RxRSSIAnt11_S 8
283#define AR_RxRSSIAnt12 0x00ff0000
284#define AR_RxRSSIAnt12_S 16
285#define AR_RxRSSICombined 0xff000000
286#define AR_RxRSSICombined_S 24
287
288#define AR_RxEVM0 ds_rxstatus4
289#define AR_RxEVM1 ds_rxstatus5
290#define AR_RxEVM2 ds_rxstatus6
291
292#define AR_RxDone 0x00000001
293#define AR_RxFrameOK 0x00000002
294#define AR_CRCErr 0x00000004
295#define AR_DecryptCRCErr 0x00000008
296#define AR_PHYErr 0x00000010
297#define AR_MichaelErr 0x00000020
298#define AR_PreDelimCRCErr 0x00000040
299#define AR_RxStatusRsvd70 0x00000080
300#define AR_RxKeyIdxValid 0x00000100
301#define AR_KeyIdx 0x0000fe00
302#define AR_KeyIdx_S 9
303#define AR_PHYErrCode 0x0000ff00
304#define AR_PHYErrCode_S 8
305#define AR_RxMoreAggr 0x00010000
306#define AR_RxAggr 0x00020000
307#define AR_PostDelimCRCErr 0x00040000
308#define AR_RxStatusRsvd71 0x3ff80000
309#define AR_DecryptBusyErr 0x40000000
310#define AR_KeyMiss 0x80000000
311
312#define AR5416_MAGIC 0x19641014
313
314#define RXSTATUS_RATE(ah, ads) (AR_SREV_5416_V20_OR_LATER(ah) ? \
315 MS(ads->ds_rxstatus0, AR_RxRate) : \
316 (ads->ds_rxstatus3 >> 2) & 0xFF)
317#define RXSTATUS_DUPLICATE(ah, ads) (AR_SREV_5416_V20_OR_LATER(ah) ? \
318 MS(ads->ds_rxstatus3, AR_Parallel40) : \
319 (ads->ds_rxstatus3 >> 10) & 0x1)
320
321#define set11nTries(_series, _index) \
322 (SM((_series)[_index].Tries, AR_XmitDataTries##_index))
323
324#define set11nRate(_series, _index) \
325 (SM((_series)[_index].Rate, AR_XmitRate##_index))
326
327#define set11nPktDurRTSCTS(_series, _index) \
328 (SM((_series)[_index].PktDuration, AR_PacketDur##_index) | \
329 ((_series)[_index].RateFlags & ATH9K_RATESERIES_RTS_CTS ? \
330 AR_RTSCTSQual##_index : 0))
331
332#define set11nRateFlags(_series, _index) \
333 (((_series)[_index].RateFlags & ATH9K_RATESERIES_2040 ? \
334 AR_2040_##_index : 0) \
335 |((_series)[_index].RateFlags & ATH9K_RATESERIES_HALFGI ? \
336 AR_GI##_index : 0) \
337 |SM((_series)[_index].ChSel, AR_ChainSel##_index))
338
339#define AR_SREV_9100(ah) ((ah->ah_macVersion) == AR_SREV_VERSION_9100)
340
341#define INIT_CONFIG_STATUS 0x00000000
342#define INIT_RSSI_THR 0x00000700
343#define INIT_BCON_CNTRL_REG 0x00000000
344
345#define MIN_TX_FIFO_THRESHOLD 0x1
346#define MAX_TX_FIFO_THRESHOLD ((4096 / 64) - 1)
347#define INIT_TX_FIFO_THRESHOLD MIN_TX_FIFO_THRESHOLD
348
349#define NUM_CORNER_FIX_BITS_2133 7
350#define CCK_OFDM_GAIN_DELTA 15
351
352struct ar5416AniState {
353 struct ath9k_channel c;
354 u8 noiseImmunityLevel;
355 u8 spurImmunityLevel;
356 u8 firstepLevel;
357 u8 ofdmWeakSigDetectOff;
358 u8 cckWeakSigThreshold;
359 u32 listenTime;
360 u32 ofdmTrigHigh;
361 u32 ofdmTrigLow;
362 int32_t cckTrigHigh;
363 int32_t cckTrigLow;
364 int32_t rssiThrLow;
365 int32_t rssiThrHigh;
366 u32 noiseFloor;
367 u32 txFrameCount;
368 u32 rxFrameCount;
369 u32 cycleCount;
370 u32 ofdmPhyErrCount;
371 u32 cckPhyErrCount;
372 u32 ofdmPhyErrBase;
373 u32 cckPhyErrBase;
374 int16_t pktRssi[2];
375 int16_t ofdmErrRssi[2];
376 int16_t cckErrRssi[2];
377};
378
379#define HAL_PROCESS_ANI 0x00000001
380#define HAL_RADAR_EN 0x80000000
381#define HAL_AR_EN 0x40000000
382
383#define DO_ANI(ah) \
384 ((AH5416(ah)->ah_procPhyErr & HAL_PROCESS_ANI))
385
386struct ar5416Stats {
387 u32 ast_ani_niup;
388 u32 ast_ani_nidown;
389 u32 ast_ani_spurup;
390 u32 ast_ani_spurdown;
391 u32 ast_ani_ofdmon;
392 u32 ast_ani_ofdmoff;
393 u32 ast_ani_cckhigh;
394 u32 ast_ani_ccklow;
395 u32 ast_ani_stepup;
396 u32 ast_ani_stepdown;
397 u32 ast_ani_ofdmerrs;
398 u32 ast_ani_cckerrs;
399 u32 ast_ani_reset;
400 u32 ast_ani_lzero;
401 u32 ast_ani_lneg;
402 struct ath9k_mib_stats ast_mibstats;
403 struct ath9k_node_stats ast_nodestats;
404};
405
406#define AR5416_OPFLAGS_11A 0x01
407#define AR5416_OPFLAGS_11G 0x02
408#define AR5416_OPFLAGS_N_5G_HT40 0x04
409#define AR5416_OPFLAGS_N_2G_HT40 0x08
410#define AR5416_OPFLAGS_N_5G_HT20 0x10
411#define AR5416_OPFLAGS_N_2G_HT20 0x20
412
413#define EEP_RFSILENT_ENABLED 0x0001
414#define EEP_RFSILENT_ENABLED_S 0
415#define EEP_RFSILENT_POLARITY 0x0002
416#define EEP_RFSILENT_POLARITY_S 1
417#define EEP_RFSILENT_GPIO_SEL 0x001c
418#define EEP_RFSILENT_GPIO_SEL_S 2
419
420#define AR5416_EEP_NO_BACK_VER 0x1
421#define AR5416_EEP_VER 0xE
422#define AR5416_EEP_VER_MINOR_MASK 0x0FFF
423#define AR5416_EEP_MINOR_VER_2 0x2
424#define AR5416_EEP_MINOR_VER_3 0x3
425#define AR5416_EEP_MINOR_VER_7 0x7
426#define AR5416_EEP_MINOR_VER_9 0x9
427
428#define AR5416_EEP_START_LOC 256
429#define AR5416_NUM_5G_CAL_PIERS 8
430#define AR5416_NUM_2G_CAL_PIERS 4
431#define AR5416_NUM_5G_20_TARGET_POWERS 8
432#define AR5416_NUM_5G_40_TARGET_POWERS 8
433#define AR5416_NUM_2G_CCK_TARGET_POWERS 3
434#define AR5416_NUM_2G_20_TARGET_POWERS 4
435#define AR5416_NUM_2G_40_TARGET_POWERS 4
436#define AR5416_NUM_CTLS 24
437#define AR5416_NUM_BAND_EDGES 8
438#define AR5416_NUM_PD_GAINS 4
439#define AR5416_PD_GAINS_IN_MASK 4
440#define AR5416_PD_GAIN_ICEPTS 5
441#define AR5416_EEPROM_MODAL_SPURS 5
442#define AR5416_MAX_RATE_POWER 63
443#define AR5416_NUM_PDADC_VALUES 128
444#define AR5416_NUM_RATES 16
445#define AR5416_BCHAN_UNUSED 0xFF
446#define AR5416_MAX_PWR_RANGE_IN_HALF_DB 64
447#define AR5416_EEPMISC_BIG_ENDIAN 0x01
448#define AR5416_MAX_CHAINS 3
449#define AR5416_ANT_16S 25
450
451#define AR5416_NUM_ANT_CHAIN_FIELDS 7
452#define AR5416_NUM_ANT_COMMON_FIELDS 4
453#define AR5416_SIZE_ANT_CHAIN_FIELD 3
454#define AR5416_SIZE_ANT_COMMON_FIELD 4
455#define AR5416_ANT_CHAIN_MASK 0x7
456#define AR5416_ANT_COMMON_MASK 0xf
457#define AR5416_CHAIN_0_IDX 0
458#define AR5416_CHAIN_1_IDX 1
459#define AR5416_CHAIN_2_IDX 2
460
461#define AR5416_PWR_TABLE_OFFSET -5
462#define AR5416_LEGACY_CHAINMASK 1
463
464enum eeprom_param {
465 EEP_NFTHRESH_5,
466 EEP_NFTHRESH_2,
467 EEP_MAC_MSW,
468 EEP_MAC_MID,
469 EEP_MAC_LSW,
470 EEP_REG_0,
471 EEP_REG_1,
472 EEP_OP_CAP,
473 EEP_OP_MODE,
474 EEP_RF_SILENT,
475 EEP_OB_5,
476 EEP_DB_5,
477 EEP_OB_2,
478 EEP_DB_2,
479 EEP_MINOR_REV,
480 EEP_TX_MASK,
481 EEP_RX_MASK,
482};
483
484enum ar5416_rates {
485 rate6mb, rate9mb, rate12mb, rate18mb,
486 rate24mb, rate36mb, rate48mb, rate54mb,
487 rate1l, rate2l, rate2s, rate5_5l,
488 rate5_5s, rate11l, rate11s, rateXr,
489 rateHt20_0, rateHt20_1, rateHt20_2, rateHt20_3,
490 rateHt20_4, rateHt20_5, rateHt20_6, rateHt20_7,
491 rateHt40_0, rateHt40_1, rateHt40_2, rateHt40_3,
492 rateHt40_4, rateHt40_5, rateHt40_6, rateHt40_7,
493 rateDupCck, rateDupOfdm, rateExtCck, rateExtOfdm,
494 Ar5416RateSize
495};
496
497struct base_eep_header {
498 u16 length;
499 u16 checksum;
500 u16 version;
501 u8 opCapFlags;
502 u8 eepMisc;
503 u16 regDmn[2];
504 u8 macAddr[6];
505 u8 rxMask;
506 u8 txMask;
507 u16 rfSilent;
508 u16 blueToothOptions;
509 u16 deviceCap;
510 u32 binBuildNumber;
511 u8 deviceType;
512 u8 pwdclkind;
513 u8 futureBase[32];
514} __packed;
515
516struct spur_chan {
517 u16 spurChan;
518 u8 spurRangeLow;
519 u8 spurRangeHigh;
520} __packed;
521
522struct modal_eep_header {
523 u32 antCtrlChain[AR5416_MAX_CHAINS];
524 u32 antCtrlCommon;
525 u8 antennaGainCh[AR5416_MAX_CHAINS];
526 u8 switchSettling;
527 u8 txRxAttenCh[AR5416_MAX_CHAINS];
528 u8 rxTxMarginCh[AR5416_MAX_CHAINS];
529 u8 adcDesiredSize;
530 u8 pgaDesiredSize;
531 u8 xlnaGainCh[AR5416_MAX_CHAINS];
532 u8 txEndToXpaOff;
533 u8 txEndToRxOn;
534 u8 txFrameToXpaOn;
535 u8 thresh62;
536 u8 noiseFloorThreshCh[AR5416_MAX_CHAINS];
537 u8 xpdGain;
538 u8 xpd;
539 u8 iqCalICh[AR5416_MAX_CHAINS];
540 u8 iqCalQCh[AR5416_MAX_CHAINS];
541 u8 pdGainOverlap;
542 u8 ob;
543 u8 db;
544 u8 xpaBiasLvl;
545 u8 pwrDecreaseFor2Chain;
546 u8 pwrDecreaseFor3Chain;
547 u8 txFrameToDataStart;
548 u8 txFrameToPaOn;
549 u8 ht40PowerIncForPdadc;
550 u8 bswAtten[AR5416_MAX_CHAINS];
551 u8 bswMargin[AR5416_MAX_CHAINS];
552 u8 swSettleHt40;
553 u8 xatten2Db[AR5416_MAX_CHAINS];
554 u8 xatten2Margin[AR5416_MAX_CHAINS];
555 u8 ob_ch1;
556 u8 db_ch1;
557 u8 useAnt1:1,
558 force_xpaon:1,
559 local_bias:1,
560 femBandSelectUsed:1, xlnabufin:1, xlnaisel:2, xlnabufmode:1;
561 u8 futureModalar9280;
562 u16 xpaBiasLvlFreq[3];
563 u8 futureModal[6];
564
565 struct spur_chan spurChans[AR5416_EEPROM_MODAL_SPURS];
566} __packed;
567
568struct cal_data_per_freq {
569 u8 pwrPdg[AR5416_NUM_PD_GAINS][AR5416_PD_GAIN_ICEPTS];
570 u8 vpdPdg[AR5416_NUM_PD_GAINS][AR5416_PD_GAIN_ICEPTS];
571} __packed;
572
573struct cal_target_power_leg {
574 u8 bChannel;
575 u8 tPow2x[4];
576} __packed;
577
578struct cal_target_power_ht {
579 u8 bChannel;
580 u8 tPow2x[8];
581} __packed;
582
583#ifdef __BIG_ENDIAN_BITFIELD
584struct cal_ctl_edges {
585 u8 bChannel;
586 u8 flag:2, tPower:6;
587} __packed;
588#else
589struct cal_ctl_edges {
590 u8 bChannel;
591 u8 tPower:6, flag:2;
592} __packed;
593#endif
594
595struct cal_ctl_data {
596 struct cal_ctl_edges
597 ctlEdges[AR5416_MAX_CHAINS][AR5416_NUM_BAND_EDGES];
598} __packed;
599
600struct ar5416_eeprom {
601 struct base_eep_header baseEepHeader;
602 u8 custData[64];
603 struct modal_eep_header modalHeader[2];
604 u8 calFreqPier5G[AR5416_NUM_5G_CAL_PIERS];
605 u8 calFreqPier2G[AR5416_NUM_2G_CAL_PIERS];
606 struct cal_data_per_freq
607 calPierData5G[AR5416_MAX_CHAINS][AR5416_NUM_5G_CAL_PIERS];
608 struct cal_data_per_freq
609 calPierData2G[AR5416_MAX_CHAINS][AR5416_NUM_2G_CAL_PIERS];
610 struct cal_target_power_leg
611 calTargetPower5G[AR5416_NUM_5G_20_TARGET_POWERS];
612 struct cal_target_power_ht
613 calTargetPower5GHT20[AR5416_NUM_5G_20_TARGET_POWERS];
614 struct cal_target_power_ht
615 calTargetPower5GHT40[AR5416_NUM_5G_40_TARGET_POWERS];
616 struct cal_target_power_leg
617 calTargetPowerCck[AR5416_NUM_2G_CCK_TARGET_POWERS];
618 struct cal_target_power_leg
619 calTargetPower2G[AR5416_NUM_2G_20_TARGET_POWERS];
620 struct cal_target_power_ht
621 calTargetPower2GHT20[AR5416_NUM_2G_20_TARGET_POWERS];
622 struct cal_target_power_ht
623 calTargetPower2GHT40[AR5416_NUM_2G_40_TARGET_POWERS];
624 u8 ctlIndex[AR5416_NUM_CTLS];
625 struct cal_ctl_data ctlData[AR5416_NUM_CTLS];
626 u8 padding;
627} __packed;
628
629struct ar5416IniArray {
630 u32 *ia_array;
631 u32 ia_rows;
632 u32 ia_columns;
633};
634
635#define INIT_INI_ARRAY(iniarray, array, rows, columns) do { \
636 (iniarray)->ia_array = (u32 *)(array); \
637 (iniarray)->ia_rows = (rows); \
638 (iniarray)->ia_columns = (columns); \
639 } while (0)
640
641#define INI_RA(iniarray, row, column) \
642 (((iniarray)->ia_array)[(row) * ((iniarray)->ia_columns) + (column)])
643
644#define INIT_CAL(_perCal) do { \
645 (_perCal)->calState = CAL_WAITING; \
646 (_perCal)->calNext = NULL; \
647 } while (0)
648
649#define INSERT_CAL(_ahp, _perCal) \
650 do { \
651 if ((_ahp)->ah_cal_list_last == NULL) { \
652 (_ahp)->ah_cal_list = \
653 (_ahp)->ah_cal_list_last = (_perCal); \
654 ((_ahp)->ah_cal_list_last)->calNext = (_perCal); \
655 } else { \
656 ((_ahp)->ah_cal_list_last)->calNext = (_perCal); \
657 (_ahp)->ah_cal_list_last = (_perCal); \
658 (_perCal)->calNext = (_ahp)->ah_cal_list; \
659 } \
660 } while (0)
661
662enum hal_cal_types {
663 ADC_DC_INIT_CAL = 0x1,
664 ADC_GAIN_CAL = 0x2,
665 ADC_DC_CAL = 0x4,
666 IQ_MISMATCH_CAL = 0x8
667};
668
669enum hal_cal_state {
670 CAL_INACTIVE,
671 CAL_WAITING,
672 CAL_RUNNING,
673 CAL_DONE
674};
675
676#define MIN_CAL_SAMPLES 1
677#define MAX_CAL_SAMPLES 64
678#define INIT_LOG_COUNT 5
679#define PER_MIN_LOG_COUNT 2
680#define PER_MAX_LOG_COUNT 10
681
682struct hal_percal_data {
683 enum hal_cal_types calType;
684 u32 calNumSamples;
685 u32 calCountMax;
686 void (*calCollect) (struct ath_hal *);
687 void (*calPostProc) (struct ath_hal *, u8);
688};
689
690struct hal_cal_list {
691 const struct hal_percal_data *calData;
692 enum hal_cal_state calState;
693 struct hal_cal_list *calNext;
694};
695
696struct ath_hal_5416 {
697 struct ath_hal ah;
698 struct ar5416_eeprom ah_eeprom;
699 u8 ah_macaddr[ETH_ALEN];
700 u8 ah_bssid[ETH_ALEN];
701 u8 ah_bssidmask[ETH_ALEN];
702 u16 ah_assocId;
703 int16_t ah_curchanRadIndex;
704 u32 ah_maskReg;
705 struct ar5416Stats ah_stats;
706 u32 ah_txDescMask;
707 u32 ah_txOkInterruptMask;
708 u32 ah_txErrInterruptMask;
709 u32 ah_txDescInterruptMask;
710 u32 ah_txEolInterruptMask;
711 u32 ah_txUrnInterruptMask;
712 struct ath9k_tx_queue_info ah_txq[ATH9K_NUM_TX_QUEUES];
713 enum ath9k_power_mode ah_powerMode;
714 bool ah_chipFullSleep;
715 u32 ah_atimWindow;
716 enum ath9k_ant_setting ah_diversityControl;
717 u16 ah_antennaSwitchSwap;
718 enum hal_cal_types ah_suppCals;
719 struct hal_cal_list ah_iqCalData;
720 struct hal_cal_list ah_adcGainCalData;
721 struct hal_cal_list ah_adcDcCalInitData;
722 struct hal_cal_list ah_adcDcCalData;
723 struct hal_cal_list *ah_cal_list;
724 struct hal_cal_list *ah_cal_list_last;
725 struct hal_cal_list *ah_cal_list_curr;
726#define ah_totalPowerMeasI ah_Meas0.unsign
727#define ah_totalPowerMeasQ ah_Meas1.unsign
728#define ah_totalIqCorrMeas ah_Meas2.sign
729#define ah_totalAdcIOddPhase ah_Meas0.unsign
730#define ah_totalAdcIEvenPhase ah_Meas1.unsign
731#define ah_totalAdcQOddPhase ah_Meas2.unsign
732#define ah_totalAdcQEvenPhase ah_Meas3.unsign
733#define ah_totalAdcDcOffsetIOddPhase ah_Meas0.sign
734#define ah_totalAdcDcOffsetIEvenPhase ah_Meas1.sign
735#define ah_totalAdcDcOffsetQOddPhase ah_Meas2.sign
736#define ah_totalAdcDcOffsetQEvenPhase ah_Meas3.sign
737 union {
738 u32 unsign[AR5416_MAX_CHAINS];
739 int32_t sign[AR5416_MAX_CHAINS];
740 } ah_Meas0;
741 union {
742 u32 unsign[AR5416_MAX_CHAINS];
743 int32_t sign[AR5416_MAX_CHAINS];
744 } ah_Meas1;
745 union {
746 u32 unsign[AR5416_MAX_CHAINS];
747 int32_t sign[AR5416_MAX_CHAINS];
748 } ah_Meas2;
749 union {
750 u32 unsign[AR5416_MAX_CHAINS];
751 int32_t sign[AR5416_MAX_CHAINS];
752 } ah_Meas3;
753 u16 ah_CalSamples;
754 u32 ah_tx6PowerInHalfDbm;
755 u32 ah_staId1Defaults;
756 u32 ah_miscMode;
757 bool ah_tpcEnabled;
758 u32 ah_beaconInterval;
759 enum {
760 AUTO_32KHZ,
761 USE_32KHZ,
762 DONT_USE_32KHZ,
763 } ah_enable32kHzClock;
764 u32 *ah_analogBank0Data;
765 u32 *ah_analogBank1Data;
766 u32 *ah_analogBank2Data;
767 u32 *ah_analogBank3Data;
768 u32 *ah_analogBank6Data;
769 u32 *ah_analogBank6TPCData;
770 u32 *ah_analogBank7Data;
771 u32 *ah_addac5416_21;
772 u32 *ah_bank6Temp;
773 u32 ah_ofdmTxPower;
774 int16_t ah_txPowerIndexOffset;
775 u32 ah_slottime;
776 u32 ah_acktimeout;
777 u32 ah_ctstimeout;
778 u32 ah_globaltxtimeout;
779 u8 ah_gBeaconRate;
780 u32 ah_gpioSelect;
781 u32 ah_polarity;
782 u32 ah_gpioBit;
783 bool ah_eepEnabled;
784 u32 ah_procPhyErr;
785 bool ah_hasHwPhyCounters;
786 u32 ah_aniPeriod;
787 struct ar5416AniState *ah_curani;
788 struct ar5416AniState ah_ani[255];
789 int ah_totalSizeDesired[5];
790 int ah_coarseHigh[5];
791 int ah_coarseLow[5];
792 int ah_firpwr[5];
793 u16 ah_ratesArray[16];
794 u32 ah_intrTxqs;
795 bool ah_intrMitigation;
796 u32 ah_cycleCount;
797 u32 ah_ctlBusy;
798 u32 ah_extBusy;
799 enum ath9k_ht_extprotspacing ah_extprotspacing;
800 u8 ah_txchainmask;
801 u8 ah_rxchainmask;
802 int ah_hwp;
803 void __iomem *ah_cal_mem;
804 enum ath9k_ani_cmd ah_ani_function;
805 struct ar5416IniArray ah_iniModes;
806 struct ar5416IniArray ah_iniCommon;
807 struct ar5416IniArray ah_iniBank0;
808 struct ar5416IniArray ah_iniBB_RfGain;
809 struct ar5416IniArray ah_iniBank1;
810 struct ar5416IniArray ah_iniBank2;
811 struct ar5416IniArray ah_iniBank3;
812 struct ar5416IniArray ah_iniBank6;
813 struct ar5416IniArray ah_iniBank6TPC;
814 struct ar5416IniArray ah_iniBank7;
815 struct ar5416IniArray ah_iniAddac;
816 struct ar5416IniArray ah_iniPcieSerdes;
817 struct ar5416IniArray ah_iniModesAdditional;
818};
819#define AH5416(_ah) ((struct ath_hal_5416 *)(_ah))
820
821#define FREQ2FBIN(x, y) ((y) ? ((x) - 2300) : (((x) - 4800) / 5))
822
823#define IS_5416_EMU(ah) \
824 ((ah->ah_devid == AR5416_DEVID_EMU) || \
825 (ah->ah_devid == AR5416_DEVID_EMU_PCIE))
826
827#define ar5416RfDetach(ah) do { \
828 if (AH5416(ah)->ah_rfHal.rfDetach != NULL) \
829 AH5416(ah)->ah_rfHal.rfDetach(ah); \
830 } while (0)
831
832#define ath9k_hw_use_flash(_ah) \
833 (!(_ah->ah_flags & AH_USE_EEPROM))
834
835
836#define DO_DELAY(x) do { \
837 if ((++(x) % 64) == 0) \
838 udelay(1); \
839 } while (0)
840
841#define REG_WRITE_ARRAY(iniarray, column, regWr) do { \
842 int r; \
843 for (r = 0; r < ((iniarray)->ia_rows); r++) { \
844 REG_WRITE(ah, INI_RA((iniarray), (r), 0), \
845 INI_RA((iniarray), r, (column))); \
846 DO_DELAY(regWr); \
847 } \
848 } while (0)
849
850#define BASE_ACTIVATE_DELAY 100
851#define RTC_PLL_SETTLE_DELAY 1000
852#define COEF_SCALE_S 24
853#define HT40_CHANNEL_CENTER_SHIFT 10
854
855#define ar5416CheckOpMode(_opmode) \
856 ((_opmode == ATH9K_M_STA) || (_opmode == ATH9K_M_IBSS) || \
857 (_opmode == ATH9K_M_HOSTAP) || (_opmode == ATH9K_M_MONITOR))
858
859#define AR5416_EEPROM_MAGIC_OFFSET 0x0
860
861#define AR5416_EEPROM_S 2
862#define AR5416_EEPROM_OFFSET 0x2000
863#define AR5416_EEPROM_START_ADDR \
 864	((AR_SREV_9100(ah)) ? 0x1fff1000 : 0x503f1200)
865#define AR5416_EEPROM_MAX 0xae0
866#define ar5416_get_eep_ver(_ahp) \
867 (((_ahp)->ah_eeprom.baseEepHeader.version >> 12) & 0xF)
868#define ar5416_get_eep_rev(_ahp) \
869 (((_ahp)->ah_eeprom.baseEepHeader.version) & 0xFFF)
870#define ar5416_get_ntxchains(_txchainmask) \
871 (((_txchainmask >> 2) & 1) + \
872 ((_txchainmask >> 1) & 1) + (_txchainmask & 1))
873
874#define IS_EEP_MINOR_V3(_ahp) \
875 (ath9k_hw_get_eeprom((_ahp), EEP_MINOR_REV) >= AR5416_EEP_MINOR_VER_3)
876
877#define FIXED_CCA_THRESHOLD 15
878
879#ifdef __BIG_ENDIAN
880#define AR5416_EEPROM_MAGIC 0x5aa5
881#else
882#define AR5416_EEPROM_MAGIC 0xa55a
883#endif
884
885#define ATH9K_POW_SM(_r, _s) (((_r) & 0x3f) << (_s))
886
887#define ATH9K_ANTENNA0_CHAINMASK 0x1
888#define ATH9K_ANTENNA1_CHAINMASK 0x2
889
890#define ATH9K_NUM_DMA_DEBUG_REGS 8
891#define ATH9K_NUM_QUEUES 10
892
893#define HAL_NOISE_IMMUNE_MAX 4
894#define HAL_SPUR_IMMUNE_MAX 7
895#define HAL_FIRST_STEP_MAX 2
896
897#define ATH9K_ANI_OFDM_TRIG_HIGH 500
898#define ATH9K_ANI_OFDM_TRIG_LOW 200
899#define ATH9K_ANI_CCK_TRIG_HIGH 200
900#define ATH9K_ANI_CCK_TRIG_LOW 100
901#define ATH9K_ANI_NOISE_IMMUNE_LVL 4
902#define ATH9K_ANI_USE_OFDM_WEAK_SIG true
903#define ATH9K_ANI_CCK_WEAK_SIG_THR false
904#define ATH9K_ANI_SPUR_IMMUNE_LVL 7
905#define ATH9K_ANI_FIRSTEP_LVL 0
906#define ATH9K_ANI_RSSI_THR_HIGH 40
907#define ATH9K_ANI_RSSI_THR_LOW 7
908#define ATH9K_ANI_PERIOD 100
909
910#define AR_GPIOD_MASK 0x00001FFF
911#define AR_GPIO_BIT(_gpio) (1 << (_gpio))
912
913#define MAX_ANALOG_START 319
914
915#define HAL_EP_RND(x, mul) \
916 ((((x)%(mul)) >= ((mul)/2)) ? ((x) + ((mul) - 1)) / (mul) : (x)/(mul))
917#define BEACON_RSSI(ahp) \
918 HAL_EP_RND(ahp->ah_stats.ast_nodestats.ns_avgbrssi, \
919 ATH9K_RSSI_EP_MULTIPLIER)
920
921#define ah_mibStats ah_stats.ast_mibstats
922
923#define AH_TIMEOUT 100000
924#define AH_TIME_QUANTUM 10
925
926#define IS(_c, _f) (((_c)->channelFlags & _f) || 0)
927
928#define AR_KEYTABLE_SIZE 128
929#define POWER_UP_TIME 200000
930
931#define EXT_ADDITIVE (0x8000)
932#define CTL_11A_EXT (CTL_11A | EXT_ADDITIVE)
933#define CTL_11G_EXT (CTL_11G | EXT_ADDITIVE)
934#define CTL_11B_EXT (CTL_11B | EXT_ADDITIVE)
935
936#define SUB_NUM_CTL_MODES_AT_5G_40 2
937#define SUB_NUM_CTL_MODES_AT_2G_40 3
938#define SPUR_RSSI_THRESH 40
939
940#define TU_TO_USEC(_tu) ((_tu) << 10)
941
942#define CAB_TIMEOUT_VAL 10
943#define BEACON_TIMEOUT_VAL 10
944#define MIN_BEACON_TIMEOUT_VAL 1
945#define SLEEP_SLOP 3
946
947#define CCK_SIFS_TIME 10
948#define CCK_PREAMBLE_BITS 144
949#define CCK_PLCP_BITS 48
950
951#define OFDM_SIFS_TIME 16
952#define OFDM_PREAMBLE_TIME 20
953#define OFDM_PLCP_BITS 22
954#define OFDM_SYMBOL_TIME 4
955
956#define OFDM_SIFS_TIME_HALF 32
957#define OFDM_PREAMBLE_TIME_HALF 40
958#define OFDM_PLCP_BITS_HALF 22
959#define OFDM_SYMBOL_TIME_HALF 8
960
961#define OFDM_SIFS_TIME_QUARTER 64
962#define OFDM_PREAMBLE_TIME_QUARTER 80
963#define OFDM_PLCP_BITS_QUARTER 22
964#define OFDM_SYMBOL_TIME_QUARTER 16
965
966u32 ath9k_hw_get_eeprom(struct ath_hal_5416 *ahp,
967 enum eeprom_param param);
968
969#endif
diff --git a/drivers/net/wireless/ath9k/initvals.h b/drivers/net/wireless/ath9k/initvals.h
new file mode 100644
index 000000000000..3dd3815940a4
--- /dev/null
+++ b/drivers/net/wireless/ath9k/initvals.h
@@ -0,0 +1,3146 @@
1/*
2 * Copyright (c) 2008 Atheros Communications Inc.
3 *
4 * Permission to use, copy, modify, and/or distribute this software for any
5 * purpose with or without fee is hereby granted, provided that the above
6 * copyright notice and this permission notice appear in all copies.
7 *
8 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
9 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
10 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
11 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
12 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
13 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
14 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
15 */
16
17static const u32 ar5416Modes_9100[][6] = {
18 { 0x00001030, 0x00000230, 0x00000460, 0x000002c0, 0x00000160, 0x000001e0 },
19 { 0x00001070, 0x00000168, 0x000002d0, 0x00000318, 0x0000018c, 0x000001e0 },
20 { 0x000010b0, 0x00000e60, 0x00001cc0, 0x00007c70, 0x00003e38, 0x00001180 },
21 { 0x000010f0, 0x0000a000, 0x00014000, 0x00016000, 0x0000b000, 0x00014008 },
22 { 0x00008014, 0x03e803e8, 0x07d007d0, 0x10801600, 0x08400b00, 0x06e006e0 },
23 { 0x0000801c, 0x128d93a7, 0x128d93cf, 0x12e013d7, 0x12e013ab, 0x098813cf },
24 { 0x00009804, 0x00000300, 0x000003c4, 0x000003c4, 0x00000300, 0x00000303 },
25 { 0x00009820, 0x02020200, 0x02020200, 0x02020200, 0x02020200, 0x02020200 },
26 { 0x00009824, 0x00000e0e, 0x00000e0e, 0x00000e0e, 0x00000e0e, 0x00000e0e },
27 { 0x00009828, 0x0a020001, 0x0a020001, 0x0a020001, 0x0a020001, 0x0a020001 },
28 { 0x00009834, 0x00000e0e, 0x00000e0e, 0x00000e0e, 0x00000e0e, 0x00000e0e },
29 { 0x00009838, 0x00000007, 0x00000007, 0x00000007, 0x00000007, 0x00000007 },
30 { 0x00009844, 0x1372161e, 0x1372161e, 0x137216a0, 0x137216a0, 0x137216a0 },
31 { 0x00009848, 0x001a6a65, 0x001a6a65, 0x00197a68, 0x00197a68, 0x00197a68 },
32 { 0x0000a848, 0x001a6a65, 0x001a6a65, 0x00197a68, 0x00197a68, 0x00197a68 },
33 { 0x0000b848, 0x001a6a65, 0x001a6a65, 0x00197a68, 0x00197a68, 0x00197a68 },
34 { 0x00009850, 0x6de8b4e0, 0x6de8b4e0, 0x6de8b0de, 0x6de8b0de, 0x6de8b0de },
35 { 0x00009858, 0x7ec82d2e, 0x7ec82d2e, 0x7ec82d2e, 0x7ec82d2e, 0x7ec82d2e },
36 { 0x0000985c, 0x3139605e, 0x3139605e, 0x3139605e, 0x3139605e, 0x3139605e },
37 { 0x00009860, 0x00049d18, 0x00049d18, 0x00049d18, 0x00049d18, 0x00049d18 },
38 { 0x0000c864, 0x0001ce00, 0x0001ce00, 0x0001ce00, 0x0001ce00, 0x0001ce00 },
39 { 0x00009868, 0x409a4190, 0x409a4190, 0x409a4190, 0x409a4190, 0x409a4190 },
40 { 0x0000986c, 0x050cb081, 0x050cb081, 0x050cb081, 0x050cb081, 0x050cb081 },
41 { 0x00009914, 0x000007d0, 0x000007d0, 0x00000898, 0x00000898, 0x000007d0 },
42 { 0x00009918, 0x000001b8, 0x00000370, 0x00000268, 0x00000134, 0x00000134 },
43 { 0x00009924, 0xd0058a0b, 0xd0058a0b, 0xd0058a0b, 0xd0058a0b, 0xd0058a0b },
44 { 0x00009944, 0xdfb81020, 0xdfb81020, 0xdfb81020, 0xdfb81020, 0xdfb81020 },
45 { 0x00009960, 0x00000900, 0x00000900, 0x00012d80, 0x00012d80, 0x00012d80 },
46 { 0x0000a960, 0x00000900, 0x00000900, 0x00012d80, 0x00012d80, 0x00012d80 },
47 { 0x0000b960, 0x00000900, 0x00000900, 0x00012d80, 0x00012d80, 0x00012d80 },
48 { 0x00009964, 0x00000000, 0x00000000, 0x00001120, 0x00001120, 0x00001120 },
49 { 0x0000c9bc, 0x001a0a00, 0x001a0a00, 0x001a0a00, 0x001a0a00, 0x001a0a00 },
50 { 0x000099c0, 0x038919be, 0x038919be, 0x038919be, 0x038919be, 0x038919be },
51 { 0x000099c4, 0x06336f77, 0x06336f77, 0x06336f77, 0x06336f77, 0x06336f77 },
52 { 0x000099c8, 0x60f6532c, 0x60f6532c, 0x60f6532c, 0x60f6532c, 0x60f6532c },
53 { 0x000099cc, 0x08f186c8, 0x08f186c8, 0x08f186c8, 0x08f186c8, 0x08f186c8 },
54 { 0x000099d0, 0x00046384, 0x00046384, 0x00046384, 0x00046384, 0x00046384 },
55 { 0x000099d4, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 },
56 { 0x000099d8, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 },
57 { 0x0000a204, 0x00000880, 0x00000880, 0x00000880, 0x00000880, 0x00000880 },
58 { 0x0000a208, 0xd6be4788, 0xd6be4788, 0xd03e4788, 0xd03e4788, 0xd03e4788 },
59 { 0x0000a20c, 0x002ec1e0, 0x002ec1e0, 0x002ac120, 0x002ac120, 0x002ac120 },
60 { 0x0000b20c, 0x002ec1e0, 0x002ec1e0, 0x002ac120, 0x002ac120, 0x002ac120 },
61 { 0x0000c20c, 0x002ec1e0, 0x002ec1e0, 0x002ac120, 0x002ac120, 0x002ac120 },
62 { 0x0000a21c, 0x1883800a, 0x1883800a, 0x1883800a, 0x1883800a, 0x1883800a },
63 { 0x0000a230, 0x00000000, 0x00000000, 0x00000210, 0x00000108, 0x00000000 },
64 { 0x0000a274, 0x0a1a9caa, 0x0a1a9caa, 0x0a1a7caa, 0x0a1a7caa, 0x0a1a7caa },
65 { 0x0000a300, 0x18010000, 0x18010000, 0x18010000, 0x18010000, 0x18010000 },
66 { 0x0000a304, 0x30032602, 0x30032602, 0x2e032402, 0x2e032402, 0x2e032402 },
67 { 0x0000a308, 0x48073e06, 0x48073e06, 0x4a0a3c06, 0x4a0a3c06, 0x4a0a3c06 },
68 { 0x0000a30c, 0x560b4c0a, 0x560b4c0a, 0x621a540b, 0x621a540b, 0x621a540b },
69 { 0x0000a310, 0x641a600f, 0x641a600f, 0x764f6c1b, 0x764f6c1b, 0x764f6c1b },
70 { 0x0000a314, 0x7a4f6e1b, 0x7a4f6e1b, 0x845b7a5a, 0x845b7a5a, 0x845b7a5a },
71 { 0x0000a318, 0x8c5b7e5a, 0x8c5b7e5a, 0x950f8ccf, 0x950f8ccf, 0x950f8ccf },
72 { 0x0000a31c, 0x9d0f96cf, 0x9d0f96cf, 0xa5cf9b4f, 0xa5cf9b4f, 0xa5cf9b4f },
73 { 0x0000a320, 0xb51fa69f, 0xb51fa69f, 0xbddfaf1f, 0xbddfaf1f, 0xbddfaf1f },
74 { 0x0000a324, 0xcb3fbd07, 0xcb3fbcbf, 0xd1ffc93f, 0xd1ffc93f, 0xd1ffc93f },
75 { 0x0000a328, 0x0000d7bf, 0x0000d7bf, 0x00000000, 0x00000000, 0x00000000 },
76 { 0x0000a32c, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 },
77 { 0x0000a330, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 },
78 { 0x0000a334, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 },
79};
80
81static const u32 ar5416Common_9100[][2] = {
82 { 0x0000000c, 0x00000000 },
83 { 0x00000030, 0x00020015 },
84 { 0x00000034, 0x00000005 },
85 { 0x00000040, 0x00000000 },
86 { 0x00000044, 0x00000008 },
87 { 0x00000048, 0x00000008 },
88 { 0x0000004c, 0x00000010 },
89 { 0x00000050, 0x00000000 },
90 { 0x00000054, 0x0000001f },
91 { 0x00000800, 0x00000000 },
92 { 0x00000804, 0x00000000 },
93 { 0x00000808, 0x00000000 },
94 { 0x0000080c, 0x00000000 },
95 { 0x00000810, 0x00000000 },
96 { 0x00000814, 0x00000000 },
97 { 0x00000818, 0x00000000 },
98 { 0x0000081c, 0x00000000 },
99 { 0x00000820, 0x00000000 },
100 { 0x00000824, 0x00000000 },
101 { 0x00001040, 0x002ffc0f },
102 { 0x00001044, 0x002ffc0f },
103 { 0x00001048, 0x002ffc0f },
104 { 0x0000104c, 0x002ffc0f },
105 { 0x00001050, 0x002ffc0f },
106 { 0x00001054, 0x002ffc0f },
107 { 0x00001058, 0x002ffc0f },
108 { 0x0000105c, 0x002ffc0f },
109 { 0x00001060, 0x002ffc0f },
110 { 0x00001064, 0x002ffc0f },
111 { 0x00001230, 0x00000000 },
112 { 0x00001270, 0x00000000 },
113 { 0x00001038, 0x00000000 },
114 { 0x00001078, 0x00000000 },
115 { 0x000010b8, 0x00000000 },
116 { 0x000010f8, 0x00000000 },
117 { 0x00001138, 0x00000000 },
118 { 0x00001178, 0x00000000 },
119 { 0x000011b8, 0x00000000 },
120 { 0x000011f8, 0x00000000 },
121 { 0x00001238, 0x00000000 },
122 { 0x00001278, 0x00000000 },
123 { 0x000012b8, 0x00000000 },
124 { 0x000012f8, 0x00000000 },
125 { 0x00001338, 0x00000000 },
126 { 0x00001378, 0x00000000 },
127 { 0x000013b8, 0x00000000 },
128 { 0x000013f8, 0x00000000 },
129 { 0x00001438, 0x00000000 },
130 { 0x00001478, 0x00000000 },
131 { 0x000014b8, 0x00000000 },
132 { 0x000014f8, 0x00000000 },
133 { 0x00001538, 0x00000000 },
134 { 0x00001578, 0x00000000 },
135 { 0x000015b8, 0x00000000 },
136 { 0x000015f8, 0x00000000 },
137 { 0x00001638, 0x00000000 },
138 { 0x00001678, 0x00000000 },
139 { 0x000016b8, 0x00000000 },
140 { 0x000016f8, 0x00000000 },
141 { 0x00001738, 0x00000000 },
142 { 0x00001778, 0x00000000 },
143 { 0x000017b8, 0x00000000 },
144 { 0x000017f8, 0x00000000 },
145 { 0x0000103c, 0x00000000 },
146 { 0x0000107c, 0x00000000 },
147 { 0x000010bc, 0x00000000 },
148 { 0x000010fc, 0x00000000 },
149 { 0x0000113c, 0x00000000 },
150 { 0x0000117c, 0x00000000 },
151 { 0x000011bc, 0x00000000 },
152 { 0x000011fc, 0x00000000 },
153 { 0x0000123c, 0x00000000 },
154 { 0x0000127c, 0x00000000 },
155 { 0x000012bc, 0x00000000 },
156 { 0x000012fc, 0x00000000 },
157 { 0x0000133c, 0x00000000 },
158 { 0x0000137c, 0x00000000 },
159 { 0x000013bc, 0x00000000 },
160 { 0x000013fc, 0x00000000 },
161 { 0x0000143c, 0x00000000 },
162 { 0x0000147c, 0x00000000 },
163 { 0x00004030, 0x00000002 },
164 { 0x0000403c, 0x00000002 },
165 { 0x00007010, 0x00000000 },
166 { 0x00007038, 0x000004c2 },
167 { 0x00008004, 0x00000000 },
168 { 0x00008008, 0x00000000 },
169 { 0x0000800c, 0x00000000 },
170 { 0x00008018, 0x00000700 },
171 { 0x00008020, 0x00000000 },
172 { 0x00008038, 0x00000000 },
173 { 0x0000803c, 0x00000000 },
174 { 0x00008048, 0x40000000 },
175 { 0x00008054, 0x00000000 },
176 { 0x00008058, 0x00000000 },
177 { 0x0000805c, 0x000fc78f },
178 { 0x00008060, 0x0000000f },
179 { 0x00008064, 0x00000000 },
180 { 0x000080c0, 0x2a82301a },
181 { 0x000080c4, 0x05dc01e0 },
182 { 0x000080c8, 0x1f402710 },
183 { 0x000080cc, 0x01f40000 },
184 { 0x000080d0, 0x00001e00 },
185 { 0x000080d4, 0x00000000 },
186 { 0x000080d8, 0x00400000 },
187 { 0x000080e0, 0xffffffff },
188 { 0x000080e4, 0x0000ffff },
189 { 0x000080e8, 0x003f3f3f },
190 { 0x000080ec, 0x00000000 },
191 { 0x000080f0, 0x00000000 },
192 { 0x000080f4, 0x00000000 },
193 { 0x000080f8, 0x00000000 },
194 { 0x000080fc, 0x00020000 },
195 { 0x00008100, 0x00020000 },
196 { 0x00008104, 0x00000001 },
197 { 0x00008108, 0x00000052 },
198 { 0x0000810c, 0x00000000 },
199 { 0x00008110, 0x00000168 },
200 { 0x00008118, 0x000100aa },
201 { 0x0000811c, 0x00003210 },
202 { 0x00008120, 0x08f04800 },
203 { 0x00008124, 0x00000000 },
204 { 0x00008128, 0x00000000 },
205 { 0x0000812c, 0x00000000 },
206 { 0x00008130, 0x00000000 },
207 { 0x00008134, 0x00000000 },
208 { 0x00008138, 0x00000000 },
209 { 0x0000813c, 0x00000000 },
210 { 0x00008144, 0x00000000 },
211 { 0x00008168, 0x00000000 },
212 { 0x0000816c, 0x00000000 },
213 { 0x00008170, 0x32143320 },
214 { 0x00008174, 0xfaa4fa50 },
215 { 0x00008178, 0x00000100 },
216 { 0x0000817c, 0x00000000 },
217 { 0x000081c4, 0x00000000 },
218 { 0x000081d0, 0x00003210 },
219 { 0x000081ec, 0x00000000 },
220 { 0x000081f0, 0x00000000 },
221 { 0x000081f4, 0x00000000 },
222 { 0x000081f8, 0x00000000 },
223 { 0x000081fc, 0x00000000 },
224 { 0x00008200, 0x00000000 },
225 { 0x00008204, 0x00000000 },
226 { 0x00008208, 0x00000000 },
227 { 0x0000820c, 0x00000000 },
228 { 0x00008210, 0x00000000 },
229 { 0x00008214, 0x00000000 },
230 { 0x00008218, 0x00000000 },
231 { 0x0000821c, 0x00000000 },
232 { 0x00008220, 0x00000000 },
233 { 0x00008224, 0x00000000 },
234 { 0x00008228, 0x00000000 },
235 { 0x0000822c, 0x00000000 },
236 { 0x00008230, 0x00000000 },
237 { 0x00008234, 0x00000000 },
238 { 0x00008238, 0x00000000 },
239 { 0x0000823c, 0x00000000 },
240 { 0x00008240, 0x00100000 },
241 { 0x00008244, 0x0010f400 },
242 { 0x00008248, 0x00000100 },
243 { 0x0000824c, 0x0001e800 },
244 { 0x00008250, 0x00000000 },
245 { 0x00008254, 0x00000000 },
246 { 0x00008258, 0x00000000 },
247 { 0x0000825c, 0x400000ff },
248 { 0x00008260, 0x00080922 },
249 { 0x00008270, 0x00000000 },
250 { 0x00008274, 0x40000000 },
251 { 0x00008278, 0x003e4180 },
252 { 0x0000827c, 0x00000000 },
253 { 0x00008284, 0x0000002c },
254 { 0x00008288, 0x0000002c },
255 { 0x0000828c, 0x00000000 },
256 { 0x00008294, 0x00000000 },
257 { 0x00008298, 0x00000000 },
258 { 0x00008300, 0x00000000 },
259 { 0x00008304, 0x00000000 },
260 { 0x00008308, 0x00000000 },
261 { 0x0000830c, 0x00000000 },
262 { 0x00008310, 0x00000000 },
263 { 0x00008314, 0x00000000 },
264 { 0x00008318, 0x00000000 },
265 { 0x00008328, 0x00000000 },
266 { 0x0000832c, 0x00000007 },
267 { 0x00008330, 0x00000302 },
268 { 0x00008334, 0x00000e00 },
269 { 0x00008338, 0x00000000 },
270 { 0x0000833c, 0x00000000 },
271 { 0x00008340, 0x000107ff },
272 { 0x00009808, 0x00000000 },
273 { 0x0000980c, 0xad848e19 },
274 { 0x00009810, 0x7d14e000 },
275 { 0x00009814, 0x9c0a9f6b },
276 { 0x0000981c, 0x00000000 },
277 { 0x0000982c, 0x0000a000 },
278 { 0x00009830, 0x00000000 },
279 { 0x0000983c, 0x00200400 },
280 { 0x00009840, 0x206a002e },
281 { 0x0000984c, 0x1284233c },
282 { 0x00009854, 0x00000859 },
283 { 0x00009900, 0x00000000 },
284 { 0x00009904, 0x00000000 },
285 { 0x00009908, 0x00000000 },
286 { 0x0000990c, 0x00000000 },
287 { 0x0000991c, 0x10000fff },
288 { 0x00009920, 0x05100000 },
289 { 0x0000a920, 0x05100000 },
290 { 0x0000b920, 0x05100000 },
291 { 0x00009928, 0x00000001 },
292 { 0x0000992c, 0x00000004 },
293 { 0x00009934, 0x1e1f2022 },
294 { 0x00009938, 0x0a0b0c0d },
295 { 0x0000993c, 0x00000000 },
296 { 0x00009948, 0x9280b212 },
297 { 0x0000994c, 0x00020028 },
298 { 0x00009954, 0x5d50e188 },
299 { 0x00009958, 0x00081fff },
300 { 0x0000c95c, 0x004b6a8e },
301 { 0x0000c968, 0x000003ce },
302 { 0x00009970, 0x190fb515 },
303 { 0x00009974, 0x00000000 },
304 { 0x00009978, 0x00000001 },
305 { 0x0000997c, 0x00000000 },
306 { 0x00009980, 0x00000000 },
307 { 0x00009984, 0x00000000 },
308 { 0x00009988, 0x00000000 },
309 { 0x0000998c, 0x00000000 },
310 { 0x00009990, 0x00000000 },
311 { 0x00009994, 0x00000000 },
312 { 0x00009998, 0x00000000 },
313 { 0x0000999c, 0x00000000 },
314 { 0x000099a0, 0x00000000 },
315 { 0x000099a4, 0x00000001 },
316 { 0x000099a8, 0x001fff00 },
317 { 0x000099ac, 0x00000000 },
318 { 0x000099b0, 0x03051000 },
319 { 0x000099dc, 0x00000000 },
320 { 0x000099e0, 0x00000200 },
321 { 0x000099e4, 0xaaaaaaaa },
322 { 0x000099e8, 0x3c466478 },
323 { 0x000099ec, 0x000000aa },
324 { 0x000099fc, 0x00001042 },
325 { 0x00009b00, 0x00000000 },
326 { 0x00009b04, 0x00000001 },
327 { 0x00009b08, 0x00000002 },
328 { 0x00009b0c, 0x00000003 },
329 { 0x00009b10, 0x00000004 },
330 { 0x00009b14, 0x00000005 },
331 { 0x00009b18, 0x00000008 },
332 { 0x00009b1c, 0x00000009 },
333 { 0x00009b20, 0x0000000a },
334 { 0x00009b24, 0x0000000b },
335 { 0x00009b28, 0x0000000c },
336 { 0x00009b2c, 0x0000000d },
337 { 0x00009b30, 0x00000010 },
338 { 0x00009b34, 0x00000011 },
339 { 0x00009b38, 0x00000012 },
340 { 0x00009b3c, 0x00000013 },
341 { 0x00009b40, 0x00000014 },
342 { 0x00009b44, 0x00000015 },
343 { 0x00009b48, 0x00000018 },
344 { 0x00009b4c, 0x00000019 },
345 { 0x00009b50, 0x0000001a },
346 { 0x00009b54, 0x0000001b },
347 { 0x00009b58, 0x0000001c },
348 { 0x00009b5c, 0x0000001d },
349 { 0x00009b60, 0x00000020 },
350 { 0x00009b64, 0x00000021 },
351 { 0x00009b68, 0x00000022 },
352 { 0x00009b6c, 0x00000023 },
353 { 0x00009b70, 0x00000024 },
354 { 0x00009b74, 0x00000025 },
355 { 0x00009b78, 0x00000028 },
356 { 0x00009b7c, 0x00000029 },
357 { 0x00009b80, 0x0000002a },
358 { 0x00009b84, 0x0000002b },
359 { 0x00009b88, 0x0000002c },
360 { 0x00009b8c, 0x0000002d },
361 { 0x00009b90, 0x00000030 },
362 { 0x00009b94, 0x00000031 },
363 { 0x00009b98, 0x00000032 },
364 { 0x00009b9c, 0x00000033 },
365 { 0x00009ba0, 0x00000034 },
366 { 0x00009ba4, 0x00000035 },
367 { 0x00009ba8, 0x00000035 },
368 { 0x00009bac, 0x00000035 },
369 { 0x00009bb0, 0x00000035 },
370 { 0x00009bb4, 0x00000035 },
371 { 0x00009bb8, 0x00000035 },
372 { 0x00009bbc, 0x00000035 },
373 { 0x00009bc0, 0x00000035 },
374 { 0x00009bc4, 0x00000035 },
375 { 0x00009bc8, 0x00000035 },
376 { 0x00009bcc, 0x00000035 },
377 { 0x00009bd0, 0x00000035 },
378 { 0x00009bd4, 0x00000035 },
379 { 0x00009bd8, 0x00000035 },
380 { 0x00009bdc, 0x00000035 },
381 { 0x00009be0, 0x00000035 },
382 { 0x00009be4, 0x00000035 },
383 { 0x00009be8, 0x00000035 },
384 { 0x00009bec, 0x00000035 },
385 { 0x00009bf0, 0x00000035 },
386 { 0x00009bf4, 0x00000035 },
387 { 0x00009bf8, 0x00000010 },
388 { 0x00009bfc, 0x0000001a },
389 { 0x0000a210, 0x40806333 },
390 { 0x0000a214, 0x00106c10 },
391 { 0x0000a218, 0x009c4060 },
392 { 0x0000a220, 0x018830c6 },
393 { 0x0000a224, 0x00000400 },
394 { 0x0000a228, 0x00000bb5 },
395 { 0x0000a22c, 0x00000011 },
396 { 0x0000a234, 0x20202020 },
397 { 0x0000a238, 0x20202020 },
398 { 0x0000a23c, 0x13c889af },
399 { 0x0000a240, 0x38490a20 },
400 { 0x0000a244, 0x00007bb6 },
401 { 0x0000a248, 0x0fff3ffc },
402 { 0x0000a24c, 0x00000001 },
403 { 0x0000a250, 0x0000a000 },
404 { 0x0000a254, 0x00000000 },
405 { 0x0000a258, 0x0cc75380 },
406 { 0x0000a25c, 0x0f0f0f01 },
407 { 0x0000a260, 0xdfa91f01 },
408 { 0x0000a268, 0x00000000 },
409 { 0x0000a26c, 0x0ebae9c6 },
410 { 0x0000b26c, 0x0ebae9c6 },
411 { 0x0000c26c, 0x0ebae9c6 },
412 { 0x0000d270, 0x00820820 },
413 { 0x0000a278, 0x1ce739ce },
414 { 0x0000a27c, 0x051701ce },
415 { 0x0000a338, 0x00000000 },
416 { 0x0000a33c, 0x00000000 },
417 { 0x0000a340, 0x00000000 },
418 { 0x0000a344, 0x00000000 },
419 { 0x0000a348, 0x3fffffff },
420 { 0x0000a34c, 0x3fffffff },
421 { 0x0000a350, 0x3fffffff },
422 { 0x0000a354, 0x0003ffff },
423 { 0x0000a358, 0x79a8aa1f },
424 { 0x0000d35c, 0x07ffffef },
425 { 0x0000d360, 0x0fffffe7 },
426 { 0x0000d364, 0x17ffffe5 },
427 { 0x0000d368, 0x1fffffe4 },
428 { 0x0000d36c, 0x37ffffe3 },
429 { 0x0000d370, 0x3fffffe3 },
430 { 0x0000d374, 0x57ffffe3 },
431 { 0x0000d378, 0x5fffffe2 },
432 { 0x0000d37c, 0x7fffffe2 },
433 { 0x0000d380, 0x7f3c7bba },
434 { 0x0000d384, 0xf3307ff0 },
435 { 0x0000a388, 0x08000000 },
436 { 0x0000a38c, 0x20202020 },
437 { 0x0000a390, 0x20202020 },
438 { 0x0000a394, 0x1ce739ce },
439 { 0x0000a398, 0x000001ce },
440 { 0x0000a39c, 0x00000001 },
441 { 0x0000a3a0, 0x00000000 },
442 { 0x0000a3a4, 0x00000000 },
443 { 0x0000a3a8, 0x00000000 },
444 { 0x0000a3ac, 0x00000000 },
445 { 0x0000a3b0, 0x00000000 },
446 { 0x0000a3b4, 0x00000000 },
447 { 0x0000a3b8, 0x00000000 },
448 { 0x0000a3bc, 0x00000000 },
449 { 0x0000a3c0, 0x00000000 },
450 { 0x0000a3c4, 0x00000000 },
451 { 0x0000a3c8, 0x00000246 },
452 { 0x0000a3cc, 0x20202020 },
453 { 0x0000a3d0, 0x20202020 },
454 { 0x0000a3d4, 0x20202020 },
455 { 0x0000a3dc, 0x1ce739ce },
456 { 0x0000a3e0, 0x000001ce },
457};
458
459static const u32 ar5416Bank0_9100[][2] = {
460 { 0x000098b0, 0x1e5795e5 },
461 { 0x000098e0, 0x02008020 },
462};
463
464static const u32 ar5416BB_RfGain_9100[][3] = {
465 { 0x00009a00, 0x00000000, 0x00000000 },
466 { 0x00009a04, 0x00000040, 0x00000040 },
467 { 0x00009a08, 0x00000080, 0x00000080 },
468 { 0x00009a0c, 0x000001a1, 0x00000141 },
469 { 0x00009a10, 0x000001e1, 0x00000181 },
470 { 0x00009a14, 0x00000021, 0x000001c1 },
471 { 0x00009a18, 0x00000061, 0x00000001 },
472 { 0x00009a1c, 0x00000168, 0x00000041 },
473 { 0x00009a20, 0x000001a8, 0x000001a8 },
474 { 0x00009a24, 0x000001e8, 0x000001e8 },
475 { 0x00009a28, 0x00000028, 0x00000028 },
476 { 0x00009a2c, 0x00000068, 0x00000068 },
477 { 0x00009a30, 0x00000189, 0x000000a8 },
478 { 0x00009a34, 0x000001c9, 0x00000169 },
479 { 0x00009a38, 0x00000009, 0x000001a9 },
480 { 0x00009a3c, 0x00000049, 0x000001e9 },
481 { 0x00009a40, 0x00000089, 0x00000029 },
482 { 0x00009a44, 0x00000170, 0x00000069 },
483 { 0x00009a48, 0x000001b0, 0x00000190 },
484 { 0x00009a4c, 0x000001f0, 0x000001d0 },
485 { 0x00009a50, 0x00000030, 0x00000010 },
486 { 0x00009a54, 0x00000070, 0x00000050 },
487 { 0x00009a58, 0x00000191, 0x00000090 },
488 { 0x00009a5c, 0x000001d1, 0x00000151 },
489 { 0x00009a60, 0x00000011, 0x00000191 },
490 { 0x00009a64, 0x00000051, 0x000001d1 },
491 { 0x00009a68, 0x00000091, 0x00000011 },
492 { 0x00009a6c, 0x000001b8, 0x00000051 },
493 { 0x00009a70, 0x000001f8, 0x00000198 },
494 { 0x00009a74, 0x00000038, 0x000001d8 },
495 { 0x00009a78, 0x00000078, 0x00000018 },
496 { 0x00009a7c, 0x00000199, 0x00000058 },
497 { 0x00009a80, 0x000001d9, 0x00000098 },
498 { 0x00009a84, 0x00000019, 0x00000159 },
499 { 0x00009a88, 0x00000059, 0x00000199 },
500 { 0x00009a8c, 0x00000099, 0x000001d9 },
501 { 0x00009a90, 0x000000d9, 0x00000019 },
502 { 0x00009a94, 0x000000f9, 0x00000059 },
503 { 0x00009a98, 0x000000f9, 0x00000099 },
504 { 0x00009a9c, 0x000000f9, 0x000000d9 },
505 { 0x00009aa0, 0x000000f9, 0x000000f9 },
506 { 0x00009aa4, 0x000000f9, 0x000000f9 },
507 { 0x00009aa8, 0x000000f9, 0x000000f9 },
508 { 0x00009aac, 0x000000f9, 0x000000f9 },
509 { 0x00009ab0, 0x000000f9, 0x000000f9 },
510 { 0x00009ab4, 0x000000f9, 0x000000f9 },
511 { 0x00009ab8, 0x000000f9, 0x000000f9 },
512 { 0x00009abc, 0x000000f9, 0x000000f9 },
513 { 0x00009ac0, 0x000000f9, 0x000000f9 },
514 { 0x00009ac4, 0x000000f9, 0x000000f9 },
515 { 0x00009ac8, 0x000000f9, 0x000000f9 },
516 { 0x00009acc, 0x000000f9, 0x000000f9 },
517 { 0x00009ad0, 0x000000f9, 0x000000f9 },
518 { 0x00009ad4, 0x000000f9, 0x000000f9 },
519 { 0x00009ad8, 0x000000f9, 0x000000f9 },
520 { 0x00009adc, 0x000000f9, 0x000000f9 },
521 { 0x00009ae0, 0x000000f9, 0x000000f9 },
522 { 0x00009ae4, 0x000000f9, 0x000000f9 },
523 { 0x00009ae8, 0x000000f9, 0x000000f9 },
524 { 0x00009aec, 0x000000f9, 0x000000f9 },
525 { 0x00009af0, 0x000000f9, 0x000000f9 },
526 { 0x00009af4, 0x000000f9, 0x000000f9 },
527 { 0x00009af8, 0x000000f9, 0x000000f9 },
528 { 0x00009afc, 0x000000f9, 0x000000f9 },
529};
530
531static const u32 ar5416Bank1_9100[][2] = {
532 { 0x000098b0, 0x02108421 },
533 { 0x000098ec, 0x00000008 },
534};
535
536static const u32 ar5416Bank2_9100[][2] = {
537 { 0x000098b0, 0x0e73ff17 },
538 { 0x000098e0, 0x00000420 },
539};
540
541static const u32 ar5416Bank3_9100[][3] = {
542 { 0x000098f0, 0x01400018, 0x01c00018 },
543};
544
545static const u32 ar5416Bank6_9100[][3] = {
546
547 { 0x0000989c, 0x00000000, 0x00000000 },
548 { 0x0000989c, 0x00000000, 0x00000000 },
549 { 0x0000989c, 0x00000000, 0x00000000 },
550 { 0x0000989c, 0x00e00000, 0x00e00000 },
551 { 0x0000989c, 0x005e0000, 0x005e0000 },
552 { 0x0000989c, 0x00120000, 0x00120000 },
553 { 0x0000989c, 0x00620000, 0x00620000 },
554 { 0x0000989c, 0x00020000, 0x00020000 },
555 { 0x0000989c, 0x00ff0000, 0x00ff0000 },
556 { 0x0000989c, 0x00ff0000, 0x00ff0000 },
557 { 0x0000989c, 0x00ff0000, 0x00ff0000 },
558 { 0x0000989c, 0x40ff0000, 0x40ff0000 },
559 { 0x0000989c, 0x005f0000, 0x005f0000 },
560 { 0x0000989c, 0x00870000, 0x00870000 },
561 { 0x0000989c, 0x00f90000, 0x00f90000 },
562 { 0x0000989c, 0x007b0000, 0x007b0000 },
563 { 0x0000989c, 0x00ff0000, 0x00ff0000 },
564 { 0x0000989c, 0x00f50000, 0x00f50000 },
565 { 0x0000989c, 0x00dc0000, 0x00dc0000 },
566 { 0x0000989c, 0x00110000, 0x00110000 },
567 { 0x0000989c, 0x006100a8, 0x006100a8 },
568 { 0x0000989c, 0x004210a2, 0x004210a2 },
569 { 0x0000989c, 0x0014008f, 0x0014008f },
570 { 0x0000989c, 0x00c40003, 0x00c40003 },
571 { 0x0000989c, 0x003000f2, 0x003000f2 },
572 { 0x0000989c, 0x00440016, 0x00440016 },
573 { 0x0000989c, 0x00410040, 0x00410040 },
574 { 0x0000989c, 0x0001805e, 0x0001805e },
575 { 0x0000989c, 0x0000c0ab, 0x0000c0ab },
576 { 0x0000989c, 0x000000f1, 0x000000f1 },
577 { 0x0000989c, 0x00002081, 0x00002081 },
578 { 0x0000989c, 0x000000d4, 0x000000d4 },
579 { 0x000098d0, 0x0000000f, 0x0010000f },
580};
581
582static const u32 ar5416Bank6TPC_9100[][3] = {
583 { 0x0000989c, 0x00000000, 0x00000000 },
584 { 0x0000989c, 0x00000000, 0x00000000 },
585 { 0x0000989c, 0x00000000, 0x00000000 },
586 { 0x0000989c, 0x00e00000, 0x00e00000 },
587 { 0x0000989c, 0x005e0000, 0x005e0000 },
588 { 0x0000989c, 0x00120000, 0x00120000 },
589 { 0x0000989c, 0x00620000, 0x00620000 },
590 { 0x0000989c, 0x00020000, 0x00020000 },
591 { 0x0000989c, 0x00ff0000, 0x00ff0000 },
592 { 0x0000989c, 0x00ff0000, 0x00ff0000 },
593 { 0x0000989c, 0x00ff0000, 0x00ff0000 },
594 { 0x0000989c, 0x40ff0000, 0x40ff0000 },
595 { 0x0000989c, 0x005f0000, 0x005f0000 },
596 { 0x0000989c, 0x00870000, 0x00870000 },
597 { 0x0000989c, 0x00f90000, 0x00f90000 },
598 { 0x0000989c, 0x007b0000, 0x007b0000 },
599 { 0x0000989c, 0x00ff0000, 0x00ff0000 },
600 { 0x0000989c, 0x00f50000, 0x00f50000 },
601 { 0x0000989c, 0x00dc0000, 0x00dc0000 },
602 { 0x0000989c, 0x00110000, 0x00110000 },
603 { 0x0000989c, 0x006100a8, 0x006100a8 },
604 { 0x0000989c, 0x00423022, 0x00423022 },
605 { 0x0000989c, 0x201400df, 0x201400df },
606 { 0x0000989c, 0x00c40002, 0x00c40002 },
607 { 0x0000989c, 0x003000f2, 0x003000f2 },
608 { 0x0000989c, 0x00440016, 0x00440016 },
609 { 0x0000989c, 0x00410040, 0x00410040 },
610 { 0x0000989c, 0x0001805e, 0x0001805e },
611 { 0x0000989c, 0x0000c0ab, 0x0000c0ab },
612 { 0x0000989c, 0x000000e1, 0x000000e1 },
613 { 0x0000989c, 0x00007081, 0x00007081 },
614 { 0x0000989c, 0x000000d4, 0x000000d4 },
615 { 0x000098d0, 0x0000000f, 0x0010000f },
616};
617
618static const u32 ar5416Bank7_9100[][2] = {
619 { 0x0000989c, 0x00000500 },
620 { 0x0000989c, 0x00000800 },
621 { 0x000098cc, 0x0000000e },
622};
623
624static const u32 ar5416Addac_9100[][2] = {
625 {0x0000989c, 0x00000000 },
626 {0x0000989c, 0x00000003 },
627 {0x0000989c, 0x00000000 },
628 {0x0000989c, 0x0000000c },
629 {0x0000989c, 0x00000000 },
630 {0x0000989c, 0x00000030 },
631 {0x0000989c, 0x00000000 },
632 {0x0000989c, 0x00000000 },
633 {0x0000989c, 0x00000000 },
634 {0x0000989c, 0x00000000 },
635 {0x0000989c, 0x00000000 },
636 {0x0000989c, 0x00000000 },
637 {0x0000989c, 0x00000000 },
638 {0x0000989c, 0x00000000 },
639 {0x0000989c, 0x00000000 },
640 {0x0000989c, 0x00000000 },
641 {0x0000989c, 0x00000000 },
642 {0x0000989c, 0x00000000 },
643 {0x0000989c, 0x00000060 },
644 {0x0000989c, 0x00000000 },
645 {0x0000989c, 0x00000000 },
646 {0x0000989c, 0x00000000 },
647 {0x0000989c, 0x00000000 },
648 {0x0000989c, 0x00000000 },
649 {0x0000989c, 0x00000000 },
650 {0x0000989c, 0x00000000 },
651 {0x0000989c, 0x00000000 },
652 {0x0000989c, 0x00000000 },
653 {0x0000989c, 0x00000000 },
654 {0x0000989c, 0x00000000 },
655 {0x0000989c, 0x00000000 },
656 {0x0000989c, 0x00000058 },
657 {0x0000989c, 0x00000000 },
658 {0x0000989c, 0x00000000 },
659 {0x0000989c, 0x00000000 },
660 {0x0000989c, 0x00000000 },
661 {0x000098c4, 0x00000000 },
662};
663
664static const u32 ar5416Modes[][6] = {
665 { 0x00001030, 0x00000230, 0x00000460, 0x000002c0, 0x00000160, 0x000001e0 },
666 { 0x00001070, 0x00000168, 0x000002d0, 0x00000318, 0x0000018c, 0x000001e0 },
667 { 0x000010b0, 0x00000e60, 0x00001cc0, 0x00007c70, 0x00003e38, 0x00001180 },
668 { 0x000010f0, 0x0000a000, 0x00014000, 0x00016000, 0x0000b000, 0x00014008 },
669 { 0x00008014, 0x03e803e8, 0x07d007d0, 0x10801600, 0x08400b00, 0x06e006e0 },
670 { 0x0000801c, 0x128d93a7, 0x128d93cf, 0x12e013d7, 0x12e013ab, 0x098813cf },
671 { 0x00009804, 0x00000300, 0x000003c4, 0x000003c4, 0x00000300, 0x00000303 },
672 { 0x00009820, 0x02020200, 0x02020200, 0x02020200, 0x02020200, 0x02020200 },
673 { 0x00009824, 0x00000e0e, 0x00000e0e, 0x00000e0e, 0x00000e0e, 0x00000e0e },
674 { 0x00009828, 0x0a020001, 0x0a020001, 0x0a020001, 0x0a020001, 0x0a020001 },
675 { 0x00009834, 0x00000e0e, 0x00000e0e, 0x00000e0e, 0x00000e0e, 0x00000e0e },
676 { 0x00009838, 0x00000007, 0x00000007, 0x00000007, 0x00000007, 0x00000007 },
677 { 0x00009844, 0x0372161e, 0x0372161e, 0x037216a0, 0x037216a0, 0x037216a0 },
678 { 0x00009848, 0x001a6a65, 0x001a6a65, 0x00197a68, 0x00197a68, 0x00197a68 },
679 { 0x0000a848, 0x001a6a65, 0x001a6a65, 0x00197a68, 0x00197a68, 0x00197a68 },
680 { 0x0000b848, 0x001a6a65, 0x001a6a65, 0x00197a68, 0x00197a68, 0x00197a68 },
681 { 0x00009850, 0x6d48b4e2, 0x6d48b4e2, 0x6d48b0e2, 0x6d48b0e2, 0x6d48b0e2 },
682 { 0x00009858, 0x7ec82d2e, 0x7ec82d2e, 0x7ec86d2e, 0x7ec84d2e, 0x7ec82d2e },
683 { 0x0000985c, 0x3139605e, 0x3139605e, 0x3139605e, 0x3139605e, 0x3139605e },
684 { 0x00009860, 0x00048d18, 0x00048d18, 0x00048d20, 0x00048d20, 0x00048d18 },
685 { 0x0000c864, 0x0001ce00, 0x0001ce00, 0x0001ce00, 0x0001ce00, 0x0001ce00 },
686 { 0x00009868, 0x409a40d0, 0x409a40d0, 0x409a40d0, 0x409a40d0, 0x409a40d0 },
687 { 0x0000986c, 0x050cb081, 0x050cb081, 0x050cb081, 0x050cb081, 0x050cb081 },
688 { 0x00009914, 0x000007d0, 0x000007d0, 0x00000898, 0x00000898, 0x000007d0 },
689 { 0x00009918, 0x0000000a, 0x00000014, 0x00000016, 0x0000000b, 0x00000016 },
690 { 0x00009924, 0xd00a8a07, 0xd00a8a07, 0xd00a8a11, 0xd00a8a0d, 0xd00a8a0d },
691 { 0x00009940, 0x00754604, 0x00754604, 0xfff81204, 0xfff81204, 0xfff81204 },
692 { 0x00009944, 0xdfb81020, 0xdfb81020, 0xdfb81020, 0xdfb81020, 0xdfb81020 },
693 { 0x00009954, 0x5f3ca3de, 0x5f3ca3de, 0xe250a51e, 0xe250a51e, 0xe250a51e },
694 { 0x00009958, 0x2108ecff, 0x2108ecff, 0x3388ffff, 0x3388ffff, 0x3388ffff },
695#ifdef TB243
696 { 0x00009960, 0x00000900, 0x00000900, 0x00009b40, 0x00009b40, 0x00012d80 },
697 { 0x0000a960, 0x00000900, 0x00000900, 0x00009b40, 0x00009b40, 0x00012d80 },
698 { 0x0000b960, 0x00000900, 0x00000900, 0x00009b40, 0x00009b40, 0x00012d80 },
699 { 0x00009964, 0x00000000, 0x00000000, 0x00002210, 0x00002210, 0x00001120 },
700#else
701 { 0x00009960, 0x0001bfc0, 0x0001bfc0, 0x0001bfc0, 0x0001bfc0, 0x0001bfc0 },
702 { 0x0000a960, 0x0001bfc0, 0x0001bfc0, 0x0001bfc0, 0x0001bfc0, 0x0001bfc0 },
703 { 0x0000b960, 0x0001bfc0, 0x0001bfc0, 0x0001bfc0, 0x0001bfc0, 0x0001bfc0 },
704 { 0x00009964, 0x00001120, 0x00001120, 0x00001120, 0x00001120, 0x00001120 },
705#endif
706 { 0x0000c9bc, 0x001a0600, 0x001a0600, 0x001a1000, 0x001a0c00, 0x001a0c00 },
707 { 0x000099c0, 0x038919be, 0x038919be, 0x038919be, 0x038919be, 0x038919be },
708 { 0x000099c4, 0x06336f77, 0x06336f77, 0x06336f77, 0x06336f77, 0x06336f77 },
709 { 0x000099c8, 0x60f65329, 0x60f65329, 0x60f65329, 0x60f65329, 0x60f65329 },
710 { 0x000099cc, 0x08f186c8, 0x08f186c8, 0x08f186c8, 0x08f186c8, 0x08f186c8 },
711 { 0x000099d0, 0x00046384, 0x00046384, 0x00046384, 0x00046384, 0x00046384 },
712 { 0x000099d4, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 },
713 { 0x000099d8, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 },
714 { 0x0000a204, 0x00000880, 0x00000880, 0x00000880, 0x00000880, 0x00000880 },
715 { 0x0000a208, 0xd6be4788, 0xd6be4788, 0xd03e4788, 0xd03e4788, 0xd03e4788 },
716 { 0x0000a20c, 0x002fc160, 0x002fc160, 0x002ac120, 0x002ac120, 0x002ac120 },
717 { 0x0000b20c, 0x002fc160, 0x002fc160, 0x002ac120, 0x002ac120, 0x002ac120 },
718 { 0x0000c20c, 0x002fc160, 0x002fc160, 0x002ac120, 0x002ac120, 0x002ac120 },
719 { 0x0000a21c, 0x1883800a, 0x1883800a, 0x1883800a, 0x1883800a, 0x1883800a },
720 { 0x0000a230, 0x00000000, 0x00000000, 0x00000210, 0x00000108, 0x00000000 },
721 { 0x0000a274, 0x0a1a9caa, 0x0a1a9caa, 0x0a1a7caa, 0x0a1a7caa, 0x0a1a7caa },
722 { 0x0000a300, 0x18010000, 0x18010000, 0x18010000, 0x18010000, 0x18010000 },
723 { 0x0000a304, 0x30032602, 0x30032602, 0x2e032402, 0x2e032402, 0x2e032402 },
724 { 0x0000a308, 0x48073e06, 0x48073e06, 0x4a0a3c06, 0x4a0a3c06, 0x4a0a3c06 },
725 { 0x0000a30c, 0x560b4c0a, 0x560b4c0a, 0x621a540b, 0x621a540b, 0x621a540b },
726 { 0x0000a310, 0x641a600f, 0x641a600f, 0x764f6c1b, 0x764f6c1b, 0x764f6c1b },
727 { 0x0000a314, 0x7a4f6e1b, 0x7a4f6e1b, 0x845b7a5a, 0x845b7a5a, 0x845b7a5a },
728 { 0x0000a318, 0x8c5b7e5a, 0x8c5b7e5a, 0x950f8ccf, 0x950f8ccf, 0x950f8ccf },
729 { 0x0000a31c, 0x9d0f96cf, 0x9d0f96cf, 0xa5cf9b4f, 0xa5cf9b4f, 0xa5cf9b4f },
730 { 0x0000a320, 0xb51fa69f, 0xb51fa69f, 0xbddfaf1f, 0xbddfaf1f, 0xbddfaf1f },
731 { 0x0000a324, 0xcb3fbd07, 0xcb3fbcbf, 0xd1ffc93f, 0xd1ffc93f, 0xd1ffc93f },
732 { 0x0000a328, 0x0000d7bf, 0x0000d7bf, 0x00000000, 0x00000000, 0x00000000 },
733 { 0x0000a32c, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 },
734 { 0x0000a330, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 },
735 { 0x0000a334, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 },
736};
737
738static const u32 ar5416Common[][2] = {
739 { 0x0000000c, 0x00000000 },
740 { 0x00000030, 0x00020015 },
741 { 0x00000034, 0x00000005 },
742 { 0x00000040, 0x00000000 },
743 { 0x00000044, 0x00000008 },
744 { 0x00000048, 0x00000008 },
745 { 0x0000004c, 0x00000010 },
746 { 0x00000050, 0x00000000 },
747 { 0x00000054, 0x0000001f },
748 { 0x00000800, 0x00000000 },
749 { 0x00000804, 0x00000000 },
750 { 0x00000808, 0x00000000 },
751 { 0x0000080c, 0x00000000 },
752 { 0x00000810, 0x00000000 },
753 { 0x00000814, 0x00000000 },
754 { 0x00000818, 0x00000000 },
755 { 0x0000081c, 0x00000000 },
756 { 0x00000820, 0x00000000 },
757 { 0x00000824, 0x00000000 },
758 { 0x00001040, 0x002ffc0f },
759 { 0x00001044, 0x002ffc0f },
760 { 0x00001048, 0x002ffc0f },
761 { 0x0000104c, 0x002ffc0f },
762 { 0x00001050, 0x002ffc0f },
763 { 0x00001054, 0x002ffc0f },
764 { 0x00001058, 0x002ffc0f },
765 { 0x0000105c, 0x002ffc0f },
766 { 0x00001060, 0x002ffc0f },
767 { 0x00001064, 0x002ffc0f },
768 { 0x00001230, 0x00000000 },
769 { 0x00001270, 0x00000000 },
770 { 0x00001038, 0x00000000 },
771 { 0x00001078, 0x00000000 },
772 { 0x000010b8, 0x00000000 },
773 { 0x000010f8, 0x00000000 },
774 { 0x00001138, 0x00000000 },
775 { 0x00001178, 0x00000000 },
776 { 0x000011b8, 0x00000000 },
777 { 0x000011f8, 0x00000000 },
778 { 0x00001238, 0x00000000 },
779 { 0x00001278, 0x00000000 },
780 { 0x000012b8, 0x00000000 },
781 { 0x000012f8, 0x00000000 },
782 { 0x00001338, 0x00000000 },
783 { 0x00001378, 0x00000000 },
784 { 0x000013b8, 0x00000000 },
785 { 0x000013f8, 0x00000000 },
786 { 0x00001438, 0x00000000 },
787 { 0x00001478, 0x00000000 },
788 { 0x000014b8, 0x00000000 },
789 { 0x000014f8, 0x00000000 },
790 { 0x00001538, 0x00000000 },
791 { 0x00001578, 0x00000000 },
792 { 0x000015b8, 0x00000000 },
793 { 0x000015f8, 0x00000000 },
794 { 0x00001638, 0x00000000 },
795 { 0x00001678, 0x00000000 },
796 { 0x000016b8, 0x00000000 },
797 { 0x000016f8, 0x00000000 },
798 { 0x00001738, 0x00000000 },
799 { 0x00001778, 0x00000000 },
800 { 0x000017b8, 0x00000000 },
801 { 0x000017f8, 0x00000000 },
802 { 0x0000103c, 0x00000000 },
803 { 0x0000107c, 0x00000000 },
804 { 0x000010bc, 0x00000000 },
805 { 0x000010fc, 0x00000000 },
806 { 0x0000113c, 0x00000000 },
807 { 0x0000117c, 0x00000000 },
808 { 0x000011bc, 0x00000000 },
809 { 0x000011fc, 0x00000000 },
810 { 0x0000123c, 0x00000000 },
811 { 0x0000127c, 0x00000000 },
812 { 0x000012bc, 0x00000000 },
813 { 0x000012fc, 0x00000000 },
814 { 0x0000133c, 0x00000000 },
815 { 0x0000137c, 0x00000000 },
816 { 0x000013bc, 0x00000000 },
817 { 0x000013fc, 0x00000000 },
818 { 0x0000143c, 0x00000000 },
819 { 0x0000147c, 0x00000000 },
820 { 0x00020010, 0x00000003 },
821 { 0x00020038, 0x000004c2 },
822 { 0x00008004, 0x00000000 },
823 { 0x00008008, 0x00000000 },
824 { 0x0000800c, 0x00000000 },
825 { 0x00008018, 0x00000700 },
826 { 0x00008020, 0x00000000 },
827 { 0x00008038, 0x00000000 },
828 { 0x0000803c, 0x00000000 },
829 { 0x00008048, 0x40000000 },
830 { 0x00008054, 0x00004000 },
831 { 0x00008058, 0x00000000 },
832 { 0x0000805c, 0x000fc78f },
833 { 0x00008060, 0x0000000f },
834 { 0x00008064, 0x00000000 },
835 { 0x000080c0, 0x2a82301a },
836 { 0x000080c4, 0x05dc01e0 },
837 { 0x000080c8, 0x1f402710 },
838 { 0x000080cc, 0x01f40000 },
839 { 0x000080d0, 0x00001e00 },
840 { 0x000080d4, 0x00000000 },
841 { 0x000080d8, 0x00400000 },
842 { 0x000080e0, 0xffffffff },
843 { 0x000080e4, 0x0000ffff },
844 { 0x000080e8, 0x003f3f3f },
845 { 0x000080ec, 0x00000000 },
846 { 0x000080f0, 0x00000000 },
847 { 0x000080f4, 0x00000000 },
848 { 0x000080f8, 0x00000000 },
849 { 0x000080fc, 0x00020000 },
850 { 0x00008100, 0x00020000 },
851 { 0x00008104, 0x00000001 },
852 { 0x00008108, 0x00000052 },
853 { 0x0000810c, 0x00000000 },
854 { 0x00008110, 0x00000168 },
855 { 0x00008118, 0x000100aa },
856 { 0x0000811c, 0x00003210 },
857 { 0x00008120, 0x08f04800 },
858 { 0x00008124, 0x00000000 },
859 { 0x00008128, 0x00000000 },
860 { 0x0000812c, 0x00000000 },
861 { 0x00008130, 0x00000000 },
862 { 0x00008134, 0x00000000 },
863 { 0x00008138, 0x00000000 },
864 { 0x0000813c, 0x00000000 },
865 { 0x00008144, 0x00000000 },
866 { 0x00008168, 0x00000000 },
867 { 0x0000816c, 0x00000000 },
868 { 0x00008170, 0x32143320 },
869 { 0x00008174, 0xfaa4fa50 },
870 { 0x00008178, 0x00000100 },
871 { 0x0000817c, 0x00000000 },
872 { 0x000081c4, 0x00000000 },
873 { 0x000081d0, 0x00003210 },
874 { 0x000081ec, 0x00000000 },
875 { 0x000081f0, 0x00000000 },
876 { 0x000081f4, 0x00000000 },
877 { 0x000081f8, 0x00000000 },
878 { 0x000081fc, 0x00000000 },
879 { 0x00008200, 0x00000000 },
880 { 0x00008204, 0x00000000 },
881 { 0x00008208, 0x00000000 },
882 { 0x0000820c, 0x00000000 },
883 { 0x00008210, 0x00000000 },
884 { 0x00008214, 0x00000000 },
885 { 0x00008218, 0x00000000 },
886 { 0x0000821c, 0x00000000 },
887 { 0x00008220, 0x00000000 },
888 { 0x00008224, 0x00000000 },
889 { 0x00008228, 0x00000000 },
890 { 0x0000822c, 0x00000000 },
891 { 0x00008230, 0x00000000 },
892 { 0x00008234, 0x00000000 },
893 { 0x00008238, 0x00000000 },
894 { 0x0000823c, 0x00000000 },
895 { 0x00008240, 0x00100000 },
896 { 0x00008244, 0x0010f400 },
897 { 0x00008248, 0x00000100 },
898 { 0x0000824c, 0x0001e800 },
899 { 0x00008250, 0x00000000 },
900 { 0x00008254, 0x00000000 },
901 { 0x00008258, 0x00000000 },
902 { 0x0000825c, 0x400000ff },
903 { 0x00008260, 0x00080922 },
904 { 0x00008270, 0x00000000 },
905 { 0x00008274, 0x40000000 },
906 { 0x00008278, 0x003e4180 },
907 { 0x0000827c, 0x00000000 },
908 { 0x00008284, 0x0000002c },
909 { 0x00008288, 0x0000002c },
910 { 0x0000828c, 0x00000000 },
911 { 0x00008294, 0x00000000 },
912 { 0x00008298, 0x00000000 },
913 { 0x00008300, 0x00000000 },
914 { 0x00008304, 0x00000000 },
915 { 0x00008308, 0x00000000 },
916 { 0x0000830c, 0x00000000 },
917 { 0x00008310, 0x00000000 },
918 { 0x00008314, 0x00000000 },
919 { 0x00008318, 0x00000000 },
920 { 0x00008328, 0x00000000 },
921 { 0x0000832c, 0x00000007 },
922 { 0x00008330, 0x00000302 },
923 { 0x00008334, 0x00000e00 },
924 { 0x00008338, 0x00000000 },
925 { 0x0000833c, 0x00000000 },
926 { 0x00008340, 0x000107ff },
927 { 0x00009808, 0x00000000 },
928 { 0x0000980c, 0xad848e19 },
929 { 0x00009810, 0x7d14e000 },
930 { 0x00009814, 0x9c0a9f6b },
931 { 0x0000981c, 0x00000000 },
932 { 0x0000982c, 0x0000a000 },
933 { 0x00009830, 0x00000000 },
934 { 0x0000983c, 0x00200400 },
935 { 0x00009840, 0x206a01ae },
936 { 0x0000984c, 0x1284233c },
937 { 0x00009854, 0x00000859 },
938 { 0x00009900, 0x00000000 },
939 { 0x00009904, 0x00000000 },
940 { 0x00009908, 0x00000000 },
941 { 0x0000990c, 0x00000000 },
942 { 0x0000991c, 0x10000fff },
943 { 0x00009920, 0x05100000 },
944 { 0x0000a920, 0x05100000 },
945 { 0x0000b920, 0x05100000 },
946 { 0x00009928, 0x00000001 },
947 { 0x0000992c, 0x00000004 },
948 { 0x00009934, 0x1e1f2022 },
949 { 0x00009938, 0x0a0b0c0d },
950 { 0x0000993c, 0x00000000 },
951 { 0x00009948, 0x9280b212 },
952 { 0x0000994c, 0x00020028 },
953 { 0x0000c95c, 0x004b6a8e },
954 { 0x0000c968, 0x000003ce },
955 { 0x00009970, 0x190fb514 },
956 { 0x00009974, 0x00000000 },
957 { 0x00009978, 0x00000001 },
958 { 0x0000997c, 0x00000000 },
959 { 0x00009980, 0x00000000 },
960 { 0x00009984, 0x00000000 },
961 { 0x00009988, 0x00000000 },
962 { 0x0000998c, 0x00000000 },
963 { 0x00009990, 0x00000000 },
964 { 0x00009994, 0x00000000 },
965 { 0x00009998, 0x00000000 },
966 { 0x0000999c, 0x00000000 },
967 { 0x000099a0, 0x00000000 },
968 { 0x000099a4, 0x00000001 },
969 { 0x000099a8, 0x201fff00 },
970 { 0x000099ac, 0x006f0000 },
971 { 0x000099b0, 0x03051000 },
972 { 0x000099dc, 0x00000000 },
973 { 0x000099e0, 0x00000200 },
974 { 0x000099e4, 0xaaaaaaaa },
975 { 0x000099e8, 0x3c466478 },
976 { 0x000099ec, 0x0cc80caa },
977 { 0x000099fc, 0x00001042 },
978 { 0x00009b00, 0x00000000 },
979 { 0x00009b04, 0x00000001 },
980 { 0x00009b08, 0x00000002 },
981 { 0x00009b0c, 0x00000003 },
982 { 0x00009b10, 0x00000004 },
983 { 0x00009b14, 0x00000005 },
984 { 0x00009b18, 0x00000008 },
985 { 0x00009b1c, 0x00000009 },
986 { 0x00009b20, 0x0000000a },
987 { 0x00009b24, 0x0000000b },
988 { 0x00009b28, 0x0000000c },
989 { 0x00009b2c, 0x0000000d },
990 { 0x00009b30, 0x00000010 },
991 { 0x00009b34, 0x00000011 },
992 { 0x00009b38, 0x00000012 },
993 { 0x00009b3c, 0x00000013 },
994 { 0x00009b40, 0x00000014 },
995 { 0x00009b44, 0x00000015 },
996 { 0x00009b48, 0x00000018 },
997 { 0x00009b4c, 0x00000019 },
998 { 0x00009b50, 0x0000001a },
999 { 0x00009b54, 0x0000001b },
1000 { 0x00009b58, 0x0000001c },
1001 { 0x00009b5c, 0x0000001d },
1002 { 0x00009b60, 0x00000020 },
1003 { 0x00009b64, 0x00000021 },
1004 { 0x00009b68, 0x00000022 },
1005 { 0x00009b6c, 0x00000023 },
1006 { 0x00009b70, 0x00000024 },
1007 { 0x00009b74, 0x00000025 },
1008 { 0x00009b78, 0x00000028 },
1009 { 0x00009b7c, 0x00000029 },
1010 { 0x00009b80, 0x0000002a },
1011 { 0x00009b84, 0x0000002b },
1012 { 0x00009b88, 0x0000002c },
1013 { 0x00009b8c, 0x0000002d },
1014 { 0x00009b90, 0x00000030 },
1015 { 0x00009b94, 0x00000031 },
1016 { 0x00009b98, 0x00000032 },
1017 { 0x00009b9c, 0x00000033 },
1018 { 0x00009ba0, 0x00000034 },
1019 { 0x00009ba4, 0x00000035 },
1020 { 0x00009ba8, 0x00000035 },
1021 { 0x00009bac, 0x00000035 },
1022 { 0x00009bb0, 0x00000035 },
1023 { 0x00009bb4, 0x00000035 },
1024 { 0x00009bb8, 0x00000035 },
1025 { 0x00009bbc, 0x00000035 },
1026 { 0x00009bc0, 0x00000035 },
1027 { 0x00009bc4, 0x00000035 },
1028 { 0x00009bc8, 0x00000035 },
1029 { 0x00009bcc, 0x00000035 },
1030 { 0x00009bd0, 0x00000035 },
1031 { 0x00009bd4, 0x00000035 },
1032 { 0x00009bd8, 0x00000035 },
1033 { 0x00009bdc, 0x00000035 },
1034 { 0x00009be0, 0x00000035 },
1035 { 0x00009be4, 0x00000035 },
1036 { 0x00009be8, 0x00000035 },
1037 { 0x00009bec, 0x00000035 },
1038 { 0x00009bf0, 0x00000035 },
1039 { 0x00009bf4, 0x00000035 },
1040 { 0x00009bf8, 0x00000010 },
1041 { 0x00009bfc, 0x0000001a },
1042 { 0x0000a210, 0x40806333 },
1043 { 0x0000a214, 0x00106c10 },
1044 { 0x0000a218, 0x009c4060 },
1045 { 0x0000a220, 0x018830c6 },
1046 { 0x0000a224, 0x00000400 },
1047 { 0x0000a228, 0x001a0bb5 },
1048 { 0x0000a22c, 0x00000000 },
1049 { 0x0000a234, 0x20202020 },
1050 { 0x0000a238, 0x20202020 },
1051 { 0x0000a23c, 0x13c889ae },
1052 { 0x0000a240, 0x38490a20 },
1053 { 0x0000a244, 0x00007bb6 },
1054 { 0x0000a248, 0x0fff3ffc },
1055 { 0x0000a24c, 0x00000001 },
1056 { 0x0000a250, 0x0000a000 },
1057 { 0x0000a254, 0x00000000 },
1058 { 0x0000a258, 0x0cc75380 },
1059 { 0x0000a25c, 0x0f0f0f01 },
1060 { 0x0000a260, 0xdfa91f01 },
1061 { 0x0000a268, 0x00000001 },
1062 { 0x0000a26c, 0x0ebae9c6 },
1063 { 0x0000b26c, 0x0ebae9c6 },
1064 { 0x0000c26c, 0x0ebae9c6 },
1065 { 0x0000d270, 0x00820820 },
1066 { 0x0000a278, 0x1ce739ce },
1067 { 0x0000a27c, 0x050701ce },
1068 { 0x0000a338, 0x00000000 },
1069 { 0x0000a33c, 0x00000000 },
1070 { 0x0000a340, 0x00000000 },
1071 { 0x0000a344, 0x00000000 },
1072 { 0x0000a348, 0x3fffffff },
1073 { 0x0000a34c, 0x3fffffff },
1074 { 0x0000a350, 0x3fffffff },
1075 { 0x0000a354, 0x0003ffff },
1076 { 0x0000a358, 0x79a8aa33 },
1077 { 0x0000d35c, 0x07ffffef },
1078 { 0x0000d360, 0x0fffffe7 },
1079 { 0x0000d364, 0x17ffffe5 },
1080 { 0x0000d368, 0x1fffffe4 },
1081 { 0x0000d36c, 0x37ffffe3 },
1082 { 0x0000d370, 0x3fffffe3 },
1083 { 0x0000d374, 0x57ffffe3 },
1084 { 0x0000d378, 0x5fffffe2 },
1085 { 0x0000d37c, 0x7fffffe2 },
1086 { 0x0000d380, 0x7f3c7bba },
1087 { 0x0000d384, 0xf3307ff0 },
1088 { 0x0000a388, 0x0c000000 },
1089 { 0x0000a38c, 0x20202020 },
1090 { 0x0000a390, 0x20202020 },
1091 { 0x0000a394, 0x1ce739ce },
1092 { 0x0000a398, 0x000001ce },
1093 { 0x0000a39c, 0x00000001 },
1094 { 0x0000a3a0, 0x00000000 },
1095 { 0x0000a3a4, 0x00000000 },
1096 { 0x0000a3a8, 0x00000000 },
1097 { 0x0000a3ac, 0x00000000 },
1098 { 0x0000a3b0, 0x00000000 },
1099 { 0x0000a3b4, 0x00000000 },
1100 { 0x0000a3b8, 0x00000000 },
1101 { 0x0000a3bc, 0x00000000 },
1102 { 0x0000a3c0, 0x00000000 },
1103 { 0x0000a3c4, 0x00000000 },
1104 { 0x0000a3c8, 0x00000246 },
1105 { 0x0000a3cc, 0x20202020 },
1106 { 0x0000a3d0, 0x20202020 },
1107 { 0x0000a3d4, 0x20202020 },
1108 { 0x0000a3dc, 0x1ce739ce },
1109 { 0x0000a3e0, 0x000001ce },
1110};
1111
1112static const u32 ar5416Bank0[][2] = {
1113 { 0x000098b0, 0x1e5795e5 },
1114 { 0x000098e0, 0x02008020 },
1115};
1116
1117static const u32 ar5416BB_RfGain[][3] = {
1118 { 0x00009a00, 0x00000000, 0x00000000 },
1119 { 0x00009a04, 0x00000040, 0x00000040 },
1120 { 0x00009a08, 0x00000080, 0x00000080 },
1121 { 0x00009a0c, 0x000001a1, 0x00000141 },
1122 { 0x00009a10, 0x000001e1, 0x00000181 },
1123 { 0x00009a14, 0x00000021, 0x000001c1 },
1124 { 0x00009a18, 0x00000061, 0x00000001 },
1125 { 0x00009a1c, 0x00000168, 0x00000041 },
1126 { 0x00009a20, 0x000001a8, 0x000001a8 },
1127 { 0x00009a24, 0x000001e8, 0x000001e8 },
1128 { 0x00009a28, 0x00000028, 0x00000028 },
1129 { 0x00009a2c, 0x00000068, 0x00000068 },
1130 { 0x00009a30, 0x00000189, 0x000000a8 },
1131 { 0x00009a34, 0x000001c9, 0x00000169 },
1132 { 0x00009a38, 0x00000009, 0x000001a9 },
1133 { 0x00009a3c, 0x00000049, 0x000001e9 },
1134 { 0x00009a40, 0x00000089, 0x00000029 },
1135 { 0x00009a44, 0x00000170, 0x00000069 },
1136 { 0x00009a48, 0x000001b0, 0x00000190 },
1137 { 0x00009a4c, 0x000001f0, 0x000001d0 },
1138 { 0x00009a50, 0x00000030, 0x00000010 },
1139 { 0x00009a54, 0x00000070, 0x00000050 },
1140 { 0x00009a58, 0x00000191, 0x00000090 },
1141 { 0x00009a5c, 0x000001d1, 0x00000151 },
1142 { 0x00009a60, 0x00000011, 0x00000191 },
1143 { 0x00009a64, 0x00000051, 0x000001d1 },
1144 { 0x00009a68, 0x00000091, 0x00000011 },
1145 { 0x00009a6c, 0x000001b8, 0x00000051 },
1146 { 0x00009a70, 0x000001f8, 0x00000198 },
1147 { 0x00009a74, 0x00000038, 0x000001d8 },
1148 { 0x00009a78, 0x00000078, 0x00000018 },
1149 { 0x00009a7c, 0x00000199, 0x00000058 },
1150 { 0x00009a80, 0x000001d9, 0x00000098 },
1151 { 0x00009a84, 0x00000019, 0x00000159 },
1152 { 0x00009a88, 0x00000059, 0x00000199 },
1153 { 0x00009a8c, 0x00000099, 0x000001d9 },
1154 { 0x00009a90, 0x000000d9, 0x00000019 },
1155 { 0x00009a94, 0x000000f9, 0x00000059 },
1156 { 0x00009a98, 0x000000f9, 0x00000099 },
1157 { 0x00009a9c, 0x000000f9, 0x000000d9 },
1158 { 0x00009aa0, 0x000000f9, 0x000000f9 },
1159 { 0x00009aa4, 0x000000f9, 0x000000f9 },
1160 { 0x00009aa8, 0x000000f9, 0x000000f9 },
1161 { 0x00009aac, 0x000000f9, 0x000000f9 },
1162 { 0x00009ab0, 0x000000f9, 0x000000f9 },
1163 { 0x00009ab4, 0x000000f9, 0x000000f9 },
1164 { 0x00009ab8, 0x000000f9, 0x000000f9 },
1165 { 0x00009abc, 0x000000f9, 0x000000f9 },
1166 { 0x00009ac0, 0x000000f9, 0x000000f9 },
1167 { 0x00009ac4, 0x000000f9, 0x000000f9 },
1168 { 0x00009ac8, 0x000000f9, 0x000000f9 },
1169 { 0x00009acc, 0x000000f9, 0x000000f9 },
1170 { 0x00009ad0, 0x000000f9, 0x000000f9 },
1171 { 0x00009ad4, 0x000000f9, 0x000000f9 },
1172 { 0x00009ad8, 0x000000f9, 0x000000f9 },
1173 { 0x00009adc, 0x000000f9, 0x000000f9 },
1174 { 0x00009ae0, 0x000000f9, 0x000000f9 },
1175 { 0x00009ae4, 0x000000f9, 0x000000f9 },
1176 { 0x00009ae8, 0x000000f9, 0x000000f9 },
1177 { 0x00009aec, 0x000000f9, 0x000000f9 },
1178 { 0x00009af0, 0x000000f9, 0x000000f9 },
1179 { 0x00009af4, 0x000000f9, 0x000000f9 },
1180 { 0x00009af8, 0x000000f9, 0x000000f9 },
1181 { 0x00009afc, 0x000000f9, 0x000000f9 },
1182};
1183
1184static const u32 ar5416Bank1[][2] = {
1185 { 0x000098b0, 0x02108421},
1186 { 0x000098ec, 0x00000008},
1187};
1188
1189static const u32 ar5416Bank2[][2] = {
1190 { 0x000098b0, 0x0e73ff17},
1191 { 0x000098e0, 0x00000420},
1192};
1193
1194static const u32 ar5416Bank3[][3] = {
1195 { 0x000098f0, 0x01400018, 0x01c00018 },
1196};
1197
1198static const u32 ar5416Bank6[][3] = {
1199
1200 { 0x0000989c, 0x00000000, 0x00000000 },
1201 { 0x0000989c, 0x00000000, 0x00000000 },
1202 { 0x0000989c, 0x00000000, 0x00000000 },
1203 { 0x0000989c, 0x00e00000, 0x00e00000 },
1204 { 0x0000989c, 0x005e0000, 0x005e0000 },
1205 { 0x0000989c, 0x00120000, 0x00120000 },
1206 { 0x0000989c, 0x00620000, 0x00620000 },
1207 { 0x0000989c, 0x00020000, 0x00020000 },
1208 { 0x0000989c, 0x00ff0000, 0x00ff0000 },
1209 { 0x0000989c, 0x00ff0000, 0x00ff0000 },
1210 { 0x0000989c, 0x00ff0000, 0x00ff0000 },
1211 { 0x0000989c, 0x00ff0000, 0x00ff0000 },
1212 { 0x0000989c, 0x005f0000, 0x005f0000 },
1213 { 0x0000989c, 0x00870000, 0x00870000 },
1214 { 0x0000989c, 0x00f90000, 0x00f90000 },
1215 { 0x0000989c, 0x007b0000, 0x007b0000 },
1216 { 0x0000989c, 0x00ff0000, 0x00ff0000 },
1217 { 0x0000989c, 0x00f50000, 0x00f50000 },
1218 { 0x0000989c, 0x00dc0000, 0x00dc0000 },
1219 { 0x0000989c, 0x00110000, 0x00110000 },
1220 { 0x0000989c, 0x006100a8, 0x006100a8 },
1221 { 0x0000989c, 0x004210a2, 0x004210a2 },
1222 { 0x0000989c, 0x0014000f, 0x0014000f },
1223 { 0x0000989c, 0x00c40002, 0x00c40002 },
1224 { 0x0000989c, 0x003000f2, 0x003000f2 },
1225 { 0x0000989c, 0x00440016, 0x00440016 },
1226 { 0x0000989c, 0x00410040, 0x00410040 },
1227 { 0x0000989c, 0x000180d6, 0x000180d6 },
1228 { 0x0000989c, 0x0000c0aa, 0x0000c0aa },
1229 { 0x0000989c, 0x000000b1, 0x000000b1 },
1230 { 0x0000989c, 0x00002000, 0x00002000 },
1231 { 0x0000989c, 0x000000d4, 0x000000d4 },
1232 { 0x000098d0, 0x0000000f, 0x0010000f },
1233};
1234
1235
1236static const u32 ar5416Bank6TPC[][3] = {
1237
1238 { 0x0000989c, 0x00000000, 0x00000000 },
1239 { 0x0000989c, 0x00000000, 0x00000000 },
1240 { 0x0000989c, 0x00000000, 0x00000000 },
1241 { 0x0000989c, 0x00e00000, 0x00e00000 },
1242 { 0x0000989c, 0x005e0000, 0x005e0000 },
1243 { 0x0000989c, 0x00120000, 0x00120000 },
1244 { 0x0000989c, 0x00620000, 0x00620000 },
1245 { 0x0000989c, 0x00020000, 0x00020000 },
1246 { 0x0000989c, 0x00ff0000, 0x00ff0000 },
1247 { 0x0000989c, 0x00ff0000, 0x00ff0000 },
1248 { 0x0000989c, 0x00ff0000, 0x00ff0000 },
1249 { 0x0000989c, 0x40ff0000, 0x40ff0000 },
1250 { 0x0000989c, 0x005f0000, 0x005f0000 },
1251 { 0x0000989c, 0x00870000, 0x00870000 },
1252 { 0x0000989c, 0x00f90000, 0x00f90000 },
1253 { 0x0000989c, 0x007b0000, 0x007b0000 },
1254 { 0x0000989c, 0x00ff0000, 0x00ff0000 },
1255 { 0x0000989c, 0x00f50000, 0x00f50000 },
1256 { 0x0000989c, 0x00dc0000, 0x00dc0000 },
1257 { 0x0000989c, 0x00110000, 0x00110000 },
1258 { 0x0000989c, 0x006100a8, 0x006100a8 },
1259 { 0x0000989c, 0x00423022, 0x00423022 },
1260 { 0x0000989c, 0x2014008f, 0x2014008f },
1261 { 0x0000989c, 0x00c40002, 0x00c40002 },
1262 { 0x0000989c, 0x003000f2, 0x003000f2 },
1263 { 0x0000989c, 0x00440016, 0x00440016 },
1264 { 0x0000989c, 0x00410040, 0x00410040 },
1265 { 0x0000989c, 0x0001805e, 0x0001805e },
1266 { 0x0000989c, 0x0000c0ab, 0x0000c0ab },
1267 { 0x0000989c, 0x000000e1, 0x000000e1 },
1268 { 0x0000989c, 0x00007080, 0x00007080 },
1269 { 0x0000989c, 0x000000d4, 0x000000d4 },
1270 { 0x000098d0, 0x0000000f, 0x0010000f },
1271};
1272
1273static const u32 ar5416Bank7[][2] = {
1274 { 0x0000989c, 0x00000500 },
1275 { 0x0000989c, 0x00000800 },
1276 { 0x000098cc, 0x0000000e },
1277};
1278
1279static const u32 ar5416Addac[][2] = {
1280 {0x0000989c, 0x00000000 },
1281 {0x0000989c, 0x00000000 },
1282 {0x0000989c, 0x00000000 },
1283 {0x0000989c, 0x00000000 },
1284 {0x0000989c, 0x00000000 },
1285 {0x0000989c, 0x00000000 },
1286 {0x0000989c, 0x00000000 },
1287 {0x0000989c, 0x00000010 },
1288 {0x0000989c, 0x00000000 },
1289 {0x0000989c, 0x00000000 },
1290 {0x0000989c, 0x00000000 },
1291 {0x0000989c, 0x00000000 },
1292 {0x0000989c, 0x00000000 },
1293 {0x0000989c, 0x00000000 },
1294 {0x0000989c, 0x00000000 },
1295 {0x0000989c, 0x00000000 },
1296 {0x0000989c, 0x00000000 },
1297 {0x0000989c, 0x00000000 },
1298 {0x0000989c, 0x00000000 },
1299 {0x0000989c, 0x00000000 },
1300 {0x0000989c, 0x00000000 },
1301 {0x0000989c, 0x000000c0 },
1302 {0x0000989c, 0x00000015 },
1303 {0x0000989c, 0x00000000 },
1304 {0x0000989c, 0x00000000 },
1305 {0x0000989c, 0x00000000 },
1306 {0x0000989c, 0x00000000 },
1307 {0x0000989c, 0x00000000 },
1308 {0x0000989c, 0x00000000 },
1309 {0x0000989c, 0x00000000 },
1310 {0x0000989c, 0x00000000 },
1311 {0x000098cc, 0x00000000 },
1312};
1313
1314
1315static const u32 ar5416Modes_9160[][6] = {
1316 { 0x00001030, 0x00000230, 0x00000460, 0x000002c0, 0x00000160, 0x000001e0 },
1317 { 0x00001070, 0x00000168, 0x000002d0, 0x00000318, 0x0000018c, 0x000001e0 },
1318 { 0x000010b0, 0x00000e60, 0x00001cc0, 0x00007c70, 0x00003e38, 0x00001180 },
1319 { 0x000010f0, 0x0000a000, 0x00014000, 0x00016000, 0x0000b000, 0x00014008 },
1320 { 0x00008014, 0x03e803e8, 0x07d007d0, 0x10801600, 0x08400b00, 0x06e006e0 },
1321 { 0x0000801c, 0x128d93a7, 0x128d93cf, 0x12e013d7, 0x12e013ab, 0x098813cf },
1322 { 0x00009804, 0x00000300, 0x000003c4, 0x000003c4, 0x00000300, 0x00000303 },
1323 { 0x00009820, 0x02020200, 0x02020200, 0x02020200, 0x02020200, 0x02020200 },
1324 { 0x00009824, 0x00000e0e, 0x00000e0e, 0x00000e0e, 0x00000e0e, 0x00000e0e },
1325 { 0x00009828, 0x0a020001, 0x0a020001, 0x0a020001, 0x0a020001, 0x0a020001 },
1326 { 0x00009834, 0x00000e0e, 0x00000e0e, 0x00000e0e, 0x00000e0e, 0x00000e0e },
1327 { 0x00009838, 0x00000007, 0x00000007, 0x00000007, 0x00000007, 0x00000007 },
1328 { 0x00009844, 0x0372161e, 0x0372161e, 0x037216a0, 0x037216a0, 0x037216a0 },
1329 { 0x00009848, 0x001a6a65, 0x001a6a65, 0x00197a68, 0x00197a68, 0x00197a68 },
1330 { 0x0000a848, 0x001a6a65, 0x001a6a65, 0x00197a68, 0x00197a68, 0x00197a68 },
1331 { 0x0000b848, 0x001a6a65, 0x001a6a65, 0x00197a68, 0x00197a68, 0x00197a68 },
1332 { 0x00009850, 0x6d48b4e2, 0x6d48b4e2, 0x6d48b0e2, 0x6d48b0e2, 0x6d48b0e2 },
1333 { 0x00009858, 0x7ec82d2e, 0x7ec82d2e, 0x7ec82d2e, 0x7ec82d2e, 0x7ec82d2e },
1334 { 0x0000985c, 0x3139605e, 0x3139605e, 0x3139605e, 0x3139605e, 0x3139605e },
1335 { 0x00009860, 0x00048d18, 0x00048d18, 0x00048d20, 0x00048d20, 0x00048d18 },
1336 { 0x0000c864, 0x0001ce00, 0x0001ce00, 0x0001ce00, 0x0001ce00, 0x0001ce00 },
1337 { 0x00009868, 0x409a40d0, 0x409a40d0, 0x409a40d0, 0x409a40d0, 0x409a40d0 },
1338 { 0x0000986c, 0x050cb081, 0x050cb081, 0x050cb081, 0x050cb081, 0x050cb081 },
1339 { 0x00009914, 0x000007d0, 0x000007d0, 0x00000898, 0x00000898, 0x000007d0 },
1340 { 0x00009918, 0x0000000a, 0x00000014, 0x00000016, 0x0000000b, 0x00000016 },
1341 { 0x00009924, 0xd00a8a07, 0xd00a8a07, 0xd00a8a0d, 0xd00a8a0d, 0xd00a8a0d },
1342 { 0x00009944, 0xdfb81020, 0xdfb81020, 0xdfb81020, 0xdfb81020, 0xdfb81020 },
1343 { 0x00009960, 0x00009b40, 0x00009b40, 0x00009b40, 0x00009b40, 0x00009b40 },
1344 { 0x0000a960, 0x00009b40, 0x00009b40, 0x00009b40, 0x00009b40, 0x00009b40 },
1345 { 0x0000b960, 0x00009b40, 0x00009b40, 0x00009b40, 0x00009b40, 0x00009b40 },
1346 { 0x00009964, 0x00001120, 0x00001120, 0x00001120, 0x00001120, 0x00001120 },
1347 { 0x0000c9bc, 0x001a0600, 0x001a0600, 0x001a0c00, 0x001a0c00, 0x001a0c00 },
1348 { 0x000099c0, 0x038919be, 0x038919be, 0x038919be, 0x038919be, 0x038919be },
1349 { 0x000099c4, 0x06336f77, 0x06336f77, 0x06336f77, 0x06336f77, 0x06336f77 },
1350 { 0x000099c8, 0x60f65329, 0x60f65329, 0x60f65329, 0x60f65329, 0x60f65329 },
1351 { 0x000099cc, 0x08f186c8, 0x08f186c8, 0x08f186c8, 0x08f186c8, 0x08f186c8 },
1352 { 0x000099d0, 0x00046384, 0x00046384, 0x00046384, 0x00046384, 0x00046384 },
1353 { 0x000099d4, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 },
1354 { 0x000099d8, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 },
1355 { 0x0000a204, 0x00000880, 0x00000880, 0x00000880, 0x00000880, 0x00000880 },
1356 { 0x0000a208, 0xd6be4788, 0xd6be4788, 0xd03e4788, 0xd03e4788, 0xd03e4788 },
1357 { 0x0000a20c, 0x002fc160, 0x002fc160, 0x002ac120, 0x002ac120, 0x002ac120 },
1358 { 0x0000b20c, 0x002fc160, 0x002fc160, 0x002ac120, 0x002ac120, 0x002ac120 },
1359 { 0x0000c20c, 0x002fc160, 0x002fc160, 0x002ac120, 0x002ac120, 0x002ac120 },
1360 { 0x0000a21c, 0x1883800a, 0x1883800a, 0x1883800a, 0x1883800a, 0x1883800a },
1361 { 0x0000a230, 0x00000000, 0x00000000, 0x00000210, 0x00000108, 0x00000000 },
1362 { 0x0000a274, 0x0a1a9caa, 0x0a1a9caa, 0x0a1a7caa, 0x0a1a7caa, 0x0a1a7caa },
1363 { 0x0000a300, 0x18010000, 0x18010000, 0x18010000, 0x18010000, 0x18010000 },
1364 { 0x0000a304, 0x30032602, 0x30032602, 0x2e032402, 0x2e032402, 0x2e032402 },
1365 { 0x0000a308, 0x48073e06, 0x48073e06, 0x4a0a3c06, 0x4a0a3c06, 0x4a0a3c06 },
1366 { 0x0000a30c, 0x560b4c0a, 0x560b4c0a, 0x621a540b, 0x621a540b, 0x621a540b },
1367 { 0x0000a310, 0x641a600f, 0x641a600f, 0x764f6c1b, 0x764f6c1b, 0x764f6c1b },
1368 { 0x0000a314, 0x7a4f6e1b, 0x7a4f6e1b, 0x845b7a5a, 0x845b7a5a, 0x845b7a5a },
1369 { 0x0000a318, 0x8c5b7e5a, 0x8c5b7e5a, 0x950f8ccf, 0x950f8ccf, 0x950f8ccf },
1370 { 0x0000a31c, 0x9d0f96cf, 0x9d0f96cf, 0xa5cf9b4f, 0xa5cf9b4f, 0xa5cf9b4f },
1371 { 0x0000a320, 0xb51fa69f, 0xb51fa69f, 0xbddfaf1f, 0xbddfaf1f, 0xbddfaf1f },
1372 { 0x0000a324, 0xcb3fbd07, 0xcb3fbcbf, 0xd1ffc93f, 0xd1ffc93f, 0xd1ffc93f },
1373 { 0x0000a328, 0x0000d7bf, 0x0000d7bf, 0x00000000, 0x00000000, 0x00000000 },
1374 { 0x0000a32c, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 },
1375 { 0x0000a330, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 },
1376 { 0x0000a334, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 },
1377};
1378
1379static const u32 ar5416Common_9160[][2] = {
1380 { 0x0000000c, 0x00000000 },
1381 { 0x00000030, 0x00020015 },
1382 { 0x00000034, 0x00000005 },
1383 { 0x00000040, 0x00000000 },
1384 { 0x00000044, 0x00000008 },
1385 { 0x00000048, 0x00000008 },
1386 { 0x0000004c, 0x00000010 },
1387 { 0x00000050, 0x00000000 },
1388 { 0x00000054, 0x0000001f },
1389 { 0x00000800, 0x00000000 },
1390 { 0x00000804, 0x00000000 },
1391 { 0x00000808, 0x00000000 },
1392 { 0x0000080c, 0x00000000 },
1393 { 0x00000810, 0x00000000 },
1394 { 0x00000814, 0x00000000 },
1395 { 0x00000818, 0x00000000 },
1396 { 0x0000081c, 0x00000000 },
1397 { 0x00000820, 0x00000000 },
1398 { 0x00000824, 0x00000000 },
1399 { 0x00001040, 0x002ffc0f },
1400 { 0x00001044, 0x002ffc0f },
1401 { 0x00001048, 0x002ffc0f },
1402 { 0x0000104c, 0x002ffc0f },
1403 { 0x00001050, 0x002ffc0f },
1404 { 0x00001054, 0x002ffc0f },
1405 { 0x00001058, 0x002ffc0f },
1406 { 0x0000105c, 0x002ffc0f },
1407 { 0x00001060, 0x002ffc0f },
1408 { 0x00001064, 0x002ffc0f },
1409 { 0x00001230, 0x00000000 },
1410 { 0x00001270, 0x00000000 },
1411 { 0x00001038, 0x00000000 },
1412 { 0x00001078, 0x00000000 },
1413 { 0x000010b8, 0x00000000 },
1414 { 0x000010f8, 0x00000000 },
1415 { 0x00001138, 0x00000000 },
1416 { 0x00001178, 0x00000000 },
1417 { 0x000011b8, 0x00000000 },
1418 { 0x000011f8, 0x00000000 },
1419 { 0x00001238, 0x00000000 },
1420 { 0x00001278, 0x00000000 },
1421 { 0x000012b8, 0x00000000 },
1422 { 0x000012f8, 0x00000000 },
1423 { 0x00001338, 0x00000000 },
1424 { 0x00001378, 0x00000000 },
1425 { 0x000013b8, 0x00000000 },
1426 { 0x000013f8, 0x00000000 },
1427 { 0x00001438, 0x00000000 },
1428 { 0x00001478, 0x00000000 },
1429 { 0x000014b8, 0x00000000 },
1430 { 0x000014f8, 0x00000000 },
1431 { 0x00001538, 0x00000000 },
1432 { 0x00001578, 0x00000000 },
1433 { 0x000015b8, 0x00000000 },
1434 { 0x000015f8, 0x00000000 },
1435 { 0x00001638, 0x00000000 },
1436 { 0x00001678, 0x00000000 },
1437 { 0x000016b8, 0x00000000 },
1438 { 0x000016f8, 0x00000000 },
1439 { 0x00001738, 0x00000000 },
1440 { 0x00001778, 0x00000000 },
1441 { 0x000017b8, 0x00000000 },
1442 { 0x000017f8, 0x00000000 },
1443 { 0x0000103c, 0x00000000 },
1444 { 0x0000107c, 0x00000000 },
1445 { 0x000010bc, 0x00000000 },
1446 { 0x000010fc, 0x00000000 },
1447 { 0x0000113c, 0x00000000 },
1448 { 0x0000117c, 0x00000000 },
1449 { 0x000011bc, 0x00000000 },
1450 { 0x000011fc, 0x00000000 },
1451 { 0x0000123c, 0x00000000 },
1452 { 0x0000127c, 0x00000000 },
1453 { 0x000012bc, 0x00000000 },
1454 { 0x000012fc, 0x00000000 },
1455 { 0x0000133c, 0x00000000 },
1456 { 0x0000137c, 0x00000000 },
1457 { 0x000013bc, 0x00000000 },
1458 { 0x000013fc, 0x00000000 },
1459 { 0x0000143c, 0x00000000 },
1460 { 0x0000147c, 0x00000000 },
1461 { 0x00004030, 0x00000002 },
1462 { 0x0000403c, 0x00000002 },
1463 { 0x00007010, 0x00000020 },
1464 { 0x00007038, 0x000004c2 },
1465 { 0x00008004, 0x00000000 },
1466 { 0x00008008, 0x00000000 },
1467 { 0x0000800c, 0x00000000 },
1468 { 0x00008018, 0x00000700 },
1469 { 0x00008020, 0x00000000 },
1470 { 0x00008038, 0x00000000 },
1471 { 0x0000803c, 0x00000000 },
1472 { 0x00008048, 0x40000000 },
1473 { 0x00008054, 0x00000000 },
1474 { 0x00008058, 0x00000000 },
1475 { 0x0000805c, 0x000fc78f },
1476 { 0x00008060, 0x0000000f },
1477 { 0x00008064, 0x00000000 },
1478 { 0x000080c0, 0x2a82301a },
1479 { 0x000080c4, 0x05dc01e0 },
1480 { 0x000080c8, 0x1f402710 },
1481 { 0x000080cc, 0x01f40000 },
1482 { 0x000080d0, 0x00001e00 },
1483 { 0x000080d4, 0x00000000 },
1484 { 0x000080d8, 0x00400000 },
1485 { 0x000080e0, 0xffffffff },
1486 { 0x000080e4, 0x0000ffff },
1487 { 0x000080e8, 0x003f3f3f },
1488 { 0x000080ec, 0x00000000 },
1489 { 0x000080f0, 0x00000000 },
1490 { 0x000080f4, 0x00000000 },
1491 { 0x000080f8, 0x00000000 },
1492 { 0x000080fc, 0x00020000 },
1493 { 0x00008100, 0x00020000 },
1494 { 0x00008104, 0x00000001 },
1495 { 0x00008108, 0x00000052 },
1496 { 0x0000810c, 0x00000000 },
1497 { 0x00008110, 0x00000168 },
1498 { 0x00008118, 0x000100aa },
1499 { 0x0000811c, 0x00003210 },
1500 { 0x00008120, 0x08f04800 },
1501 { 0x00008124, 0x00000000 },
1502 { 0x00008128, 0x00000000 },
1503 { 0x0000812c, 0x00000000 },
1504 { 0x00008130, 0x00000000 },
1505 { 0x00008134, 0x00000000 },
1506 { 0x00008138, 0x00000000 },
1507 { 0x0000813c, 0x00000000 },
1508 { 0x00008144, 0x00000000 },
1509 { 0x00008168, 0x00000000 },
1510 { 0x0000816c, 0x00000000 },
1511 { 0x00008170, 0x32143320 },
1512 { 0x00008174, 0xfaa4fa50 },
1513 { 0x00008178, 0x00000100 },
1514 { 0x0000817c, 0x00000000 },
1515 { 0x000081c4, 0x00000000 },
1516 { 0x000081d0, 0x00003210 },
1517 { 0x000081ec, 0x00000000 },
1518 { 0x000081f0, 0x00000000 },
1519 { 0x000081f4, 0x00000000 },
1520 { 0x000081f8, 0x00000000 },
1521 { 0x000081fc, 0x00000000 },
1522 { 0x00008200, 0x00000000 },
1523 { 0x00008204, 0x00000000 },
1524 { 0x00008208, 0x00000000 },
1525 { 0x0000820c, 0x00000000 },
1526 { 0x00008210, 0x00000000 },
1527 { 0x00008214, 0x00000000 },
1528 { 0x00008218, 0x00000000 },
1529 { 0x0000821c, 0x00000000 },
1530 { 0x00008220, 0x00000000 },
1531 { 0x00008224, 0x00000000 },
1532 { 0x00008228, 0x00000000 },
1533 { 0x0000822c, 0x00000000 },
1534 { 0x00008230, 0x00000000 },
1535 { 0x00008234, 0x00000000 },
1536 { 0x00008238, 0x00000000 },
1537 { 0x0000823c, 0x00000000 },
1538 { 0x00008240, 0x00100000 },
1539 { 0x00008244, 0x0010f400 },
1540 { 0x00008248, 0x00000100 },
1541 { 0x0000824c, 0x0001e800 },
1542 { 0x00008250, 0x00000000 },
1543 { 0x00008254, 0x00000000 },
1544 { 0x00008258, 0x00000000 },
1545 { 0x0000825c, 0x400000ff },
1546 { 0x00008260, 0x00080922 },
1547 { 0x00008270, 0x00000000 },
1548 { 0x00008274, 0x40000000 },
1549 { 0x00008278, 0x003e4180 },
1550 { 0x0000827c, 0x00000000 },
1551 { 0x00008284, 0x0000002c },
1552 { 0x00008288, 0x0000002c },
1553 { 0x0000828c, 0x00000000 },
1554 { 0x00008294, 0x00000000 },
1555 { 0x00008298, 0x00000000 },
1556 { 0x00008300, 0x00000000 },
1557 { 0x00008304, 0x00000000 },
1558 { 0x00008308, 0x00000000 },
1559 { 0x0000830c, 0x00000000 },
1560 { 0x00008310, 0x00000000 },
1561 { 0x00008314, 0x00000000 },
1562 { 0x00008318, 0x00000000 },
1563 { 0x00008328, 0x00000000 },
1564 { 0x0000832c, 0x00000007 },
1565 { 0x00008330, 0x00000302 },
1566 { 0x00008334, 0x00000e00 },
1567 { 0x00008338, 0x00000000 },
1568 { 0x0000833c, 0x00000000 },
1569 { 0x00008340, 0x000107ff },
1570 { 0x00009808, 0x00000000 },
1571 { 0x0000980c, 0xad848e19 },
1572 { 0x00009810, 0x7d14e000 },
1573 { 0x00009814, 0x9c0a9f6b },
1574 { 0x0000981c, 0x00000000 },
1575 { 0x0000982c, 0x0000a000 },
1576 { 0x00009830, 0x00000000 },
1577 { 0x0000983c, 0x00200400 },
1578 { 0x00009840, 0x206a01ae },
1579 { 0x0000984c, 0x1284233c },
1580 { 0x00009854, 0x00000859 },
1581 { 0x00009900, 0x00000000 },
1582 { 0x00009904, 0x00000000 },
1583 { 0x00009908, 0x00000000 },
1584 { 0x0000990c, 0x00000000 },
1585 { 0x0000991c, 0x10000fff },
1586 { 0x00009920, 0x05100000 },
1587 { 0x0000a920, 0x05100000 },
1588 { 0x0000b920, 0x05100000 },
1589 { 0x00009928, 0x00000001 },
1590 { 0x0000992c, 0x00000004 },
1591 { 0x00009934, 0x1e1f2022 },
1592 { 0x00009938, 0x0a0b0c0d },
1593 { 0x0000993c, 0x00000000 },
1594 { 0x00009948, 0x9280b212 },
1595 { 0x0000994c, 0x00020028 },
1596 { 0x00009954, 0x5f3ca3de },
1597 { 0x00009958, 0x2108ecff },
1598 { 0x00009940, 0x00750604 },
1599 { 0x0000c95c, 0x004b6a8e },
1600 { 0x0000c968, 0x000003ce },
1601 { 0x00009970, 0x190fb515 },
1602 { 0x00009974, 0x00000000 },
1603 { 0x00009978, 0x00000001 },
1604 { 0x0000997c, 0x00000000 },
1605 { 0x00009980, 0x00000000 },
1606 { 0x00009984, 0x00000000 },
1607 { 0x00009988, 0x00000000 },
1608 { 0x0000998c, 0x00000000 },
1609 { 0x00009990, 0x00000000 },
1610 { 0x00009994, 0x00000000 },
1611 { 0x00009998, 0x00000000 },
1612 { 0x0000999c, 0x00000000 },
1613 { 0x000099a0, 0x00000000 },
1614 { 0x000099a4, 0x00000001 },
1615 { 0x000099a8, 0x201fff00 },
1616 { 0x000099ac, 0x006f0000 },
1617 { 0x000099b0, 0x03051000 },
1618 { 0x000099dc, 0x00000000 },
1619 { 0x000099e0, 0x00000200 },
1620 { 0x000099e4, 0xaaaaaaaa },
1621 { 0x000099e8, 0x3c466478 },
1622 { 0x000099ec, 0x0cc80caa },
1623 { 0x000099fc, 0x00001042 },
1624 { 0x00009b00, 0x00000000 },
1625 { 0x00009b04, 0x00000001 },
1626 { 0x00009b08, 0x00000002 },
1627 { 0x00009b0c, 0x00000003 },
1628 { 0x00009b10, 0x00000004 },
1629 { 0x00009b14, 0x00000005 },
1630 { 0x00009b18, 0x00000008 },
1631 { 0x00009b1c, 0x00000009 },
1632 { 0x00009b20, 0x0000000a },
1633 { 0x00009b24, 0x0000000b },
1634 { 0x00009b28, 0x0000000c },
1635 { 0x00009b2c, 0x0000000d },
1636 { 0x00009b30, 0x00000010 },
1637 { 0x00009b34, 0x00000011 },
1638 { 0x00009b38, 0x00000012 },
1639 { 0x00009b3c, 0x00000013 },
1640 { 0x00009b40, 0x00000014 },
1641 { 0x00009b44, 0x00000015 },
1642 { 0x00009b48, 0x00000018 },
1643 { 0x00009b4c, 0x00000019 },
1644 { 0x00009b50, 0x0000001a },
1645 { 0x00009b54, 0x0000001b },
1646 { 0x00009b58, 0x0000001c },
1647 { 0x00009b5c, 0x0000001d },
1648 { 0x00009b60, 0x00000020 },
1649 { 0x00009b64, 0x00000021 },
1650 { 0x00009b68, 0x00000022 },
1651 { 0x00009b6c, 0x00000023 },
1652 { 0x00009b70, 0x00000024 },
1653 { 0x00009b74, 0x00000025 },
1654 { 0x00009b78, 0x00000028 },
1655 { 0x00009b7c, 0x00000029 },
1656 { 0x00009b80, 0x0000002a },
1657 { 0x00009b84, 0x0000002b },
1658 { 0x00009b88, 0x0000002c },
1659 { 0x00009b8c, 0x0000002d },
1660 { 0x00009b90, 0x00000030 },
1661 { 0x00009b94, 0x00000031 },
1662 { 0x00009b98, 0x00000032 },
1663 { 0x00009b9c, 0x00000033 },
1664 { 0x00009ba0, 0x00000034 },
1665 { 0x00009ba4, 0x00000035 },
1666 { 0x00009ba8, 0x00000035 },
1667 { 0x00009bac, 0x00000035 },
1668 { 0x00009bb0, 0x00000035 },
1669 { 0x00009bb4, 0x00000035 },
1670 { 0x00009bb8, 0x00000035 },
1671 { 0x00009bbc, 0x00000035 },
1672 { 0x00009bc0, 0x00000035 },
1673 { 0x00009bc4, 0x00000035 },
1674 { 0x00009bc8, 0x00000035 },
1675 { 0x00009bcc, 0x00000035 },
1676 { 0x00009bd0, 0x00000035 },
1677 { 0x00009bd4, 0x00000035 },
1678 { 0x00009bd8, 0x00000035 },
1679 { 0x00009bdc, 0x00000035 },
1680 { 0x00009be0, 0x00000035 },
1681 { 0x00009be4, 0x00000035 },
1682 { 0x00009be8, 0x00000035 },
1683 { 0x00009bec, 0x00000035 },
1684 { 0x00009bf0, 0x00000035 },
1685 { 0x00009bf4, 0x00000035 },
1686 { 0x00009bf8, 0x00000010 },
1687 { 0x00009bfc, 0x0000001a },
1688 { 0x0000a210, 0x40806333 },
1689 { 0x0000a214, 0x00106c10 },
1690 { 0x0000a218, 0x009c4060 },
1691 { 0x0000a220, 0x018830c6 },
1692 { 0x0000a224, 0x00000400 },
1693 { 0x0000a228, 0x001a0bb5 },
1694 { 0x0000a22c, 0x00000000 },
1695 { 0x0000a234, 0x20202020 },
1696 { 0x0000a238, 0x20202020 },
1697 { 0x0000a23c, 0x13c889af },
1698 { 0x0000a240, 0x38490a20 },
1699 { 0x0000a244, 0x00007bb6 },
1700 { 0x0000a248, 0x0fff3ffc },
1701 { 0x0000a24c, 0x00000001 },
1702 { 0x0000a250, 0x0000a000 },
1703 { 0x0000a254, 0x00000000 },
1704 { 0x0000a258, 0x0cc75380 },
1705 { 0x0000a25c, 0x0f0f0f01 },
1706 { 0x0000a260, 0xdfa91f01 },
1707 { 0x0000a268, 0x00000001 },
1708 { 0x0000a26c, 0x0ebae9c6 },
1709 { 0x0000b26c, 0x0ebae9c6 },
1710 { 0x0000c26c, 0x0ebae9c6 },
1711 { 0x0000d270, 0x00820820 },
1712 { 0x0000a278, 0x1ce739ce },
1713 { 0x0000a27c, 0x050701ce },
1714 { 0x0000a338, 0x00000000 },
1715 { 0x0000a33c, 0x00000000 },
1716 { 0x0000a340, 0x00000000 },
1717 { 0x0000a344, 0x00000000 },
1718 { 0x0000a348, 0x3fffffff },
1719 { 0x0000a34c, 0x3fffffff },
1720 { 0x0000a350, 0x3fffffff },
1721 { 0x0000a354, 0x0003ffff },
1722 { 0x0000a358, 0x79a8aa33 },
1723 { 0x0000d35c, 0x07ffffef },
1724 { 0x0000d360, 0x0fffffe7 },
1725 { 0x0000d364, 0x17ffffe5 },
1726 { 0x0000d368, 0x1fffffe4 },
1727 { 0x0000d36c, 0x37ffffe3 },
1728 { 0x0000d370, 0x3fffffe3 },
1729 { 0x0000d374, 0x57ffffe3 },
1730 { 0x0000d378, 0x5fffffe2 },
1731 { 0x0000d37c, 0x7fffffe2 },
1732 { 0x0000d380, 0x7f3c7bba },
1733 { 0x0000d384, 0xf3307ff0 },
1734 { 0x0000a388, 0x0c000000 },
1735 { 0x0000a38c, 0x20202020 },
1736 { 0x0000a390, 0x20202020 },
1737 { 0x0000a394, 0x1ce739ce },
1738 { 0x0000a398, 0x000001ce },
1739 { 0x0000a39c, 0x00000001 },
1740 { 0x0000a3a0, 0x00000000 },
1741 { 0x0000a3a4, 0x00000000 },
1742 { 0x0000a3a8, 0x00000000 },
1743 { 0x0000a3ac, 0x00000000 },
1744 { 0x0000a3b0, 0x00000000 },
1745 { 0x0000a3b4, 0x00000000 },
1746 { 0x0000a3b8, 0x00000000 },
1747 { 0x0000a3bc, 0x00000000 },
1748 { 0x0000a3c0, 0x00000000 },
1749 { 0x0000a3c4, 0x00000000 },
1750 { 0x0000a3c8, 0x00000246 },
1751 { 0x0000a3cc, 0x20202020 },
1752 { 0x0000a3d0, 0x20202020 },
1753 { 0x0000a3d4, 0x20202020 },
1754 { 0x0000a3dc, 0x1ce739ce },
1755 { 0x0000a3e0, 0x000001ce },
1756};
1757
1758static const u32 ar5416Bank0_9160[][2] = {
1759 { 0x000098b0, 0x1e5795e5 },
1760 { 0x000098e0, 0x02008020 },
1761};
1762
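/*
 * The three-column RF gain and bank tables below appear to carry two
 * alternative values per register offset; the driver presumably selects one
 * of the two columns depending on the operating band or bank configuration.
 */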
1763static const u32 ar5416BB_RfGain_9160[][3] = {
1764 { 0x00009a00, 0x00000000, 0x00000000 },
1765 { 0x00009a04, 0x00000040, 0x00000040 },
1766 { 0x00009a08, 0x00000080, 0x00000080 },
1767 { 0x00009a0c, 0x000001a1, 0x00000141 },
1768 { 0x00009a10, 0x000001e1, 0x00000181 },
1769 { 0x00009a14, 0x00000021, 0x000001c1 },
1770 { 0x00009a18, 0x00000061, 0x00000001 },
1771 { 0x00009a1c, 0x00000168, 0x00000041 },
1772 { 0x00009a20, 0x000001a8, 0x000001a8 },
1773 { 0x00009a24, 0x000001e8, 0x000001e8 },
1774 { 0x00009a28, 0x00000028, 0x00000028 },
1775 { 0x00009a2c, 0x00000068, 0x00000068 },
1776 { 0x00009a30, 0x00000189, 0x000000a8 },
1777 { 0x00009a34, 0x000001c9, 0x00000169 },
1778 { 0x00009a38, 0x00000009, 0x000001a9 },
1779 { 0x00009a3c, 0x00000049, 0x000001e9 },
1780 { 0x00009a40, 0x00000089, 0x00000029 },
1781 { 0x00009a44, 0x00000170, 0x00000069 },
1782 { 0x00009a48, 0x000001b0, 0x00000190 },
1783 { 0x00009a4c, 0x000001f0, 0x000001d0 },
1784 { 0x00009a50, 0x00000030, 0x00000010 },
1785 { 0x00009a54, 0x00000070, 0x00000050 },
1786 { 0x00009a58, 0x00000191, 0x00000090 },
1787 { 0x00009a5c, 0x000001d1, 0x00000151 },
1788 { 0x00009a60, 0x00000011, 0x00000191 },
1789 { 0x00009a64, 0x00000051, 0x000001d1 },
1790 { 0x00009a68, 0x00000091, 0x00000011 },
1791 { 0x00009a6c, 0x000001b8, 0x00000051 },
1792 { 0x00009a70, 0x000001f8, 0x00000198 },
1793 { 0x00009a74, 0x00000038, 0x000001d8 },
1794 { 0x00009a78, 0x00000078, 0x00000018 },
1795 { 0x00009a7c, 0x00000199, 0x00000058 },
1796 { 0x00009a80, 0x000001d9, 0x00000098 },
1797 { 0x00009a84, 0x00000019, 0x00000159 },
1798 { 0x00009a88, 0x00000059, 0x00000199 },
1799 { 0x00009a8c, 0x00000099, 0x000001d9 },
1800 { 0x00009a90, 0x000000d9, 0x00000019 },
1801 { 0x00009a94, 0x000000f9, 0x00000059 },
1802 { 0x00009a98, 0x000000f9, 0x00000099 },
1803 { 0x00009a9c, 0x000000f9, 0x000000d9 },
1804 { 0x00009aa0, 0x000000f9, 0x000000f9 },
1805 { 0x00009aa4, 0x000000f9, 0x000000f9 },
1806 { 0x00009aa8, 0x000000f9, 0x000000f9 },
1807 { 0x00009aac, 0x000000f9, 0x000000f9 },
1808 { 0x00009ab0, 0x000000f9, 0x000000f9 },
1809 { 0x00009ab4, 0x000000f9, 0x000000f9 },
1810 { 0x00009ab8, 0x000000f9, 0x000000f9 },
1811 { 0x00009abc, 0x000000f9, 0x000000f9 },
1812 { 0x00009ac0, 0x000000f9, 0x000000f9 },
1813 { 0x00009ac4, 0x000000f9, 0x000000f9 },
1814 { 0x00009ac8, 0x000000f9, 0x000000f9 },
1815 { 0x00009acc, 0x000000f9, 0x000000f9 },
1816 { 0x00009ad0, 0x000000f9, 0x000000f9 },
1817 { 0x00009ad4, 0x000000f9, 0x000000f9 },
1818 { 0x00009ad8, 0x000000f9, 0x000000f9 },
1819 { 0x00009adc, 0x000000f9, 0x000000f9 },
1820 { 0x00009ae0, 0x000000f9, 0x000000f9 },
1821 { 0x00009ae4, 0x000000f9, 0x000000f9 },
1822 { 0x00009ae8, 0x000000f9, 0x000000f9 },
1823 { 0x00009aec, 0x000000f9, 0x000000f9 },
1824 { 0x00009af0, 0x000000f9, 0x000000f9 },
1825 { 0x00009af4, 0x000000f9, 0x000000f9 },
1826 { 0x00009af8, 0x000000f9, 0x000000f9 },
1827 { 0x00009afc, 0x000000f9, 0x000000f9 },
1828};
1829
1830static const u32 ar5416Bank1_9160[][2] = {
1831 { 0x000098b0, 0x02108421 },
1832 { 0x000098ec, 0x00000008 },
1833};
1834
1835static const u32 ar5416Bank2_9160[][2] = {
1836 { 0x000098b0, 0x0e73ff17 },
1837 { 0x000098e0, 0x00000420 },
1838};
1839
1840static const u32 ar5416Bank3_9160[][3] = {
1841 { 0x000098f0, 0x01400018, 0x01c00018 },
1842};
1843
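/*
 * The bank 6 and ADDAC tables below write the same offset (0x0000989c)
 * repeatedly: this looks like a serial programming sequence in which
 * successive words are streamed into the analog section and the final write
 * (to 0x000098d0 or 0x000098cc) latches the bank.
 */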
1844static const u32 ar5416Bank6_9160[][3] = {
1845
1846 { 0x0000989c, 0x00000000, 0x00000000 },
1847 { 0x0000989c, 0x00000000, 0x00000000 },
1848 { 0x0000989c, 0x00000000, 0x00000000 },
1849 { 0x0000989c, 0x00e00000, 0x00e00000 },
1850 { 0x0000989c, 0x005e0000, 0x005e0000 },
1851 { 0x0000989c, 0x00120000, 0x00120000 },
1852 { 0x0000989c, 0x00620000, 0x00620000 },
1853 { 0x0000989c, 0x00020000, 0x00020000 },
1854 { 0x0000989c, 0x00ff0000, 0x00ff0000 },
1855 { 0x0000989c, 0x00ff0000, 0x00ff0000 },
1856 { 0x0000989c, 0x00ff0000, 0x00ff0000 },
1857 { 0x0000989c, 0x40ff0000, 0x40ff0000 },
1858 { 0x0000989c, 0x005f0000, 0x005f0000 },
1859 { 0x0000989c, 0x00870000, 0x00870000 },
1860 { 0x0000989c, 0x00f90000, 0x00f90000 },
1861 { 0x0000989c, 0x007b0000, 0x007b0000 },
1862 { 0x0000989c, 0x00ff0000, 0x00ff0000 },
1863 { 0x0000989c, 0x00f50000, 0x00f50000 },
1864 { 0x0000989c, 0x00dc0000, 0x00dc0000 },
1865 { 0x0000989c, 0x00110000, 0x00110000 },
1866 { 0x0000989c, 0x006100a8, 0x006100a8 },
1867 { 0x0000989c, 0x004210a2, 0x004210a2 },
1868 { 0x0000989c, 0x0014008f, 0x0014008f },
1869 { 0x0000989c, 0x00c40003, 0x00c40003 },
1870 { 0x0000989c, 0x003000f2, 0x003000f2 },
1871 { 0x0000989c, 0x00440016, 0x00440016 },
1872 { 0x0000989c, 0x00410040, 0x00410040 },
1873 { 0x0000989c, 0x0001805e, 0x0001805e },
1874 { 0x0000989c, 0x0000c0ab, 0x0000c0ab },
1875 { 0x0000989c, 0x000000f1, 0x000000f1 },
1876 { 0x0000989c, 0x00002081, 0x00002081 },
1877 { 0x0000989c, 0x000000d4, 0x000000d4 },
1878 { 0x000098d0, 0x0000000f, 0x0010000f },
1879};
1880
1881static const u32 ar5416Bank6TPC_9160[][3] = {
1882 { 0x0000989c, 0x00000000, 0x00000000 },
1883 { 0x0000989c, 0x00000000, 0x00000000 },
1884 { 0x0000989c, 0x00000000, 0x00000000 },
1885 { 0x0000989c, 0x00e00000, 0x00e00000 },
1886 { 0x0000989c, 0x005e0000, 0x005e0000 },
1887 { 0x0000989c, 0x00120000, 0x00120000 },
1888 { 0x0000989c, 0x00620000, 0x00620000 },
1889 { 0x0000989c, 0x00020000, 0x00020000 },
1890 { 0x0000989c, 0x00ff0000, 0x00ff0000 },
1891 { 0x0000989c, 0x00ff0000, 0x00ff0000 },
1892 { 0x0000989c, 0x00ff0000, 0x00ff0000 },
1893 { 0x0000989c, 0x40ff0000, 0x40ff0000 },
1894 { 0x0000989c, 0x005f0000, 0x005f0000 },
1895 { 0x0000989c, 0x00870000, 0x00870000 },
1896 { 0x0000989c, 0x00f90000, 0x00f90000 },
1897 { 0x0000989c, 0x007b0000, 0x007b0000 },
1898 { 0x0000989c, 0x00ff0000, 0x00ff0000 },
1899 { 0x0000989c, 0x00f50000, 0x00f50000 },
1900 { 0x0000989c, 0x00dc0000, 0x00dc0000 },
1901 { 0x0000989c, 0x00110000, 0x00110000 },
1902 { 0x0000989c, 0x006100a8, 0x006100a8 },
1903 { 0x0000989c, 0x00423022, 0x00423022 },
1904 { 0x0000989c, 0x2014008f, 0x2014008f },
1905 { 0x0000989c, 0x00c40002, 0x00c40002 },
1906 { 0x0000989c, 0x003000f2, 0x003000f2 },
1907 { 0x0000989c, 0x00440016, 0x00440016 },
1908 { 0x0000989c, 0x00410040, 0x00410040 },
1909 { 0x0000989c, 0x0001805e, 0x0001805e },
1910 { 0x0000989c, 0x0000c0ab, 0x0000c0ab },
1911 { 0x0000989c, 0x000000e1, 0x000000e1 },
1912 { 0x0000989c, 0x00007080, 0x00007080 },
1913 { 0x0000989c, 0x000000d4, 0x000000d4 },
1914 { 0x000098d0, 0x0000000f, 0x0010000f },
1915};
1916
1917static const u32 ar5416Bank7_9160[][2] = {
1918 { 0x0000989c, 0x00000500 },
1919 { 0x0000989c, 0x00000800 },
1920 { 0x000098cc, 0x0000000e },
1921};
1922
1923
1924static u32 ar5416Addac_9160[][2] = {
1925 {0x0000989c, 0x00000000 },
1926 {0x0000989c, 0x00000000 },
1927 {0x0000989c, 0x00000000 },
1928 {0x0000989c, 0x00000000 },
1929 {0x0000989c, 0x00000000 },
1930 {0x0000989c, 0x00000000 },
1931 {0x0000989c, 0x000000c0 },
1932 {0x0000989c, 0x00000018 },
1933 {0x0000989c, 0x00000004 },
1934 {0x0000989c, 0x00000000 },
1935 {0x0000989c, 0x00000000 },
1936 {0x0000989c, 0x00000000 },
1937 {0x0000989c, 0x00000000 },
1938 {0x0000989c, 0x00000000 },
1939 {0x0000989c, 0x00000000 },
1940 {0x0000989c, 0x00000000 },
1941 {0x0000989c, 0x00000000 },
1942 {0x0000989c, 0x00000000 },
1943 {0x0000989c, 0x00000000 },
1944 {0x0000989c, 0x00000000 },
1945 {0x0000989c, 0x00000000 },
1946 {0x0000989c, 0x000000c0 },
1947 {0x0000989c, 0x00000019 },
1948 {0x0000989c, 0x00000004 },
1949 {0x0000989c, 0x00000000 },
1950 {0x0000989c, 0x00000000 },
1951 {0x0000989c, 0x00000000 },
1952 {0x0000989c, 0x00000004 },
1953 {0x0000989c, 0x00000003 },
1954 {0x0000989c, 0x00000008 },
1955 {0x0000989c, 0x00000000 },
1956 {0x000098cc, 0x00000000 },
1957};
1958
1959
1960static u32 ar5416Addac_91601_1[][2] = {
1961 {0x0000989c, 0x00000000 },
1962 {0x0000989c, 0x00000000 },
1963 {0x0000989c, 0x00000000 },
1964 {0x0000989c, 0x00000000 },
1965 {0x0000989c, 0x00000000 },
1966 {0x0000989c, 0x00000000 },
1967 {0x0000989c, 0x000000c0 },
1968 {0x0000989c, 0x00000018 },
1969 {0x0000989c, 0x00000004 },
1970 {0x0000989c, 0x00000000 },
1971 {0x0000989c, 0x00000000 },
1972 {0x0000989c, 0x00000000 },
1973 {0x0000989c, 0x00000000 },
1974 {0x0000989c, 0x00000000 },
1975 {0x0000989c, 0x00000000 },
1976 {0x0000989c, 0x00000000 },
1977 {0x0000989c, 0x00000000 },
1978 {0x0000989c, 0x00000000 },
1979 {0x0000989c, 0x00000000 },
1980 {0x0000989c, 0x00000000 },
1981 {0x0000989c, 0x00000000 },
1982 {0x0000989c, 0x000000c0 },
1983 {0x0000989c, 0x00000019 },
1984 {0x0000989c, 0x00000004 },
1985 {0x0000989c, 0x00000000 },
1986 {0x0000989c, 0x00000000 },
1987 {0x0000989c, 0x00000000 },
1988 {0x0000989c, 0x00000000 },
1989 {0x0000989c, 0x00000000 },
1990 {0x0000989c, 0x00000000 },
1991 {0x0000989c, 0x00000000 },
1992 {0x000098cc, 0x00000000 },
1993};
1994
1995
1996
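/*
 * AR9280 initialization tables follow; they appear to use the same layout
 * conventions as the AR9160 tables above (six-column per-mode tables and
 * two-column { offset, value } common tables).
 */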
1997static const u32 ar9280Modes_9280[][6] = {
1998 { 0x00001030, 0x00000230, 0x00000460, 0x000002c0, 0x00000160, 0x000001e0 },
1999 { 0x00001070, 0x00000168, 0x000002d0, 0x00000318, 0x0000018c, 0x000001e0 },
2000 { 0x000010b0, 0x00000e60, 0x00001cc0, 0x00007c70, 0x00003e38, 0x00001180 },
2001 { 0x000010f0, 0x0000a000, 0x00014000, 0x00016000, 0x0000b000, 0x00014008 },
2002 { 0x00008014, 0x03e803e8, 0x07d007d0, 0x10801080, 0x08400840, 0x06e006e0 },
2003 { 0x0000801c, 0x128d8027, 0x128d804f, 0x12e00057, 0x12e0002b, 0x0988004f },
2004 { 0x00009804, 0x00000300, 0x000003c4, 0x000003c4, 0x00000300, 0x00000303 },
2005 { 0x00009820, 0x02020200, 0x02020200, 0x02020200, 0x02020200, 0x02020200 },
2006 { 0x00009824, 0x00000e0e, 0x00000e0e, 0x00000e0e, 0x00000e0e, 0x00000e0e },
2007 { 0x00009828, 0x0a020001, 0x0a020001, 0x0a020001, 0x0a020001, 0x0a020001 },
2008 { 0x00009834, 0x00000e0e, 0x00000e0e, 0x00000e0e, 0x00000e0e, 0x00000e0e },
2009 { 0x00009838, 0x00000007, 0x00000007, 0x00000007, 0x00000007, 0x00000007 },
2010 { 0x00009844, 0x1372161e, 0x1372161e, 0x137216a0, 0x137216a0, 0x137216a0 },
2011 { 0x00009848, 0x00028566, 0x00028566, 0x00028563, 0x00028563, 0x00028563 },
2012 { 0x0000a848, 0x00028566, 0x00028566, 0x00028563, 0x00028563, 0x00028563 },
2013 { 0x00009850, 0x6d4000e2, 0x6d4000e2, 0x6d4000e2, 0x6d4000e2, 0x6d4000e2 },
2014 { 0x00009858, 0x7ec82d2e, 0x7ec82d2e, 0x7ec82d2e, 0x7ec82d2e, 0x7ec82d2e },
2015 { 0x0000985c, 0x3139605e, 0x3139605e, 0x3139605e, 0x3139605e, 0x3139605e },
2016 { 0x00009860, 0x00049d18, 0x00049d18, 0x00049d20, 0x00049d20, 0x00049d18 },
2017 { 0x0000c864, 0x0001ce00, 0x0001ce00, 0x0001ce00, 0x0001ce00, 0x0001ce00 },
2018 { 0x00009868, 0x5ac64190, 0x5ac64190, 0x5ac64190, 0x5ac64190, 0x5ac64190 },
2019 { 0x0000986c, 0x06903081, 0x06903081, 0x06903881, 0x06903881, 0x06903881 },
2020 { 0x00009914, 0x000007d0, 0x000007d0, 0x00000898, 0x00000898, 0x000007d0 },
2021 { 0x00009918, 0x0000000a, 0x00000014, 0x00000016, 0x0000000b, 0x00000016 },
2022 { 0x00009924, 0xd00a8a07, 0xd00a8a07, 0xd00a8a0d, 0xd00a8a0d, 0xd00a8a0d },
2023 { 0x00009944, 0xdfbc1010, 0xdfbc1010, 0xdfbc1010, 0xdfbc1010, 0xdfbc1010 },
2024 { 0x00009960, 0x00000010, 0x00000010, 0x00000010, 0x00000010, 0x00000010 },
2025 { 0x0000a960, 0x00000010, 0x00000010, 0x00000010, 0x00000010, 0x00000010 },
2026 { 0x00009964, 0x00000210, 0x00000210, 0x00000210, 0x00000210, 0x00000210 },
2027 { 0x0000c9b8, 0x0000001a, 0x0000001a, 0x0000001a, 0x0000001a, 0x0000001a },
2028 { 0x0000c9bc, 0x00000600, 0x00000600, 0x00000c00, 0x00000c00, 0x00000c00 },
2029 { 0x000099c0, 0x05eea6d4, 0x05eea6d4, 0x05eea6d4, 0x05eea6d4, 0x05eea6d4 },
2030 { 0x000099c4, 0x06336f77, 0x06336f77, 0x06336f77, 0x06336f77, 0x06336f77 },
2031 { 0x000099c8, 0x60f6532c, 0x60f6532c, 0x60f6532c, 0x60f6532c, 0x60f6532c },
2032 { 0x000099cc, 0x08f186c8, 0x08f186c8, 0x08f186c8, 0x08f186c8, 0x08f186c8 },
2033 { 0x000099d0, 0x00046384, 0x00046384, 0x00046384, 0x00046384, 0x00046384 },
2034 { 0x000099d4, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 },
2035 { 0x000099d8, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 },
2036 { 0x00009a00, 0x00008184, 0x00008184, 0x00000214, 0x00000214, 0x00000214 },
2037 { 0x00009a04, 0x00008188, 0x00008188, 0x00000218, 0x00000218, 0x00000218 },
2038 { 0x00009a08, 0x0000818c, 0x0000818c, 0x00000224, 0x00000224, 0x00000224 },
2039 { 0x00009a0c, 0x00008190, 0x00008190, 0x00000228, 0x00000228, 0x00000228 },
2040 { 0x00009a10, 0x00008194, 0x00008194, 0x0000022c, 0x0000022c, 0x0000022c },
2041 { 0x00009a14, 0x00008200, 0x00008200, 0x00000230, 0x00000230, 0x00000230 },
2042 { 0x00009a18, 0x00008204, 0x00008204, 0x000002a4, 0x000002a4, 0x000002a4 },
2043 { 0x00009a1c, 0x00008208, 0x00008208, 0x000002a8, 0x000002a8, 0x000002a8 },
2044 { 0x00009a20, 0x0000820c, 0x0000820c, 0x000002ac, 0x000002ac, 0x000002ac },
2045 { 0x00009a24, 0x00008210, 0x00008210, 0x000002b0, 0x000002b0, 0x000002b0 },
2046 { 0x00009a28, 0x00008214, 0x00008214, 0x000002b4, 0x000002b4, 0x000002b4 },
2047 { 0x00009a2c, 0x00008280, 0x00008280, 0x000002b8, 0x000002b8, 0x000002b8 },
2048 { 0x00009a30, 0x00008284, 0x00008284, 0x00000390, 0x00000390, 0x00000390 },
2049 { 0x00009a34, 0x00008288, 0x00008288, 0x00000394, 0x00000394, 0x00000394 },
2050 { 0x00009a38, 0x0000828c, 0x0000828c, 0x00000398, 0x00000398, 0x00000398 },
2051 { 0x00009a3c, 0x00008290, 0x00008290, 0x00000334, 0x00000334, 0x00000334 },
2052 { 0x00009a40, 0x00008300, 0x00008300, 0x00000338, 0x00000338, 0x00000338 },
2053 { 0x00009a44, 0x00008304, 0x00008304, 0x000003ac, 0x000003ac, 0x000003ac },
2054 { 0x00009a48, 0x00008308, 0x00008308, 0x000003b0, 0x000003b0, 0x000003b0 },
2055 { 0x00009a4c, 0x0000830c, 0x0000830c, 0x000003b4, 0x000003b4, 0x000003b4 },
2056 { 0x00009a50, 0x00008310, 0x00008310, 0x000003b8, 0x000003b8, 0x000003b8 },
2057 { 0x00009a54, 0x00008314, 0x00008314, 0x000003a5, 0x000003a5, 0x000003a5 },
2058 { 0x00009a58, 0x00008380, 0x00008380, 0x000003a9, 0x000003a9, 0x000003a9 },
2059 { 0x00009a5c, 0x00008384, 0x00008384, 0x000003ad, 0x000003ad, 0x000003ad },
2060 { 0x00009a60, 0x00008388, 0x00008388, 0x00008194, 0x00008194, 0x00008194 },
2061 { 0x00009a64, 0x0000838c, 0x0000838c, 0x000081a0, 0x000081a0, 0x000081a0 },
2062 { 0x00009a68, 0x00008390, 0x00008390, 0x0000820c, 0x0000820c, 0x0000820c },
2063 { 0x00009a6c, 0x00008394, 0x00008394, 0x000081a8, 0x000081a8, 0x000081a8 },
2064 { 0x00009a70, 0x0000a380, 0x0000a380, 0x00008284, 0x00008284, 0x00008284 },
2065 { 0x00009a74, 0x0000a384, 0x0000a384, 0x00008288, 0x00008288, 0x00008288 },
2066 { 0x00009a78, 0x0000a388, 0x0000a388, 0x00008224, 0x00008224, 0x00008224 },
2067 { 0x00009a7c, 0x0000a38c, 0x0000a38c, 0x00008290, 0x00008290, 0x00008290 },
2068 { 0x00009a80, 0x0000a390, 0x0000a390, 0x00008300, 0x00008300, 0x00008300 },
2069 { 0x00009a84, 0x0000a394, 0x0000a394, 0x00008304, 0x00008304, 0x00008304 },
2070 { 0x00009a88, 0x0000a780, 0x0000a780, 0x00008308, 0x00008308, 0x00008308 },
2071 { 0x00009a8c, 0x0000a784, 0x0000a784, 0x0000830c, 0x0000830c, 0x0000830c },
2072 { 0x00009a90, 0x0000a788, 0x0000a788, 0x00008380, 0x00008380, 0x00008380 },
2073 { 0x00009a94, 0x0000a78c, 0x0000a78c, 0x00008384, 0x00008384, 0x00008384 },
2074 { 0x00009a98, 0x0000a790, 0x0000a790, 0x00008700, 0x00008700, 0x00008700 },
2075 { 0x00009a9c, 0x0000a794, 0x0000a794, 0x00008704, 0x00008704, 0x00008704 },
2076 { 0x00009aa0, 0x0000ab84, 0x0000ab84, 0x00008708, 0x00008708, 0x00008708 },
2077 { 0x00009aa4, 0x0000ab88, 0x0000ab88, 0x0000870c, 0x0000870c, 0x0000870c },
2078 { 0x00009aa8, 0x0000ab8c, 0x0000ab8c, 0x00008780, 0x00008780, 0x00008780 },
2079 { 0x00009aac, 0x0000ab90, 0x0000ab90, 0x00008784, 0x00008784, 0x00008784 },
2080 { 0x00009ab0, 0x0000ab94, 0x0000ab94, 0x00008b00, 0x00008b00, 0x00008b00 },
2081 { 0x00009ab4, 0x0000af80, 0x0000af80, 0x00008b04, 0x00008b04, 0x00008b04 },
2082 { 0x00009ab8, 0x0000af84, 0x0000af84, 0x00008b08, 0x00008b08, 0x00008b08 },
2083 { 0x00009abc, 0x0000af88, 0x0000af88, 0x00008b0c, 0x00008b0c, 0x00008b0c },
2084 { 0x00009ac0, 0x0000af8c, 0x0000af8c, 0x00008b80, 0x00008b80, 0x00008b80 },
2085 { 0x00009ac4, 0x0000af90, 0x0000af90, 0x00008b84, 0x00008b84, 0x00008b84 },
2086 { 0x00009ac8, 0x0000af94, 0x0000af94, 0x00008b88, 0x00008b88, 0x00008b88 },
2087 { 0x00009acc, 0x0000b380, 0x0000b380, 0x00008b8c, 0x00008b8c, 0x00008b8c },
2088 { 0x00009ad0, 0x0000b384, 0x0000b384, 0x00008b90, 0x00008b90, 0x00008b90 },
2089 { 0x00009ad4, 0x0000b388, 0x0000b388, 0x00008f80, 0x00008f80, 0x00008f80 },
2090 { 0x00009ad8, 0x0000b38c, 0x0000b38c, 0x00008f84, 0x00008f84, 0x00008f84 },
2091 { 0x00009adc, 0x0000b390, 0x0000b390, 0x00008f88, 0x00008f88, 0x00008f88 },
2092 { 0x00009ae0, 0x0000b394, 0x0000b394, 0x00008f8c, 0x00008f8c, 0x00008f8c },
2093 { 0x00009ae4, 0x0000b398, 0x0000b398, 0x00008f90, 0x00008f90, 0x00008f90 },
2094 { 0x00009ae8, 0x0000b780, 0x0000b780, 0x0000930c, 0x0000930c, 0x0000930c },
2095 { 0x00009aec, 0x0000b784, 0x0000b784, 0x00009310, 0x00009310, 0x00009310 },
2096 { 0x00009af0, 0x0000b788, 0x0000b788, 0x00009384, 0x00009384, 0x00009384 },
2097 { 0x00009af4, 0x0000b78c, 0x0000b78c, 0x00009388, 0x00009388, 0x00009388 },
2098 { 0x00009af8, 0x0000b790, 0x0000b790, 0x00009324, 0x00009324, 0x00009324 },
2099 { 0x00009afc, 0x0000b794, 0x0000b794, 0x00009704, 0x00009704, 0x00009704 },
2100 { 0x00009b00, 0x0000b798, 0x0000b798, 0x000096a4, 0x000096a4, 0x000096a4 },
2101 { 0x00009b04, 0x0000d784, 0x0000d784, 0x000096a8, 0x000096a8, 0x000096a8 },
2102 { 0x00009b08, 0x0000d788, 0x0000d788, 0x00009710, 0x00009710, 0x00009710 },
2103 { 0x00009b0c, 0x0000d78c, 0x0000d78c, 0x00009714, 0x00009714, 0x00009714 },
2104 { 0x00009b10, 0x0000d790, 0x0000d790, 0x00009720, 0x00009720, 0x00009720 },
2105 { 0x00009b14, 0x0000f780, 0x0000f780, 0x00009724, 0x00009724, 0x00009724 },
2106 { 0x00009b18, 0x0000f784, 0x0000f784, 0x00009728, 0x00009728, 0x00009728 },
2107 { 0x00009b1c, 0x0000f788, 0x0000f788, 0x0000972c, 0x0000972c, 0x0000972c },
2108 { 0x00009b20, 0x0000f78c, 0x0000f78c, 0x000097a0, 0x000097a0, 0x000097a0 },
2109 { 0x00009b24, 0x0000f790, 0x0000f790, 0x000097a4, 0x000097a4, 0x000097a4 },
2110 { 0x00009b28, 0x0000f794, 0x0000f794, 0x000097a8, 0x000097a8, 0x000097a8 },
2111 { 0x00009b2c, 0x0000f7a4, 0x0000f7a4, 0x000097b0, 0x000097b0, 0x000097b0 },
2112 { 0x00009b30, 0x0000f7a8, 0x0000f7a8, 0x000097b4, 0x000097b4, 0x000097b4 },
2113 { 0x00009b34, 0x0000f7ac, 0x0000f7ac, 0x000097b8, 0x000097b8, 0x000097b8 },
2114 { 0x00009b38, 0x0000f7b0, 0x0000f7b0, 0x000097a5, 0x000097a5, 0x000097a5 },
2115 { 0x00009b3c, 0x0000f7b4, 0x0000f7b4, 0x000097a9, 0x000097a9, 0x000097a9 },
2116 { 0x00009b40, 0x0000f7a1, 0x0000f7a1, 0x000097ad, 0x000097ad, 0x000097ad },
2117 { 0x00009b44, 0x0000f7a5, 0x0000f7a5, 0x000097b1, 0x000097b1, 0x000097b1 },
2118 { 0x00009b48, 0x0000f7a9, 0x0000f7a9, 0x000097b5, 0x000097b5, 0x000097b5 },
2119 { 0x00009b4c, 0x0000f7ad, 0x0000f7ad, 0x000097b9, 0x000097b9, 0x000097b9 },
2120 { 0x00009b50, 0x0000f7b1, 0x0000f7b1, 0x000097c5, 0x000097c5, 0x000097c5 },
2121 { 0x00009b54, 0x0000f7b5, 0x0000f7b5, 0x000097c9, 0x000097c9, 0x000097c9 },
2122 { 0x00009b58, 0x0000f7c5, 0x0000f7c5, 0x000097d1, 0x000097d1, 0x000097d1 },
2123 { 0x00009b5c, 0x0000f7c9, 0x0000f7c9, 0x000097d5, 0x000097d5, 0x000097d5 },
2124 { 0x00009b60, 0x0000f7cd, 0x0000f7cd, 0x000097d9, 0x000097d9, 0x000097d9 },
2125 { 0x00009b64, 0x0000f7d1, 0x0000f7d1, 0x000097c6, 0x000097c6, 0x000097c6 },
2126 { 0x00009b68, 0x0000f7d5, 0x0000f7d5, 0x000097ca, 0x000097ca, 0x000097ca },
2127 { 0x00009b6c, 0x0000f7c2, 0x0000f7c2, 0x000097ce, 0x000097ce, 0x000097ce },
2128 { 0x00009b70, 0x0000f7c6, 0x0000f7c6, 0x000097d2, 0x000097d2, 0x000097d2 },
2129 { 0x00009b74, 0x0000f7ca, 0x0000f7ca, 0x000097d6, 0x000097d6, 0x000097d6 },
2130 { 0x00009b78, 0x0000f7ce, 0x0000f7ce, 0x000097c3, 0x000097c3, 0x000097c3 },
2131 { 0x00009b7c, 0x0000f7d2, 0x0000f7d2, 0x000097c7, 0x000097c7, 0x000097c7 },
2132 { 0x00009b80, 0x0000f7d6, 0x0000f7d6, 0x000097cb, 0x000097cb, 0x000097cb },
2133 { 0x00009b84, 0x0000f7c3, 0x0000f7c3, 0x000097cf, 0x000097cf, 0x000097cf },
2134 { 0x00009b88, 0x0000f7c7, 0x0000f7c7, 0x000097d7, 0x000097d7, 0x000097d7 },
2135 { 0x00009b8c, 0x0000f7cb, 0x0000f7cb, 0x000097db, 0x000097db, 0x000097db },
2136 { 0x00009b90, 0x0000f7d3, 0x0000f7d3, 0x000097db, 0x000097db, 0x000097db },
2137 { 0x00009b94, 0x0000f7d7, 0x0000f7d7, 0x000097db, 0x000097db, 0x000097db },
2138 { 0x00009b98, 0x0000f7db, 0x0000f7db, 0x000097db, 0x000097db, 0x000097db },
2139 { 0x00009b9c, 0x0000f7db, 0x0000f7db, 0x000097db, 0x000097db, 0x000097db },
2140 { 0x00009ba0, 0x0000f7db, 0x0000f7db, 0x000097db, 0x000097db, 0x000097db },
2141 { 0x00009ba4, 0x0000f7db, 0x0000f7db, 0x000097db, 0x000097db, 0x000097db },
2142 { 0x00009ba8, 0x0000f7db, 0x0000f7db, 0x000097db, 0x000097db, 0x000097db },
2143 { 0x00009bac, 0x0000f7db, 0x0000f7db, 0x000097db, 0x000097db, 0x000097db },
2144 { 0x00009bb0, 0x0000f7db, 0x0000f7db, 0x000097db, 0x000097db, 0x000097db },
2145 { 0x00009bb4, 0x0000f7db, 0x0000f7db, 0x000097db, 0x000097db, 0x000097db },
2146 { 0x00009bb8, 0x0000f7db, 0x0000f7db, 0x000097db, 0x000097db, 0x000097db },
2147 { 0x00009bbc, 0x0000f7db, 0x0000f7db, 0x000097db, 0x000097db, 0x000097db },
2148 { 0x00009bc0, 0x0000f7db, 0x0000f7db, 0x000097db, 0x000097db, 0x000097db },
2149 { 0x00009bc4, 0x0000f7db, 0x0000f7db, 0x000097db, 0x000097db, 0x000097db },
2150 { 0x00009bc8, 0x0000f7db, 0x0000f7db, 0x000097db, 0x000097db, 0x000097db },
2151 { 0x00009bcc, 0x0000f7db, 0x0000f7db, 0x000097db, 0x000097db, 0x000097db },
2152 { 0x00009bd0, 0x0000f7db, 0x0000f7db, 0x000097db, 0x000097db, 0x000097db },
2153 { 0x00009bd4, 0x0000f7db, 0x0000f7db, 0x000097db, 0x000097db, 0x000097db },
2154 { 0x00009bd8, 0x0000f7db, 0x0000f7db, 0x000097db, 0x000097db, 0x000097db },
2155 { 0x00009bdc, 0x0000f7db, 0x0000f7db, 0x000097db, 0x000097db, 0x000097db },
2156 { 0x00009be0, 0x0000f7db, 0x0000f7db, 0x000097db, 0x000097db, 0x000097db },
2157 { 0x00009be4, 0x0000f7db, 0x0000f7db, 0x000097db, 0x000097db, 0x000097db },
2158 { 0x00009be8, 0x0000f7db, 0x0000f7db, 0x000097db, 0x000097db, 0x000097db },
2159 { 0x00009bec, 0x0000f7db, 0x0000f7db, 0x000097db, 0x000097db, 0x000097db },
2160 { 0x00009bf0, 0x0000f7db, 0x0000f7db, 0x000097db, 0x000097db, 0x000097db },
2161 { 0x00009bf4, 0x0000f7db, 0x0000f7db, 0x000097db, 0x000097db, 0x000097db },
2162 { 0x00009bf8, 0x0000f7db, 0x0000f7db, 0x000097db, 0x000097db, 0x000097db },
2163 { 0x00009bfc, 0x0000f7db, 0x0000f7db, 0x000097db, 0x000097db, 0x000097db },
2164 { 0x0000a204, 0x00000444, 0x00000444, 0x00000444, 0x00000444, 0x00000444 },
2165 { 0x0000a208, 0x803e4788, 0x803e4788, 0x803e4788, 0x803e4788, 0x803e4788 },
2166 { 0x0000a20c, 0x000c6019, 0x000c6019, 0x000c6019, 0x000c6019, 0x000c6019 },
2167 { 0x0000b20c, 0x000c6019, 0x000c6019, 0x000c6019, 0x000c6019, 0x000c6019 },
2168 { 0x0000a21c, 0x1883800a, 0x1883800a, 0x1883800a, 0x1883800a, 0x1883800a },
2169 { 0x0000a230, 0x00000000, 0x00000000, 0x00000210, 0x00000108, 0x00000000 },
2170 { 0x0000a274, 0x0a19c652, 0x0a19c652, 0x0a1aa652, 0x0a1aa652, 0x0a1aa652 },
2171 { 0x0000a300, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 },
2172 { 0x0000a304, 0x00003002, 0x00003002, 0x00003002, 0x00003002, 0x00003002 },
2173 { 0x0000a308, 0x00006004, 0x00006004, 0x00008009, 0x00008009, 0x00008009 },
2174 { 0x0000a30c, 0x0000a006, 0x0000a006, 0x0000b00b, 0x0000b00b, 0x0000b00b },
2175 { 0x0000a310, 0x0000e012, 0x0000e012, 0x0000e012, 0x0000e012, 0x0000e012 },
2176 { 0x0000a314, 0x00011014, 0x00011014, 0x00012048, 0x00012048, 0x00012048 },
2177 { 0x0000a318, 0x0001504a, 0x0001504a, 0x0001604a, 0x0001604a, 0x0001604a },
2178 { 0x0000a31c, 0x0001904c, 0x0001904c, 0x0001a211, 0x0001a211, 0x0001a211 },
2179 { 0x0000a320, 0x0001c04e, 0x0001c04e, 0x0001e213, 0x0001e213, 0x0001e213 },
2180 { 0x0000a324, 0x00020092, 0x00020092, 0x0002121b, 0x0002121b, 0x0002121b },
2181 { 0x0000a328, 0x0002410a, 0x0002410a, 0x00024412, 0x00024412, 0x00024412 },
2182 { 0x0000a32c, 0x0002710c, 0x0002710c, 0x00028414, 0x00028414, 0x00028414 },
2183 { 0x0000a330, 0x0002b18b, 0x0002b18b, 0x0002b44a, 0x0002b44a, 0x0002b44a },
2184 { 0x0000a334, 0x0002e1cc, 0x0002e1cc, 0x00030649, 0x00030649, 0x00030649 },
2185 { 0x0000a338, 0x000321ec, 0x000321ec, 0x0003364b, 0x0003364b, 0x0003364b },
2186 { 0x0000a33c, 0x000321ec, 0x000321ec, 0x00038a49, 0x00038a49, 0x00038a49 },
2187 { 0x0000a340, 0x000321ec, 0x000321ec, 0x0003be48, 0x0003be48, 0x0003be48 },
2188 { 0x0000a344, 0x000321ec, 0x000321ec, 0x0003ee4a, 0x0003ee4a, 0x0003ee4a },
2189 { 0x0000a348, 0x000321ec, 0x000321ec, 0x00042e88, 0x00042e88, 0x00042e88 },
2190 { 0x0000a34c, 0x000321ec, 0x000321ec, 0x00046e8a, 0x00046e8a, 0x00046e8a },
2191 { 0x0000a350, 0x000321ec, 0x000321ec, 0x00049ec9, 0x00049ec9, 0x00049ec9 },
2192 { 0x0000a354, 0x000321ec, 0x000321ec, 0x0004bf42, 0x0004bf42, 0x0004bf42 },
2193 { 0x0000784c, 0x0e4f048c, 0x0e4f048c, 0x0e4d048c, 0x0e4d048c, 0x0e4d048c },
2194 { 0x00007854, 0x12031828, 0x12031828, 0x12035828, 0x12035828, 0x12035828 },
2195 { 0x00007870, 0x807ec400, 0x807ec400, 0x807ec000, 0x807ec000, 0x807ec000 },
2196 { 0x0000788c, 0x00010000, 0x00010000, 0x00110000, 0x00110000, 0x00110000 },
2197};
2198
2199static const u32 ar9280Common_9280[][2] = {
2200 { 0x0000000c, 0x00000000 },
2201 { 0x00000030, 0x00020015 },
2202 { 0x00000034, 0x00000005 },
2203 { 0x00000040, 0x00000000 },
2204 { 0x00000044, 0x00000008 },
2205 { 0x00000048, 0x00000008 },
2206 { 0x0000004c, 0x00000010 },
2207 { 0x00000050, 0x00000000 },
2208 { 0x00000054, 0x0000001f },
2209 { 0x00000800, 0x00000000 },
2210 { 0x00000804, 0x00000000 },
2211 { 0x00000808, 0x00000000 },
2212 { 0x0000080c, 0x00000000 },
2213 { 0x00000810, 0x00000000 },
2214 { 0x00000814, 0x00000000 },
2215 { 0x00000818, 0x00000000 },
2216 { 0x0000081c, 0x00000000 },
2217 { 0x00000820, 0x00000000 },
2218 { 0x00000824, 0x00000000 },
2219 { 0x00001040, 0x002ffc0f },
2220 { 0x00001044, 0x002ffc0f },
2221 { 0x00001048, 0x002ffc0f },
2222 { 0x0000104c, 0x002ffc0f },
2223 { 0x00001050, 0x002ffc0f },
2224 { 0x00001054, 0x002ffc0f },
2225 { 0x00001058, 0x002ffc0f },
2226 { 0x0000105c, 0x002ffc0f },
2227 { 0x00001060, 0x002ffc0f },
2228 { 0x00001064, 0x002ffc0f },
2229 { 0x00001230, 0x00000000 },
2230 { 0x00001270, 0x00000000 },
2231 { 0x00001038, 0x00000000 },
2232 { 0x00001078, 0x00000000 },
2233 { 0x000010b8, 0x00000000 },
2234 { 0x000010f8, 0x00000000 },
2235 { 0x00001138, 0x00000000 },
2236 { 0x00001178, 0x00000000 },
2237 { 0x000011b8, 0x00000000 },
2238 { 0x000011f8, 0x00000000 },
2239 { 0x00001238, 0x00000000 },
2240 { 0x00001278, 0x00000000 },
2241 { 0x000012b8, 0x00000000 },
2242 { 0x000012f8, 0x00000000 },
2243 { 0x00001338, 0x00000000 },
2244 { 0x00001378, 0x00000000 },
2245 { 0x000013b8, 0x00000000 },
2246 { 0x000013f8, 0x00000000 },
2247 { 0x00001438, 0x00000000 },
2248 { 0x00001478, 0x00000000 },
2249 { 0x000014b8, 0x00000000 },
2250 { 0x000014f8, 0x00000000 },
2251 { 0x00001538, 0x00000000 },
2252 { 0x00001578, 0x00000000 },
2253 { 0x000015b8, 0x00000000 },
2254 { 0x000015f8, 0x00000000 },
2255 { 0x00001638, 0x00000000 },
2256 { 0x00001678, 0x00000000 },
2257 { 0x000016b8, 0x00000000 },
2258 { 0x000016f8, 0x00000000 },
2259 { 0x00001738, 0x00000000 },
2260 { 0x00001778, 0x00000000 },
2261 { 0x000017b8, 0x00000000 },
2262 { 0x000017f8, 0x00000000 },
2263 { 0x0000103c, 0x00000000 },
2264 { 0x0000107c, 0x00000000 },
2265 { 0x000010bc, 0x00000000 },
2266 { 0x000010fc, 0x00000000 },
2267 { 0x0000113c, 0x00000000 },
2268 { 0x0000117c, 0x00000000 },
2269 { 0x000011bc, 0x00000000 },
2270 { 0x000011fc, 0x00000000 },
2271 { 0x0000123c, 0x00000000 },
2272 { 0x0000127c, 0x00000000 },
2273 { 0x000012bc, 0x00000000 },
2274 { 0x000012fc, 0x00000000 },
2275 { 0x0000133c, 0x00000000 },
2276 { 0x0000137c, 0x00000000 },
2277 { 0x000013bc, 0x00000000 },
2278 { 0x000013fc, 0x00000000 },
2279 { 0x0000143c, 0x00000000 },
2280 { 0x0000147c, 0x00000000 },
2281 { 0x00004030, 0x00000002 },
2282 { 0x0000403c, 0x00000002 },
2283 { 0x00004024, 0x0000001f },
2284 { 0x00007010, 0x00000033 },
2285 { 0x00007038, 0x000004c2 },
2286 { 0x00008004, 0x00000000 },
2287 { 0x00008008, 0x00000000 },
2288 { 0x0000800c, 0x00000000 },
2289 { 0x00008018, 0x00000700 },
2290 { 0x00008020, 0x00000000 },
2291 { 0x00008038, 0x00000000 },
2292 { 0x0000803c, 0x00000000 },
2293 { 0x00008048, 0x40000000 },
2294 { 0x00008054, 0x00000000 },
2295 { 0x00008058, 0x00000000 },
2296 { 0x0000805c, 0x000fc78f },
2297 { 0x00008060, 0x0000000f },
2298 { 0x00008064, 0x00000000 },
2299 { 0x00008070, 0x00000000 },
2300 { 0x000080c0, 0x2a82301a },
2301 { 0x000080c4, 0x05dc01e0 },
2302 { 0x000080c8, 0x1f402710 },
2303 { 0x000080cc, 0x01f40000 },
2304 { 0x000080d0, 0x00001e00 },
2305 { 0x000080d4, 0x00000000 },
2306 { 0x000080d8, 0x00400000 },
2307 { 0x000080e0, 0xffffffff },
2308 { 0x000080e4, 0x0000ffff },
2309 { 0x000080e8, 0x003f3f3f },
2310 { 0x000080ec, 0x00000000 },
2311 { 0x000080f0, 0x00000000 },
2312 { 0x000080f4, 0x00000000 },
2313 { 0x000080f8, 0x00000000 },
2314 { 0x000080fc, 0x00020000 },
2315 { 0x00008100, 0x00020000 },
2316 { 0x00008104, 0x00000001 },
2317 { 0x00008108, 0x00000052 },
2318 { 0x0000810c, 0x00000000 },
2319 { 0x00008110, 0x00000168 },
2320 { 0x00008118, 0x000100aa },
2321 { 0x0000811c, 0x00003210 },
2322 { 0x00008120, 0x08f04800 },
2323 { 0x00008124, 0x00000000 },
2324 { 0x00008128, 0x00000000 },
2325 { 0x0000812c, 0x00000000 },
2326 { 0x00008130, 0x00000000 },
2327 { 0x00008134, 0x00000000 },
2328 { 0x00008138, 0x00000000 },
2329 { 0x0000813c, 0x00000000 },
2330 { 0x00008144, 0x00000000 },
2331 { 0x00008168, 0x00000000 },
2332 { 0x0000816c, 0x00000000 },
2333 { 0x00008170, 0x32143320 },
2334 { 0x00008174, 0xfaa4fa50 },
2335 { 0x00008178, 0x00000100 },
2336 { 0x0000817c, 0x00000000 },
2337 { 0x000081c4, 0x00000000 },
2338 { 0x000081d0, 0x00003210 },
2339 { 0x000081ec, 0x00000000 },
2340 { 0x000081f0, 0x00000000 },
2341 { 0x000081f4, 0x00000000 },
2342 { 0x000081f8, 0x00000000 },
2343 { 0x000081fc, 0x00000000 },
2344 { 0x00008200, 0x00000000 },
2345 { 0x00008204, 0x00000000 },
2346 { 0x00008208, 0x00000000 },
2347 { 0x0000820c, 0x00000000 },
2348 { 0x00008210, 0x00000000 },
2349 { 0x00008214, 0x00000000 },
2350 { 0x00008218, 0x00000000 },
2351 { 0x0000821c, 0x00000000 },
2352 { 0x00008220, 0x00000000 },
2353 { 0x00008224, 0x00000000 },
2354 { 0x00008228, 0x00000000 },
2355 { 0x0000822c, 0x00000000 },
2356 { 0x00008230, 0x00000000 },
2357 { 0x00008234, 0x00000000 },
2358 { 0x00008238, 0x00000000 },
2359 { 0x0000823c, 0x00000000 },
2360 { 0x00008240, 0x00100000 },
2361 { 0x00008244, 0x0010f400 },
2362 { 0x00008248, 0x00000100 },
2363 { 0x0000824c, 0x0001e800 },
2364 { 0x00008250, 0x00000000 },
2365 { 0x00008254, 0x00000000 },
2366 { 0x00008258, 0x00000000 },
2367 { 0x0000825c, 0x400000ff },
2368 { 0x00008260, 0x00080922 },
2369 { 0x00008270, 0x00000000 },
2370 { 0x00008274, 0x40000000 },
2371 { 0x00008278, 0x003e4180 },
2372 { 0x0000827c, 0x00000000 },
2373 { 0x00008284, 0x0000002c },
2374 { 0x00008288, 0x0000002c },
2375 { 0x0000828c, 0x00000000 },
2376 { 0x00008294, 0x00000000 },
2377 { 0x00008298, 0x00000000 },
2378 { 0x00008300, 0x00000000 },
2379 { 0x00008304, 0x00000000 },
2380 { 0x00008308, 0x00000000 },
2381 { 0x0000830c, 0x00000000 },
2382 { 0x00008310, 0x00000000 },
2383 { 0x00008314, 0x00000000 },
2384 { 0x00008318, 0x00000000 },
2385 { 0x00008328, 0x00000000 },
2386 { 0x0000832c, 0x00000007 },
2387 { 0x00008330, 0x00000302 },
2388 { 0x00008334, 0x00000e00 },
2389 { 0x00008338, 0x00000000 },
2390 { 0x0000833c, 0x00000000 },
2391 { 0x00008340, 0x000107ff },
2392 { 0x00008344, 0x00000000 },
2393 { 0x00009808, 0x00000000 },
2394 { 0x0000980c, 0xaf268e30 },
2395 { 0x00009810, 0xfd14e000 },
2396 { 0x00009814, 0x9c0a9f6b },
2397 { 0x0000981c, 0x00000000 },
2398 { 0x0000982c, 0x0000a000 },
2399 { 0x00009830, 0x00000000 },
2400 { 0x0000983c, 0x00200400 },
2401 { 0x00009840, 0x206a01ae },
2402 { 0x0000984c, 0x0040233c },
2403 { 0x0000a84c, 0x0040233c },
2404 { 0x00009854, 0x00000044 },
2405 { 0x00009900, 0x00000000 },
2406 { 0x00009904, 0x00000000 },
2407 { 0x00009908, 0x00000000 },
2408 { 0x0000990c, 0x00000000 },
2409 { 0x0000991c, 0x10000fff },
2410 { 0x00009920, 0x04900000 },
2411 { 0x0000a920, 0x04900000 },
2412 { 0x00009928, 0x00000001 },
2413 { 0x0000992c, 0x00000004 },
2414 { 0x00009934, 0x1e1f2022 },
2415 { 0x00009938, 0x0a0b0c0d },
2416 { 0x0000993c, 0x00000000 },
2417 { 0x00009948, 0x9280c00a },
2418 { 0x0000994c, 0x00020028 },
2419 { 0x00009954, 0xe250a51e },
2420 { 0x00009958, 0x3388ffff },
2421 { 0x00009940, 0x00781204 },
2422 { 0x0000c95c, 0x004b6a8e },
2423 { 0x0000c968, 0x000003ce },
2424 { 0x00009970, 0x190fb514 },
2425 { 0x00009974, 0x00000000 },
2426 { 0x00009978, 0x00000001 },
2427 { 0x0000997c, 0x00000000 },
2428 { 0x00009980, 0x00000000 },
2429 { 0x00009984, 0x00000000 },
2430 { 0x00009988, 0x00000000 },
2431 { 0x0000998c, 0x00000000 },
2432 { 0x00009990, 0x00000000 },
2433 { 0x00009994, 0x00000000 },
2434 { 0x00009998, 0x00000000 },
2435 { 0x0000999c, 0x00000000 },
2436 { 0x000099a0, 0x00000000 },
2437 { 0x000099a4, 0x00000001 },
2438 { 0x000099a8, 0x201fff00 },
2439 { 0x000099ac, 0x006f00c4 },
2440 { 0x000099b0, 0x03051000 },
2441 { 0x000099b4, 0x00000820 },
2442 { 0x000099dc, 0x00000000 },
2443 { 0x000099e0, 0x00000000 },
2444 { 0x000099e4, 0xaaaaaaaa },
2445 { 0x000099e8, 0x3c466478 },
2446 { 0x000099ec, 0x0cc80caa },
2447 { 0x000099fc, 0x00001042 },
2448 { 0x0000a210, 0x4080a333 },
2449 { 0x0000a214, 0x40206c10 },
2450 { 0x0000a218, 0x009c4060 },
2451 { 0x0000a220, 0x01834061 },
2452 { 0x0000a224, 0x00000400 },
2453 { 0x0000a228, 0x000003b5 },
2454 { 0x0000a22c, 0x23277200 },
2455 { 0x0000a234, 0x20202020 },
2456 { 0x0000a238, 0x20202020 },
2457 { 0x0000a23c, 0x13c889af },
2458 { 0x0000a240, 0x38490a20 },
2459 { 0x0000a244, 0x00007bb6 },
2460 { 0x0000a248, 0x0fff3ffc },
2461 { 0x0000a24c, 0x00000001 },
2462 { 0x0000a250, 0x001da000 },
2463 { 0x0000a254, 0x00000000 },
2464 { 0x0000a258, 0x0cdbd380 },
2465 { 0x0000a25c, 0x0f0f0f01 },
2466 { 0x0000a260, 0xdfa91f01 },
2467 { 0x0000a268, 0x00000000 },
2468 { 0x0000a26c, 0x0ebae9c6 },
2469 { 0x0000b26c, 0x0ebae9c6 },
2470 { 0x0000d270, 0x00820820 },
2471 { 0x0000a278, 0x1ce739ce },
2472 { 0x0000a27c, 0x050701ce },
2473 { 0x0000a358, 0x7999aa0f },
2474 { 0x0000d35c, 0x07ffffef },
2475 { 0x0000d360, 0x0fffffe7 },
2476 { 0x0000d364, 0x17ffffe5 },
2477 { 0x0000d368, 0x1fffffe4 },
2478 { 0x0000d36c, 0x37ffffe3 },
2479 { 0x0000d370, 0x3fffffe3 },
2480 { 0x0000d374, 0x57ffffe3 },
2481 { 0x0000d378, 0x5fffffe2 },
2482 { 0x0000d37c, 0x7fffffe2 },
2483 { 0x0000d380, 0x7f3c7bba },
2484 { 0x0000d384, 0xf3307ff0 },
2485 { 0x0000a388, 0x0c000000 },
2486 { 0x0000a38c, 0x20202020 },
2487 { 0x0000a390, 0x20202020 },
2488 { 0x0000a394, 0x1ce739ce },
2489 { 0x0000a398, 0x000001ce },
2490 { 0x0000a39c, 0x00000001 },
2491 { 0x0000a3a0, 0x00000000 },
2492 { 0x0000a3a4, 0x00000000 },
2493 { 0x0000a3a8, 0x00000000 },
2494 { 0x0000a3ac, 0x00000000 },
2495 { 0x0000a3b0, 0x00000000 },
2496 { 0x0000a3b4, 0x00000000 },
2497 { 0x0000a3b8, 0x00000000 },
2498 { 0x0000a3bc, 0x00000000 },
2499 { 0x0000a3c0, 0x00000000 },
2500 { 0x0000a3c4, 0x00000000 },
2501 { 0x0000a3c8, 0x00000246 },
2502 { 0x0000a3cc, 0x20202020 },
2503 { 0x0000a3d0, 0x20202020 },
2504 { 0x0000a3d4, 0x20202020 },
2505 { 0x0000a3dc, 0x1ce739ce },
2506 { 0x0000a3e0, 0x000001ce },
2507 { 0x0000a3e4, 0x00000000 },
2508 { 0x0000a3e8, 0x18c43433 },
2509 { 0x0000a3ec, 0x00f38081 },
2510 { 0x00007800, 0x00040000 },
2511 { 0x00007804, 0xdb005012 },
2512 { 0x00007808, 0x04924914 },
2513 { 0x0000780c, 0x21084210 },
2514 { 0x00007810, 0x6d801300 },
2515 { 0x00007814, 0x0019beff },
2516 { 0x00007818, 0x07e40000 },
2517 { 0x0000781c, 0x00492000 },
2518 { 0x00007820, 0x92492480 },
2519 { 0x00007824, 0x00040000 },
2520 { 0x00007828, 0xdb005012 },
2521 { 0x0000782c, 0x04924914 },
2522 { 0x00007830, 0x21084210 },
2523 { 0x00007834, 0x6d801300 },
2524 { 0x00007838, 0x0019beff },
2525 { 0x0000783c, 0x07e40000 },
2526 { 0x00007840, 0x00492000 },
2527 { 0x00007844, 0x92492480 },
2528 { 0x00007848, 0x00120000 },
2529 { 0x00007850, 0x54214514 },
2530 { 0x00007858, 0x92592692 },
2531 { 0x00007860, 0x52802000 },
2532 { 0x00007864, 0x0a8e370e },
2533 { 0x00007868, 0xc0102850 },
2534 { 0x0000786c, 0x812d4000 },
2535 { 0x00007874, 0x001b6db0 },
2536 { 0x00007878, 0x00376b63 },
2537 { 0x0000787c, 0x06db6db6 },
2538 { 0x00007880, 0x006d8000 },
2539 { 0x00007884, 0xffeffffe },
2540 { 0x00007888, 0xffeffffe },
2541 { 0x00007890, 0x00060aeb },
2542 { 0x00007894, 0x5a108000 },
2543 { 0x00007898, 0x2a850160 },
2544};
2545
2546
2547
2548
2549static const u32 ar9280Modes_9280_2[][6] = {
2550 { 0x00001030, 0x00000230, 0x00000460, 0x000002c0, 0x00000160, 0x000001e0 },
2551 { 0x00001070, 0x00000168, 0x000002d0, 0x00000318, 0x0000018c, 0x000001e0 },
2552 { 0x000010b0, 0x00000e60, 0x00001cc0, 0x00007c70, 0x00003e38, 0x00001180 },
2553 { 0x000010f0, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000008 },
2554 { 0x00008014, 0x03e803e8, 0x07d007d0, 0x10801600, 0x08400b00, 0x06e006e0 },
2555 { 0x0000801c, 0x128d8027, 0x128d804f, 0x12e00057, 0x12e0002b, 0x0988004f },
2556 { 0x00008318, 0x00003e80, 0x00007d00, 0x00006880, 0x00003440, 0x00006880 },
2557 { 0x00009804, 0x00000300, 0x000003c4, 0x000003c4, 0x00000300, 0x00000303 },
2558 { 0x00009820, 0x02020200, 0x02020200, 0x02020200, 0x02020200, 0x02020200 },
2559 { 0x00009824, 0x00000e0e, 0x00000e0e, 0x00000e0e, 0x00000e0e, 0x00000e0e },
2560 { 0x00009828, 0x0a020001, 0x0a020001, 0x0a020001, 0x0a020001, 0x0a020001 },
2561 { 0x00009834, 0x00000e0e, 0x00000e0e, 0x00000e0e, 0x00000e0e, 0x00000e0e },
2562 { 0x00009838, 0x00000007, 0x00000007, 0x00000007, 0x00000007, 0x00000007 },
2563 { 0x00009840, 0x206a012e, 0x206a012e, 0x206a022e, 0x206a022e, 0x206a022e },
2564 { 0x00009844, 0x0372161e, 0x0372161e, 0x037216a0, 0x037216a0, 0x037216a0 },
2565 { 0x00009848, 0x00001066, 0x00001066, 0x00001063, 0x00001063, 0x00001063 },
2566 { 0x0000a848, 0x00001066, 0x00001066, 0x00001063, 0x00001063, 0x00001063 },
2567 { 0x00009850, 0x6d4000e2, 0x6d4000e2, 0x6d4000e2, 0x6d4000e2, 0x6d4000e2 },
2568 { 0x00009858, 0x7ec84d2e, 0x7ec84d2e, 0x7ec88d2e, 0x7ec88d2e, 0x7ec88d2e },
2569 { 0x0000985c, 0x3139605e, 0x3139605e, 0x3139605e, 0x3139605e, 0x3139605e },
2570 { 0x00009860, 0x00048d18, 0x00048d18, 0x00048d20, 0x00048d20, 0x00048d18 },
2571 { 0x0000c864, 0x0000fe00, 0x0000fe00, 0x0001ce00, 0x0001ce00, 0x0001ce00 },
2572 { 0x00009868, 0x5ac640d0, 0x5ac640d0, 0x5ac640d0, 0x5ac640d0, 0x5ac640d0 },
2573 { 0x0000986c, 0x06903081, 0x06903081, 0x06903881, 0x06903881, 0x06903881 },
2574 { 0x00009914, 0x000007d0, 0x000007d0, 0x00000898, 0x00000898, 0x000007d0 },
2575 { 0x00009918, 0x0000000a, 0x00000014, 0x00000016, 0x0000000b, 0x00000016 },
2576 { 0x00009924, 0xd00a8a07, 0xd00a8a07, 0xd00a8a0d, 0xd00a8a0d, 0xd00a8a0d },
2577 { 0x00009944, 0xdfbc1010, 0xdfbc1010, 0xdfbc1010, 0xdfbc1010, 0xdfbc1010 },
2578 { 0x00009960, 0x00000010, 0x00000010, 0x00000010, 0x00000010, 0x00000010 },
2579 { 0x0000a960, 0x00000010, 0x00000010, 0x00000010, 0x00000010, 0x00000010 },
2580 { 0x00009964, 0x00000210, 0x00000210, 0x00000210, 0x00000210, 0x00000210 },
2581 { 0x0000c9b8, 0x0000000f, 0x0000000f, 0x0000001c, 0x0000001c, 0x0000001c },
2582 { 0x0000c9bc, 0x00000600, 0x00000600, 0x00000c00, 0x00000c00, 0x00000c00 },
2583 { 0x000099c0, 0x05eea6d4, 0x05eea6d4, 0x05eea6d4, 0x05eea6d4, 0x05eea6d4 },
2584 { 0x000099c4, 0x06336f77, 0x06336f77, 0x06336f77, 0x06336f77, 0x06336f77 },
2585 { 0x000099c8, 0x60f65329, 0x60f65329, 0x60f65329, 0x60f65329, 0x60f65329 },
2586 { 0x000099cc, 0x08f186c8, 0x08f186c8, 0x08f186c8, 0x08f186c8, 0x08f186c8 },
2587 { 0x000099d0, 0x00046384, 0x00046384, 0x00046384, 0x00046384, 0x00046384 },
2588 { 0x000099d4, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 },
2589 { 0x000099d8, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 },
2590 { 0x00009a00, 0x00008184, 0x00008184, 0x00000290, 0x00000290, 0x00000290 },
2591 { 0x00009a04, 0x00008188, 0x00008188, 0x00000300, 0x00000300, 0x00000300 },
2592 { 0x00009a08, 0x0000818c, 0x0000818c, 0x00000304, 0x00000304, 0x00000304 },
2593 { 0x00009a0c, 0x00008190, 0x00008190, 0x00000308, 0x00000308, 0x00000308 },
2594 { 0x00009a10, 0x00008194, 0x00008194, 0x0000030c, 0x0000030c, 0x0000030c },
2595 { 0x00009a14, 0x00008200, 0x00008200, 0x00008000, 0x00008000, 0x00008000 },
2596 { 0x00009a18, 0x00008204, 0x00008204, 0x00008004, 0x00008004, 0x00008004 },
2597 { 0x00009a1c, 0x00008208, 0x00008208, 0x00008008, 0x00008008, 0x00008008 },
2598 { 0x00009a20, 0x0000820c, 0x0000820c, 0x0000800c, 0x0000800c, 0x0000800c },
2599 { 0x00009a24, 0x00008210, 0x00008210, 0x00008080, 0x00008080, 0x00008080 },
2600 { 0x00009a28, 0x00008214, 0x00008214, 0x00008084, 0x00008084, 0x00008084 },
2601 { 0x00009a2c, 0x00008280, 0x00008280, 0x00008088, 0x00008088, 0x00008088 },
2602 { 0x00009a30, 0x00008284, 0x00008284, 0x0000808c, 0x0000808c, 0x0000808c },
2603 { 0x00009a34, 0x00008288, 0x00008288, 0x00008100, 0x00008100, 0x00008100 },
2604 { 0x00009a38, 0x0000828c, 0x0000828c, 0x00008104, 0x00008104, 0x00008104 },
2605 { 0x00009a3c, 0x00008290, 0x00008290, 0x00008108, 0x00008108, 0x00008108 },
2606 { 0x00009a40, 0x00008300, 0x00008300, 0x0000810c, 0x0000810c, 0x0000810c },
2607 { 0x00009a44, 0x00008304, 0x00008304, 0x00008110, 0x00008110, 0x00008110 },
2608 { 0x00009a48, 0x00008308, 0x00008308, 0x00008114, 0x00008114, 0x00008114 },
2609 { 0x00009a4c, 0x0000830c, 0x0000830c, 0x00008180, 0x00008180, 0x00008180 },
2610 { 0x00009a50, 0x00008310, 0x00008310, 0x00008184, 0x00008184, 0x00008184 },
2611 { 0x00009a54, 0x00008314, 0x00008314, 0x00008188, 0x00008188, 0x00008188 },
2612 { 0x00009a58, 0x00008380, 0x00008380, 0x0000818c, 0x0000818c, 0x0000818c },
2613 { 0x00009a5c, 0x00008384, 0x00008384, 0x00008190, 0x00008190, 0x00008190 },
2614 { 0x00009a60, 0x00008388, 0x00008388, 0x00008194, 0x00008194, 0x00008194 },
2615 { 0x00009a64, 0x0000838c, 0x0000838c, 0x000081a0, 0x000081a0, 0x000081a0 },
2616 { 0x00009a68, 0x00008390, 0x00008390, 0x0000820c, 0x0000820c, 0x0000820c },
2617 { 0x00009a6c, 0x00008394, 0x00008394, 0x000081a8, 0x000081a8, 0x000081a8 },
2618 { 0x00009a70, 0x0000a380, 0x0000a380, 0x00008284, 0x00008284, 0x00008284 },
2619 { 0x00009a74, 0x0000a384, 0x0000a384, 0x00008288, 0x00008288, 0x00008288 },
2620 { 0x00009a78, 0x0000a388, 0x0000a388, 0x00008224, 0x00008224, 0x00008224 },
2621 { 0x00009a7c, 0x0000a38c, 0x0000a38c, 0x00008290, 0x00008290, 0x00008290 },
2622 { 0x00009a80, 0x0000a390, 0x0000a390, 0x00008300, 0x00008300, 0x00008300 },
2623 { 0x00009a84, 0x0000a394, 0x0000a394, 0x00008304, 0x00008304, 0x00008304 },
2624 { 0x00009a88, 0x0000a780, 0x0000a780, 0x00008308, 0x00008308, 0x00008308 },
2625 { 0x00009a8c, 0x0000a784, 0x0000a784, 0x0000830c, 0x0000830c, 0x0000830c },
2626 { 0x00009a90, 0x0000a788, 0x0000a788, 0x00008380, 0x00008380, 0x00008380 },
2627 { 0x00009a94, 0x0000a78c, 0x0000a78c, 0x00008384, 0x00008384, 0x00008384 },
2628 { 0x00009a98, 0x0000a790, 0x0000a790, 0x00008700, 0x00008700, 0x00008700 },
2629 { 0x00009a9c, 0x0000a794, 0x0000a794, 0x00008704, 0x00008704, 0x00008704 },
2630 { 0x00009aa0, 0x0000ab84, 0x0000ab84, 0x00008708, 0x00008708, 0x00008708 },
2631 { 0x00009aa4, 0x0000ab88, 0x0000ab88, 0x0000870c, 0x0000870c, 0x0000870c },
2632 { 0x00009aa8, 0x0000ab8c, 0x0000ab8c, 0x00008780, 0x00008780, 0x00008780 },
2633 { 0x00009aac, 0x0000ab90, 0x0000ab90, 0x00008784, 0x00008784, 0x00008784 },
2634 { 0x00009ab0, 0x0000ab94, 0x0000ab94, 0x00008b00, 0x00008b00, 0x00008b00 },
2635 { 0x00009ab4, 0x0000af80, 0x0000af80, 0x00008b04, 0x00008b04, 0x00008b04 },
2636 { 0x00009ab8, 0x0000af84, 0x0000af84, 0x00008b08, 0x00008b08, 0x00008b08 },
2637 { 0x00009abc, 0x0000af88, 0x0000af88, 0x00008b0c, 0x00008b0c, 0x00008b0c },
2638 { 0x00009ac0, 0x0000af8c, 0x0000af8c, 0x00008b80, 0x00008b80, 0x00008b80 },
2639 { 0x00009ac4, 0x0000af90, 0x0000af90, 0x00008b84, 0x00008b84, 0x00008b84 },
2640 { 0x00009ac8, 0x0000af94, 0x0000af94, 0x00008b88, 0x00008b88, 0x00008b88 },
2641 { 0x00009acc, 0x0000b380, 0x0000b380, 0x00008b8c, 0x00008b8c, 0x00008b8c },
2642 { 0x00009ad0, 0x0000b384, 0x0000b384, 0x00008b90, 0x00008b90, 0x00008b90 },
2643 { 0x00009ad4, 0x0000b388, 0x0000b388, 0x00008f80, 0x00008f80, 0x00008f80 },
2644 { 0x00009ad8, 0x0000b38c, 0x0000b38c, 0x00008f84, 0x00008f84, 0x00008f84 },
2645 { 0x00009adc, 0x0000b390, 0x0000b390, 0x00008f88, 0x00008f88, 0x00008f88 },
2646 { 0x00009ae0, 0x0000b394, 0x0000b394, 0x00008f8c, 0x00008f8c, 0x00008f8c },
2647 { 0x00009ae4, 0x0000b398, 0x0000b398, 0x00008f90, 0x00008f90, 0x00008f90 },
2648 { 0x00009ae8, 0x0000b780, 0x0000b780, 0x0000930c, 0x0000930c, 0x0000930c },
2649 { 0x00009aec, 0x0000b784, 0x0000b784, 0x00009310, 0x00009310, 0x00009310 },
2650 { 0x00009af0, 0x0000b788, 0x0000b788, 0x00009384, 0x00009384, 0x00009384 },
2651 { 0x00009af4, 0x0000b78c, 0x0000b78c, 0x00009388, 0x00009388, 0x00009388 },
2652 { 0x00009af8, 0x0000b790, 0x0000b790, 0x00009324, 0x00009324, 0x00009324 },
2653 { 0x00009afc, 0x0000b794, 0x0000b794, 0x00009704, 0x00009704, 0x00009704 },
2654 { 0x00009b00, 0x0000b798, 0x0000b798, 0x000096a4, 0x000096a4, 0x000096a4 },
2655 { 0x00009b04, 0x0000d784, 0x0000d784, 0x000096a8, 0x000096a8, 0x000096a8 },
2656 { 0x00009b08, 0x0000d788, 0x0000d788, 0x00009710, 0x00009710, 0x00009710 },
2657 { 0x00009b0c, 0x0000d78c, 0x0000d78c, 0x00009714, 0x00009714, 0x00009714 },
2658 { 0x00009b10, 0x0000d790, 0x0000d790, 0x00009720, 0x00009720, 0x00009720 },
2659 { 0x00009b14, 0x0000f780, 0x0000f780, 0x00009724, 0x00009724, 0x00009724 },
2660 { 0x00009b18, 0x0000f784, 0x0000f784, 0x00009728, 0x00009728, 0x00009728 },
2661 { 0x00009b1c, 0x0000f788, 0x0000f788, 0x0000972c, 0x0000972c, 0x0000972c },
2662 { 0x00009b20, 0x0000f78c, 0x0000f78c, 0x000097a0, 0x000097a0, 0x000097a0 },
2663 { 0x00009b24, 0x0000f790, 0x0000f790, 0x000097a4, 0x000097a4, 0x000097a4 },
2664 { 0x00009b28, 0x0000f794, 0x0000f794, 0x000097a8, 0x000097a8, 0x000097a8 },
2665 { 0x00009b2c, 0x0000f7a4, 0x0000f7a4, 0x000097b0, 0x000097b0, 0x000097b0 },
2666 { 0x00009b30, 0x0000f7a8, 0x0000f7a8, 0x000097b4, 0x000097b4, 0x000097b4 },
2667 { 0x00009b34, 0x0000f7ac, 0x0000f7ac, 0x000097b8, 0x000097b8, 0x000097b8 },
2668 { 0x00009b38, 0x0000f7b0, 0x0000f7b0, 0x000097a5, 0x000097a5, 0x000097a5 },
2669 { 0x00009b3c, 0x0000f7b4, 0x0000f7b4, 0x000097a9, 0x000097a9, 0x000097a9 },
2670 { 0x00009b40, 0x0000f7a1, 0x0000f7a1, 0x000097ad, 0x000097ad, 0x000097ad },
2671 { 0x00009b44, 0x0000f7a5, 0x0000f7a5, 0x000097b1, 0x000097b1, 0x000097b1 },
2672 { 0x00009b48, 0x0000f7a9, 0x0000f7a9, 0x000097b5, 0x000097b5, 0x000097b5 },
2673 { 0x00009b4c, 0x0000f7ad, 0x0000f7ad, 0x000097b9, 0x000097b9, 0x000097b9 },
2674 { 0x00009b50, 0x0000f7b1, 0x0000f7b1, 0x000097c5, 0x000097c5, 0x000097c5 },
2675 { 0x00009b54, 0x0000f7b5, 0x0000f7b5, 0x000097c9, 0x000097c9, 0x000097c9 },
2676 { 0x00009b58, 0x0000f7c5, 0x0000f7c5, 0x000097d1, 0x000097d1, 0x000097d1 },
2677 { 0x00009b5c, 0x0000f7c9, 0x0000f7c9, 0x000097d5, 0x000097d5, 0x000097d5 },
2678 { 0x00009b60, 0x0000f7cd, 0x0000f7cd, 0x000097d9, 0x000097d9, 0x000097d9 },
2679 { 0x00009b64, 0x0000f7d1, 0x0000f7d1, 0x000097c6, 0x000097c6, 0x000097c6 },
2680 { 0x00009b68, 0x0000f7d5, 0x0000f7d5, 0x000097ca, 0x000097ca, 0x000097ca },
2681 { 0x00009b6c, 0x0000f7c2, 0x0000f7c2, 0x000097ce, 0x000097ce, 0x000097ce },
2682 { 0x00009b70, 0x0000f7c6, 0x0000f7c6, 0x000097d2, 0x000097d2, 0x000097d2 },
2683 { 0x00009b74, 0x0000f7ca, 0x0000f7ca, 0x000097d6, 0x000097d6, 0x000097d6 },
2684 { 0x00009b78, 0x0000f7ce, 0x0000f7ce, 0x000097c3, 0x000097c3, 0x000097c3 },
2685 { 0x00009b7c, 0x0000f7d2, 0x0000f7d2, 0x000097c7, 0x000097c7, 0x000097c7 },
2686 { 0x00009b80, 0x0000f7d6, 0x0000f7d6, 0x000097cb, 0x000097cb, 0x000097cb },
2687 { 0x00009b84, 0x0000f7c3, 0x0000f7c3, 0x000097cf, 0x000097cf, 0x000097cf },
2688 { 0x00009b88, 0x0000f7c7, 0x0000f7c7, 0x000097d7, 0x000097d7, 0x000097d7 },
2689 { 0x00009b8c, 0x0000f7cb, 0x0000f7cb, 0x000097db, 0x000097db, 0x000097db },
2690 { 0x00009b90, 0x0000f7d3, 0x0000f7d3, 0x000097db, 0x000097db, 0x000097db },
2691 { 0x00009b94, 0x0000f7d7, 0x0000f7d7, 0x000097db, 0x000097db, 0x000097db },
2692 { 0x00009b98, 0x0000f7db, 0x0000f7db, 0x000097db, 0x000097db, 0x000097db },
2693 { 0x00009b9c, 0x0000f7db, 0x0000f7db, 0x000097db, 0x000097db, 0x000097db },
2694 { 0x00009ba0, 0x0000f7db, 0x0000f7db, 0x000097db, 0x000097db, 0x000097db },
2695 { 0x00009ba4, 0x0000f7db, 0x0000f7db, 0x000097db, 0x000097db, 0x000097db },
2696 { 0x00009ba8, 0x0000f7db, 0x0000f7db, 0x000097db, 0x000097db, 0x000097db },
2697 { 0x00009bac, 0x0000f7db, 0x0000f7db, 0x000097db, 0x000097db, 0x000097db },
2698 { 0x00009bb0, 0x0000f7db, 0x0000f7db, 0x000097db, 0x000097db, 0x000097db },
2699 { 0x00009bb4, 0x0000f7db, 0x0000f7db, 0x000097db, 0x000097db, 0x000097db },
2700 { 0x00009bb8, 0x0000f7db, 0x0000f7db, 0x000097db, 0x000097db, 0x000097db },
2701 { 0x00009bbc, 0x0000f7db, 0x0000f7db, 0x000097db, 0x000097db, 0x000097db },
2702 { 0x00009bc0, 0x0000f7db, 0x0000f7db, 0x000097db, 0x000097db, 0x000097db },
2703 { 0x00009bc4, 0x0000f7db, 0x0000f7db, 0x000097db, 0x000097db, 0x000097db },
2704 { 0x00009bc8, 0x0000f7db, 0x0000f7db, 0x000097db, 0x000097db, 0x000097db },
2705 { 0x00009bcc, 0x0000f7db, 0x0000f7db, 0x000097db, 0x000097db, 0x000097db },
2706 { 0x00009bd0, 0x0000f7db, 0x0000f7db, 0x000097db, 0x000097db, 0x000097db },
2707 { 0x00009bd4, 0x0000f7db, 0x0000f7db, 0x000097db, 0x000097db, 0x000097db },
2708 { 0x00009bd8, 0x0000f7db, 0x0000f7db, 0x000097db, 0x000097db, 0x000097db },
2709 { 0x00009bdc, 0x0000f7db, 0x0000f7db, 0x000097db, 0x000097db, 0x000097db },
2710 { 0x00009be0, 0x0000f7db, 0x0000f7db, 0x000097db, 0x000097db, 0x000097db },
2711 { 0x00009be4, 0x0000f7db, 0x0000f7db, 0x000097db, 0x000097db, 0x000097db },
2712 { 0x00009be8, 0x0000f7db, 0x0000f7db, 0x000097db, 0x000097db, 0x000097db },
2713 { 0x00009bec, 0x0000f7db, 0x0000f7db, 0x000097db, 0x000097db, 0x000097db },
2714 { 0x00009bf0, 0x0000f7db, 0x0000f7db, 0x000097db, 0x000097db, 0x000097db },
2715 { 0x00009bf4, 0x0000f7db, 0x0000f7db, 0x000097db, 0x000097db, 0x000097db },
2716 { 0x00009bf8, 0x0000f7db, 0x0000f7db, 0x000097db, 0x000097db, 0x000097db },
2717 { 0x00009bfc, 0x0000f7db, 0x0000f7db, 0x000097db, 0x000097db, 0x000097db },
2718 { 0x0000a204, 0x00000444, 0x00000444, 0x00000444, 0x00000444, 0x00000444 },
2719 { 0x0000a208, 0x803e4788, 0x803e4788, 0x803e4788, 0x803e4788, 0x803e4788 },
2720 { 0x0000a20c, 0x00000014, 0x00000014, 0x0001f019, 0x0001f019, 0x0001f019 },
2721 { 0x0000b20c, 0x00000014, 0x00000014, 0x0001f019, 0x0001f019, 0x0001f019 },
2722 { 0x0000a21c, 0x1463800a, 0x1463800a, 0x1463800a, 0x1463800a, 0x1463800a },
2723 { 0x0000a230, 0x00000000, 0x00000000, 0x00000210, 0x00000108, 0x00000000 },
2724 { 0x0000a250, 0x001ff000, 0x001ff000, 0x001da000, 0x001da000, 0x001da000 },
2725 { 0x0000a274, 0x0a19c652, 0x0a19c652, 0x0a1aa652, 0x0a1aa652, 0x0a1aa652 },
2726 { 0x0000a300, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 },
2727 { 0x0000a304, 0x00003002, 0x00003002, 0x00003002, 0x00003002, 0x00003002 },
2728 { 0x0000a308, 0x00006004, 0x00006004, 0x00008009, 0x00008009, 0x00008009 },
2729 { 0x0000a30c, 0x0000a006, 0x0000a006, 0x0000b00b, 0x0000b00b, 0x0000b00b },
2730 { 0x0000a310, 0x0000e012, 0x0000e012, 0x0000e012, 0x0000e012, 0x0000e012 },
2731 { 0x0000a314, 0x00011014, 0x00011014, 0x00012048, 0x00012048, 0x00012048 },
2732 { 0x0000a318, 0x0001504a, 0x0001504a, 0x0001604a, 0x0001604a, 0x0001604a },
2733 { 0x0000a31c, 0x0001904c, 0x0001904c, 0x0001a211, 0x0001a211, 0x0001a211 },
2734 { 0x0000a320, 0x0001c04e, 0x0001c04e, 0x0001e213, 0x0001e213, 0x0001e213 },
2735 { 0x0000a324, 0x00020092, 0x00020092, 0x0002121b, 0x0002121b, 0x0002121b },
2736 { 0x0000a328, 0x0002410a, 0x0002410a, 0x00024412, 0x00024412, 0x00024412 },
2737 { 0x0000a32c, 0x0002710c, 0x0002710c, 0x00028414, 0x00028414, 0x00028414 },
2738 { 0x0000a330, 0x0002b18b, 0x0002b18b, 0x0002b44a, 0x0002b44a, 0x0002b44a },
2739 { 0x0000a334, 0x0002e1cc, 0x0002e1cc, 0x00030649, 0x00030649, 0x00030649 },
2740 { 0x0000a338, 0x000321ec, 0x000321ec, 0x0003364b, 0x0003364b, 0x0003364b },
2741 { 0x0000a33c, 0x000321ec, 0x000321ec, 0x00038a49, 0x00038a49, 0x00038a49 },
2742 { 0x0000a340, 0x000321ec, 0x000321ec, 0x0003be48, 0x0003be48, 0x0003be48 },
2743 { 0x0000a344, 0x000321ec, 0x000321ec, 0x0003ee4a, 0x0003ee4a, 0x0003ee4a },
2744 { 0x0000a348, 0x000321ec, 0x000321ec, 0x00042e88, 0x00042e88, 0x00042e88 },
2745 { 0x0000a34c, 0x000321ec, 0x000321ec, 0x00046e8a, 0x00046e8a, 0x00046e8a },
2746 { 0x0000a350, 0x000321ec, 0x000321ec, 0x00049ec9, 0x00049ec9, 0x00049ec9 },
2747 { 0x0000a354, 0x000321ec, 0x000321ec, 0x0004bf42, 0x0004bf42, 0x0004bf42 },
2748 { 0x0000a358, 0x7999aa02, 0x7999aa02, 0x7999aa0e, 0x7999aa0e, 0x7999aa0e },
2749 { 0x0000a3d8, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 },
2750 { 0x00007894, 0x5a508000, 0x5a508000, 0x5a508000, 0x5a508000, 0x5a508000 },
2751};
2752
2753static const u32 ar9280Common_9280_2[][2] = {
2754 { 0x0000000c, 0x00000000 },
2755 { 0x00000030, 0x00020015 },
2756 { 0x00000034, 0x00000005 },
2757 { 0x00000040, 0x00000000 },
2758 { 0x00000044, 0x00000008 },
2759 { 0x00000048, 0x00000008 },
2760 { 0x0000004c, 0x00000010 },
2761 { 0x00000050, 0x00000000 },
2762 { 0x00000054, 0x0000001f },
2763 { 0x00000800, 0x00000000 },
2764 { 0x00000804, 0x00000000 },
2765 { 0x00000808, 0x00000000 },
2766 { 0x0000080c, 0x00000000 },
2767 { 0x00000810, 0x00000000 },
2768 { 0x00000814, 0x00000000 },
2769 { 0x00000818, 0x00000000 },
2770 { 0x0000081c, 0x00000000 },
2771 { 0x00000820, 0x00000000 },
2772 { 0x00000824, 0x00000000 },
2773 { 0x00001040, 0x002ffc0f },
2774 { 0x00001044, 0x002ffc0f },
2775 { 0x00001048, 0x002ffc0f },
2776 { 0x0000104c, 0x002ffc0f },
2777 { 0x00001050, 0x002ffc0f },
2778 { 0x00001054, 0x002ffc0f },
2779 { 0x00001058, 0x002ffc0f },
2780 { 0x0000105c, 0x002ffc0f },
2781 { 0x00001060, 0x002ffc0f },
2782 { 0x00001064, 0x002ffc0f },
2783 { 0x00001230, 0x00000000 },
2784 { 0x00001270, 0x00000000 },
2785 { 0x00001038, 0x00000000 },
2786 { 0x00001078, 0x00000000 },
2787 { 0x000010b8, 0x00000000 },
2788 { 0x000010f8, 0x00000000 },
2789 { 0x00001138, 0x00000000 },
2790 { 0x00001178, 0x00000000 },
2791 { 0x000011b8, 0x00000000 },
2792 { 0x000011f8, 0x00000000 },
2793 { 0x00001238, 0x00000000 },
2794 { 0x00001278, 0x00000000 },
2795 { 0x000012b8, 0x00000000 },
2796 { 0x000012f8, 0x00000000 },
2797 { 0x00001338, 0x00000000 },
2798 { 0x00001378, 0x00000000 },
2799 { 0x000013b8, 0x00000000 },
2800 { 0x000013f8, 0x00000000 },
2801 { 0x00001438, 0x00000000 },
2802 { 0x00001478, 0x00000000 },
2803 { 0x000014b8, 0x00000000 },
2804 { 0x000014f8, 0x00000000 },
2805 { 0x00001538, 0x00000000 },
2806 { 0x00001578, 0x00000000 },
2807 { 0x000015b8, 0x00000000 },
2808 { 0x000015f8, 0x00000000 },
2809 { 0x00001638, 0x00000000 },
2810 { 0x00001678, 0x00000000 },
2811 { 0x000016b8, 0x00000000 },
2812 { 0x000016f8, 0x00000000 },
2813 { 0x00001738, 0x00000000 },
2814 { 0x00001778, 0x00000000 },
2815 { 0x000017b8, 0x00000000 },
2816 { 0x000017f8, 0x00000000 },
2817 { 0x0000103c, 0x00000000 },
2818 { 0x0000107c, 0x00000000 },
2819 { 0x000010bc, 0x00000000 },
2820 { 0x000010fc, 0x00000000 },
2821 { 0x0000113c, 0x00000000 },
2822 { 0x0000117c, 0x00000000 },
2823 { 0x000011bc, 0x00000000 },
2824 { 0x000011fc, 0x00000000 },
2825 { 0x0000123c, 0x00000000 },
2826 { 0x0000127c, 0x00000000 },
2827 { 0x000012bc, 0x00000000 },
2828 { 0x000012fc, 0x00000000 },
2829 { 0x0000133c, 0x00000000 },
2830 { 0x0000137c, 0x00000000 },
2831 { 0x000013bc, 0x00000000 },
2832 { 0x000013fc, 0x00000000 },
2833 { 0x0000143c, 0x00000000 },
2834 { 0x0000147c, 0x00000000 },
2835 { 0x00004030, 0x00000002 },
2836 { 0x0000403c, 0x00000002 },
2837 { 0x00004024, 0x0000001f },
2838 { 0x00004060, 0x00000000 },
2839 { 0x00004064, 0x00000000 },
2840 { 0x00007010, 0x00000033 },
2841 { 0x00007034, 0x00000002 },
2842 { 0x00007038, 0x000004c2 },
2843 { 0x00008004, 0x00000000 },
2844 { 0x00008008, 0x00000000 },
2845 { 0x0000800c, 0x00000000 },
2846 { 0x00008018, 0x00000700 },
2847 { 0x00008020, 0x00000000 },
2848 { 0x00008038, 0x00000000 },
2849 { 0x0000803c, 0x00000000 },
2850 { 0x00008048, 0x40000000 },
2851 { 0x00008054, 0x00000000 },
2852 { 0x00008058, 0x00000000 },
2853 { 0x0000805c, 0x000fc78f },
2854 { 0x00008060, 0x0000000f },
2855 { 0x00008064, 0x00000000 },
2856 { 0x00008070, 0x00000000 },
2857 { 0x000080c0, 0x2a80001a },
2858 { 0x000080c4, 0x05dc01e0 },
2859 { 0x000080c8, 0x1f402710 },
2860 { 0x000080cc, 0x01f40000 },
2861 { 0x000080d0, 0x00001e00 },
2862 { 0x000080d4, 0x00000000 },
2863 { 0x000080d8, 0x00400000 },
2864 { 0x000080e0, 0xffffffff },
2865 { 0x000080e4, 0x0000ffff },
2866 { 0x000080e8, 0x003f3f3f },
2867 { 0x000080ec, 0x00000000 },
2868 { 0x000080f0, 0x00000000 },
2869 { 0x000080f4, 0x00000000 },
2870 { 0x000080f8, 0x00000000 },
2871 { 0x000080fc, 0x00020000 },
2872 { 0x00008100, 0x00020000 },
2873 { 0x00008104, 0x00000001 },
2874 { 0x00008108, 0x00000052 },
2875 { 0x0000810c, 0x00000000 },
2876 { 0x00008110, 0x00000168 },
2877 { 0x00008118, 0x000100aa },
2878 { 0x0000811c, 0x00003210 },
2879 { 0x00008120, 0x08f04800 },
2880 { 0x00008124, 0x00000000 },
2881 { 0x00008128, 0x00000000 },
2882 { 0x0000812c, 0x00000000 },
2883 { 0x00008130, 0x00000000 },
2884 { 0x00008134, 0x00000000 },
2885 { 0x00008138, 0x00000000 },
2886 { 0x0000813c, 0x00000000 },
2887 { 0x00008144, 0x00000000 },
2888 { 0x00008168, 0x00000000 },
2889 { 0x0000816c, 0x00000000 },
2890 { 0x00008170, 0x32143320 },
2891 { 0x00008174, 0xfaa4fa50 },
2892 { 0x00008178, 0x00000100 },
2893 { 0x0000817c, 0x00000000 },
2894 { 0x000081c0, 0x00000000 },
2895 { 0x000081d0, 0x00003210 },
2896 { 0x000081ec, 0x00000000 },
2897 { 0x000081f0, 0x00000000 },
2898 { 0x000081f4, 0x00000000 },
2899 { 0x000081f8, 0x00000000 },
2900 { 0x000081fc, 0x00000000 },
2901 { 0x00008200, 0x00000000 },
2902 { 0x00008204, 0x00000000 },
2903 { 0x00008208, 0x00000000 },
2904 { 0x0000820c, 0x00000000 },
2905 { 0x00008210, 0x00000000 },
2906 { 0x00008214, 0x00000000 },
2907 { 0x00008218, 0x00000000 },
2908 { 0x0000821c, 0x00000000 },
2909 { 0x00008220, 0x00000000 },
2910 { 0x00008224, 0x00000000 },
2911 { 0x00008228, 0x00000000 },
2912 { 0x0000822c, 0x00000000 },
2913 { 0x00008230, 0x00000000 },
2914 { 0x00008234, 0x00000000 },
2915 { 0x00008238, 0x00000000 },
2916 { 0x0000823c, 0x00000000 },
2917 { 0x00008240, 0x00100000 },
2918 { 0x00008244, 0x0010f400 },
2919 { 0x00008248, 0x00000100 },
2920 { 0x0000824c, 0x0001e800 },
2921 { 0x00008250, 0x00000000 },
2922 { 0x00008254, 0x00000000 },
2923 { 0x00008258, 0x00000000 },
2924 { 0x0000825c, 0x400000ff },
2925 { 0x00008260, 0x00080922 },
2926 { 0x00008270, 0x00000000 },
2927 { 0x00008274, 0x40000000 },
2928 { 0x00008278, 0x003e4180 },
2929 { 0x0000827c, 0x00000000 },
2930 { 0x00008284, 0x0000002c },
2931 { 0x00008288, 0x0000002c },
2932 { 0x0000828c, 0x00000000 },
2933 { 0x00008294, 0x00000000 },
2934 { 0x00008298, 0x00000000 },
2935 { 0x0000829c, 0x00000000 },
2936 { 0x00008300, 0x00000040 },
2937 { 0x00008314, 0x00000000 },
2938 { 0x00008328, 0x00000000 },
2939 { 0x0000832c, 0x00000007 },
2940 { 0x00008330, 0x00000302 },
2941 { 0x00008334, 0x00000e00 },
2942 { 0x00008338, 0x00000000 },
2943 { 0x0000833c, 0x00000000 },
2944 { 0x00008340, 0x000107ff },
2945 { 0x00008344, 0x00581043 },
2946 { 0x00009808, 0x00000000 },
2947 { 0x0000980c, 0xafa68e30 },
2948 { 0x00009810, 0xfd14e000 },
2949 { 0x00009814, 0x9c0a9f6b },
2950 { 0x0000981c, 0x00000000 },
2951 { 0x0000982c, 0x0000a000 },
2952 { 0x00009830, 0x00000000 },
2953 { 0x0000983c, 0x00200400 },
2954 { 0x0000984c, 0x0040233c },
2955 { 0x0000a84c, 0x0040233c },
2956 { 0x00009854, 0x00000044 },
2957 { 0x00009900, 0x00000000 },
2958 { 0x00009904, 0x00000000 },
2959 { 0x00009908, 0x00000000 },
2960 { 0x0000990c, 0x00000000 },
2961 { 0x00009910, 0x01002310 },
2962 { 0x0000991c, 0x10000fff },
2963 { 0x00009920, 0x04900000 },
2964 { 0x0000a920, 0x04900000 },
2965 { 0x00009928, 0x00000001 },
2966 { 0x0000992c, 0x00000004 },
2967 { 0x00009934, 0x1e1f2022 },
2968 { 0x00009938, 0x0a0b0c0d },
2969 { 0x0000993c, 0x00000000 },
2970 { 0x00009948, 0x9280c00a },
2971 { 0x0000994c, 0x00020028 },
2972 { 0x00009954, 0x5f3ca3de },
2973 { 0x00009958, 0x2108ecff },
2974 { 0x00009940, 0x14750604 },
2975 { 0x0000c95c, 0x004b6a8e },
2976 { 0x0000c968, 0x000003ce },
2977 { 0x00009970, 0x190fb515 },
2978 { 0x00009974, 0x00000000 },
2979 { 0x00009978, 0x00000001 },
2980 { 0x0000997c, 0x00000000 },
2981 { 0x00009980, 0x00000000 },
2982 { 0x00009984, 0x00000000 },
2983 { 0x00009988, 0x00000000 },
2984 { 0x0000998c, 0x00000000 },
2985 { 0x00009990, 0x00000000 },
2986 { 0x00009994, 0x00000000 },
2987 { 0x00009998, 0x00000000 },
2988 { 0x0000999c, 0x00000000 },
2989 { 0x000099a0, 0x00000000 },
2990 { 0x000099a4, 0x00000001 },
2991 { 0x000099a8, 0x201fff00 },
2992 { 0x000099ac, 0x006f0000 },
2993 { 0x000099b0, 0x03051000 },
2994 { 0x000099b4, 0x00000820 },
2995 { 0x000099dc, 0x00000000 },
2996 { 0x000099e0, 0x00000000 },
2997 { 0x000099e4, 0xaaaaaaaa },
2998 { 0x000099e8, 0x3c466478 },
2999 { 0x000099ec, 0x0cc80caa },
3000 { 0x000099f0, 0x00000000 },
3001 { 0x000099fc, 0x00001042 },
3002 { 0x0000a210, 0x4080a333 },
3003 { 0x0000a214, 0x40206c10 },
3004 { 0x0000a218, 0x009c4060 },
3005 { 0x0000a220, 0x01834061 },
3006 { 0x0000a224, 0x00000400 },
3007 { 0x0000a228, 0x000003b5 },
3008 { 0x0000a22c, 0x233f71c0 },
3009 { 0x0000a234, 0x20202020 },
3010 { 0x0000a238, 0x20202020 },
3011 { 0x0000a23c, 0x13c88000 },
3012 { 0x0000a240, 0x38490a20 },
3013 { 0x0000a244, 0x00007bb6 },
3014 { 0x0000a248, 0x0fff3ffc },
3015 { 0x0000a24c, 0x00000000 },
3016 { 0x0000a254, 0x00000000 },
3017 { 0x0000a258, 0x0cdbd380 },
3018 { 0x0000a25c, 0x0f0f0f01 },
3019 { 0x0000a260, 0xdfa91f01 },
3020 { 0x0000a268, 0x00000000 },
3021 { 0x0000a26c, 0x0ebae9c6 },
3022 { 0x0000b26c, 0x0ebae9c6 },
3023 { 0x0000d270, 0x00820820 },
3024 { 0x0000a278, 0x1ce739ce },
3025 { 0x0000a27c, 0x050701ce },
3026 { 0x0000d35c, 0x07ffffef },
3027 { 0x0000d360, 0x0fffffe7 },
3028 { 0x0000d364, 0x17ffffe5 },
3029 { 0x0000d368, 0x1fffffe4 },
3030 { 0x0000d36c, 0x37ffffe3 },
3031 { 0x0000d370, 0x3fffffe3 },
3032 { 0x0000d374, 0x57ffffe3 },
3033 { 0x0000d378, 0x5fffffe2 },
3034 { 0x0000d37c, 0x7fffffe2 },
3035 { 0x0000d380, 0x7f3c7bba },
3036 { 0x0000d384, 0xf3307ff0 },
3037 { 0x0000a388, 0x0c000000 },
3038 { 0x0000a38c, 0x20202020 },
3039 { 0x0000a390, 0x20202020 },
3040 { 0x0000a394, 0x1ce739ce },
3041 { 0x0000a398, 0x000001ce },
3042 { 0x0000a39c, 0x00000001 },
3043 { 0x0000a3a0, 0x00000000 },
3044 { 0x0000a3a4, 0x00000000 },
3045 { 0x0000a3a8, 0x00000000 },
3046 { 0x0000a3ac, 0x00000000 },
3047 { 0x0000a3b0, 0x00000000 },
3048 { 0x0000a3b4, 0x00000000 },
3049 { 0x0000a3b8, 0x00000000 },
3050 { 0x0000a3bc, 0x00000000 },
3051 { 0x0000a3c0, 0x00000000 },
3052 { 0x0000a3c4, 0x00000000 },
3053 { 0x0000a3c8, 0x00000246 },
3054 { 0x0000a3cc, 0x20202020 },
3055 { 0x0000a3d0, 0x20202020 },
3056 { 0x0000a3d4, 0x20202020 },
3057 { 0x0000a3dc, 0x1ce739ce },
3058 { 0x0000a3e0, 0x000001ce },
3059 { 0x0000a3e4, 0x00000000 },
3060 { 0x0000a3e8, 0x18c43433 },
3061 { 0x0000a3ec, 0x00f70081 },
3062 { 0x00007800, 0x00040000 },
3063 { 0x00007804, 0xdb005012 },
3064 { 0x00007808, 0x04924914 },
3065 { 0x0000780c, 0x21084210 },
3066 { 0x00007810, 0x6d801300 },
3067 { 0x00007814, 0x0019beff },
3068 { 0x00007818, 0x07e41000 },
3069 { 0x0000781c, 0x00392000 },
3070 { 0x00007820, 0x92592480 },
3071 { 0x00007824, 0x00040000 },
3072 { 0x00007828, 0xdb005012 },
3073 { 0x0000782c, 0x04924914 },
3074 { 0x00007830, 0x21084210 },
3075 { 0x00007834, 0x6d801300 },
3076 { 0x00007838, 0x0019beff },
3077 { 0x0000783c, 0x07e40000 },
3078 { 0x00007840, 0x00392000 },
3079 { 0x00007844, 0x92592480 },
3080 { 0x00007848, 0x00100000 },
3081 { 0x0000784c, 0x773f0567 },
3082 { 0x00007850, 0x54214514 },
3083 { 0x00007854, 0x12035828 },
3084 { 0x00007858, 0x9259269a },
3085 { 0x00007860, 0x52802000 },
3086 { 0x00007864, 0x0a8e370e },
3087 { 0x00007868, 0xc0102850 },
3088 { 0x0000786c, 0x812d4000 },
3089 { 0x00007870, 0x807ec400 },
3090 { 0x00007874, 0x001b6db0 },
3091 { 0x00007878, 0x00376b63 },
3092 { 0x0000787c, 0x06db6db6 },
3093 { 0x00007880, 0x006d8000 },
3094 { 0x00007884, 0xffeffffe },
3095 { 0x00007888, 0xffeffffe },
3096 { 0x0000788c, 0x00010000 },
3097 { 0x00007890, 0x02060aeb },
3098 { 0x00007898, 0x2a850160 },
3099};
3100
3101static const u32 ar9280Modes_fast_clock_9280_2[][3] = {
3102 { 0x00001030, 0x00000268, 0x000004d0 },
3103 { 0x00001070, 0x0000018c, 0x00000318 },
3104 { 0x000010b0, 0x00000fd0, 0x00001fa0 },
3105 { 0x00008014, 0x044c044c, 0x08980898 },
3106 { 0x0000801c, 0x148ec02b, 0x148ec057 },
3107 { 0x00008318, 0x000044c0, 0x00008980 },
3108 { 0x00009820, 0x02020200, 0x02020200 },
3109 { 0x00009824, 0x00000f0f, 0x00000f0f },
3110 { 0x00009828, 0x0b020001, 0x0b020001 },
3111 { 0x00009834, 0x00000f0f, 0x00000f0f },
3112 { 0x00009844, 0x03721821, 0x03721821 },
3113 { 0x00009914, 0x00000898, 0x00000898 },
3114 { 0x00009918, 0x0000000b, 0x00000016 },
3115 { 0x00009944, 0xdfbc1210, 0xdfbc1210 },
3116};
3117
3118
3119
3120static const u32 ar9280PciePhy_clkreq_off_L1_9280[][2] = {
3121 {0x00004040, 0x9248fd00 },
3122 {0x00004040, 0x24924924 },
3123 {0x00004040, 0xa8000019 },
3124 {0x00004040, 0x13160820 },
3125 {0x00004040, 0xe5980560 },
3126 {0x00004040, 0x401dcffc },
3127 {0x00004040, 0x1aaabe40 },
3128 {0x00004040, 0xbe105554 },
3129 {0x00004040, 0x00043007 },
3130 {0x00004044, 0x00000000 },
3131};
3132
3133
3134
3135static const u32 ar9280PciePhy_clkreq_always_on_L1_9280[][2] = {
3136 {0x00004040, 0x9248fd00 },
3137 {0x00004040, 0x24924924 },
3138 {0x00004040, 0xa8000019 },
3139 {0x00004040, 0x13160820 },
3140 {0x00004040, 0xe5980560 },
3141 {0x00004040, 0x401dcffd },
3142 {0x00004040, 0x1aaabe40 },
3143 {0x00004040, 0xbe105554 },
3144 {0x00004040, 0x00043007 },
3145 {0x00004044, 0x00000000 },
3146};
diff --git a/drivers/net/wireless/ath9k/main.c b/drivers/net/wireless/ath9k/main.c
new file mode 100644
index 000000000000..2888778040e4
--- /dev/null
+++ b/drivers/net/wireless/ath9k/main.c
@@ -0,0 +1,1470 @@
1/*
2 * Copyright (c) 2008 Atheros Communications Inc.
3 *
4 * Permission to use, copy, modify, and/or distribute this software for any
5 * purpose with or without fee is hereby granted, provided that the above
6 * copyright notice and this permission notice appear in all copies.
7 *
8 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
9 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
10 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
11 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
12 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
13 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
14 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
15 */
16
17/* mac80211 and PCI callbacks */
18
19#include <linux/nl80211.h>
20#include "core.h"
21
22#define ATH_PCI_VERSION "0.1"
23
24#define IEEE80211_HTCAP_MAXRXAMPDU_FACTOR 13
25#define IEEE80211_ACTION_CAT_HT 7
26#define IEEE80211_ACTION_HT_TXCHWIDTH 0
27
28static char *dev_info = "ath9k";
29
30MODULE_AUTHOR("Atheros Communications");
31MODULE_DESCRIPTION("Support for Atheros 802.11n wireless LAN cards.");
32MODULE_SUPPORTED_DEVICE("Atheros 802.11n WLAN cards");
33MODULE_LICENSE("Dual BSD/GPL");
34
35static struct pci_device_id ath_pci_id_table[] __devinitdata = {
36 { PCI_VDEVICE(ATHEROS, 0x0023) }, /* PCI */
37 { PCI_VDEVICE(ATHEROS, 0x0024) }, /* PCI-E */
38 { PCI_VDEVICE(ATHEROS, 0x0027) }, /* PCI */
39 { PCI_VDEVICE(ATHEROS, 0x0029) }, /* PCI */
40 { PCI_VDEVICE(ATHEROS, 0x002A) }, /* PCI-E */
41 { 0 }
42};
43
44static int ath_get_channel(struct ath_softc *sc,
45 struct ieee80211_channel *chan)
46{
47 int i;
48
49 for (i = 0; i < sc->sc_ah->ah_nchan; i++) {
50 if (sc->sc_ah->ah_channels[i].channel == chan->center_freq)
51 return i;
52 }
53
54 return -1;
55}
56
57static u32 ath_get_extchanmode(struct ath_softc *sc,
58 struct ieee80211_channel *chan)
59{
60 u32 chanmode = 0;
61 u8 ext_chan_offset = sc->sc_ht_info.ext_chan_offset;
62 enum ath9k_ht_macmode tx_chan_width = sc->sc_ht_info.tx_chan_width;
63
64 switch (chan->band) {
65 case IEEE80211_BAND_2GHZ:
66 if ((ext_chan_offset == IEEE80211_HT_IE_CHA_SEC_NONE) &&
67 (tx_chan_width == ATH9K_HT_MACMODE_20))
68 chanmode = CHANNEL_G_HT20;
69 if ((ext_chan_offset == IEEE80211_HT_IE_CHA_SEC_ABOVE) &&
70 (tx_chan_width == ATH9K_HT_MACMODE_2040))
71 chanmode = CHANNEL_G_HT40PLUS;
72 if ((ext_chan_offset == IEEE80211_HT_IE_CHA_SEC_BELOW) &&
73 (tx_chan_width == ATH9K_HT_MACMODE_2040))
74 chanmode = CHANNEL_G_HT40MINUS;
75 break;
76 case IEEE80211_BAND_5GHZ:
77 if ((ext_chan_offset == IEEE80211_HT_IE_CHA_SEC_NONE) &&
78 (tx_chan_width == ATH9K_HT_MACMODE_20))
79 chanmode = CHANNEL_A_HT20;
80 if ((ext_chan_offset == IEEE80211_HT_IE_CHA_SEC_ABOVE) &&
81 (tx_chan_width == ATH9K_HT_MACMODE_2040))
82 chanmode = CHANNEL_A_HT40PLUS;
83 if ((ext_chan_offset == IEEE80211_HT_IE_CHA_SEC_BELOW) &&
84 (tx_chan_width == ATH9K_HT_MACMODE_2040))
85 chanmode = CHANNEL_A_HT40MINUS;
86 break;
87 default:
88 break;
89 }
90
91 return chanmode;
92}
93
94
95static int ath_setkey_tkip(struct ath_softc *sc,
96 struct ieee80211_key_conf *key,
97 struct ath9k_keyval *hk,
98 const u8 *addr)
99{
100 u8 *key_rxmic = NULL;
101 u8 *key_txmic = NULL;
102
103 key_txmic = key->key + NL80211_TKIP_DATA_OFFSET_TX_MIC_KEY;
104 key_rxmic = key->key + NL80211_TKIP_DATA_OFFSET_RX_MIC_KEY;
105
106 if (addr == NULL) {
107 /* Group key installation */
108 memcpy(hk->kv_mic, key_rxmic, sizeof(hk->kv_mic));
109 return ath_keyset(sc, key->keyidx, hk, addr);
110 }
111 if (!sc->sc_splitmic) {
112 /*
113 * data key goes at first index,
114 * the hal handles the MIC keys at index+64.
115 */
116 memcpy(hk->kv_mic, key_rxmic, sizeof(hk->kv_mic));
117 memcpy(hk->kv_txmic, key_txmic, sizeof(hk->kv_txmic));
118 return ath_keyset(sc, key->keyidx, hk, addr);
119 }
120 /*
121 * TX key goes at first index, RX key at +32.
122 * The hal handles the MIC keys at index+64.
123 */
124 memcpy(hk->kv_mic, key_txmic, sizeof(hk->kv_mic));
125 if (!ath_keyset(sc, key->keyidx, hk, NULL)) {
126 /* Txmic entry failed. No need to proceed further */
127 DPRINTF(sc, ATH_DBG_KEYCACHE,
128 "%s Setting TX MIC Key Failed\n", __func__);
129 return 0;
130 }
131
132 memcpy(hk->kv_mic, key_rxmic, sizeof(hk->kv_mic));
133 /* XXX delete tx key on failure? */
134 return ath_keyset(sc, key->keyidx+32, hk, addr);
135}
136
137static int ath_key_config(struct ath_softc *sc,
138 const u8 *addr,
139 struct ieee80211_key_conf *key)
140{
141 struct ieee80211_vif *vif;
142 struct ath9k_keyval hk;
143 const u8 *mac = NULL;
144 int ret = 0;
145 enum ieee80211_if_types opmode;
146
147 memset(&hk, 0, sizeof(hk));
148
149 switch (key->alg) {
150 case ALG_WEP:
151 hk.kv_type = ATH9K_CIPHER_WEP;
152 break;
153 case ALG_TKIP:
154 hk.kv_type = ATH9K_CIPHER_TKIP;
155 break;
156 case ALG_CCMP:
157 hk.kv_type = ATH9K_CIPHER_AES_CCM;
158 break;
159 default:
160 return -EINVAL;
161 }
162
163 hk.kv_len = key->keylen;
164 memcpy(hk.kv_val, key->key, key->keylen);
165
166 if (!sc->sc_vaps[0])
167 return -EIO;
168
169 vif = sc->sc_vaps[0]->av_if_data;
170 opmode = vif->type;
171
172 /*
173 * Strategy:
174	 * For _M_STA mc tx, we will not set up a key at all since we never
175 * tx mc.
176 * _M_STA mc rx, we will use the keyID.
177 * for _M_IBSS mc tx, we will use the keyID, and no macaddr.
178 * for _M_IBSS mc rx, we will alloc a slot and plumb the mac of the
179 * peer node. BUT we will plumb a cleartext key so that we can do
180 * perSta default key table lookup in software.
181 */
182 if (is_broadcast_ether_addr(addr)) {
183 switch (opmode) {
184 case IEEE80211_IF_TYPE_STA:
185 /* default key: could be group WPA key
186 * or could be static WEP key */
187 mac = NULL;
188 break;
189 case IEEE80211_IF_TYPE_IBSS:
190 break;
191 case IEEE80211_IF_TYPE_AP:
192 break;
193 default:
194 ASSERT(0);
195 break;
196 }
197 } else {
198 mac = addr;
199 }
200
201 if (key->alg == ALG_TKIP)
202 ret = ath_setkey_tkip(sc, key, &hk, mac);
203 else
204 ret = ath_keyset(sc, key->keyidx, &hk, mac);
205
206 if (!ret)
207 return -EIO;
208
209 sc->sc_keytype = hk.kv_type;
210 return 0;
211}
212
213static void ath_key_delete(struct ath_softc *sc, struct ieee80211_key_conf *key)
214{
215#define ATH_MAX_NUM_KEYS 4
216 int freeslot;
217
218 freeslot = (key->keyidx >= ATH_MAX_NUM_KEYS) ? 1 : 0;
219 ath_key_reset(sc, key->keyidx, freeslot);
220#undef ATH_MAX_NUM_KEYS
221}
222
223static void setup_ht_cap(struct ieee80211_ht_info *ht_info)
224{
225/* Until mac80211 includes these fields */
226
227#define IEEE80211_HT_CAP_DSSSCCK40 0x1000
228#define IEEE80211_HT_CAP_MAXRXAMPDU_65536 0x3 /* 2 ^ 16 */
229#define IEEE80211_HT_CAP_MPDUDENSITY_8 0x6 /* 8 usec */
230
231 ht_info->ht_supported = 1;
232 ht_info->cap = (u16)IEEE80211_HT_CAP_SUP_WIDTH
233 |(u16)IEEE80211_HT_CAP_MIMO_PS
234 |(u16)IEEE80211_HT_CAP_SGI_40
235 |(u16)IEEE80211_HT_CAP_DSSSCCK40;
236
237 ht_info->ampdu_factor = IEEE80211_HT_CAP_MAXRXAMPDU_65536;
238 ht_info->ampdu_density = IEEE80211_HT_CAP_MPDUDENSITY_8;
239 /* setup supported mcs set */
240 memset(ht_info->supp_mcs_set, 0, 16);
241 ht_info->supp_mcs_set[0] = 0xff;
242 ht_info->supp_mcs_set[1] = 0xff;
243 ht_info->supp_mcs_set[12] = IEEE80211_HT_CAP_MCS_TX_DEFINED;
244}
245
246static int ath_rate2idx(struct ath_softc *sc, int rate)
247{
248 int i = 0, cur_band, n_rates;
249 struct ieee80211_hw *hw = sc->hw;
250
251 cur_band = hw->conf.channel->band;
252 n_rates = sc->sbands[cur_band].n_bitrates;
253
254 for (i = 0; i < n_rates; i++) {
255 if (sc->sbands[cur_band].bitrates[i].bitrate == rate)
256 break;
257 }
258
259 /*
260	 * NB: mac80211 validates the rx rate index against the supported legacy
261	 * rates only (this should also be done against HT rates). For an rx rate
262	 * that does not match any of the supported basic or extended rates,
263	 * return the highest legacy rate index to keep mac80211 happy.
264	 * This hack will be cleaned up once the rx rate index validation
265	 * in mac80211 is fixed.
266 */
267 if (i == n_rates)
268 return n_rates - 1;
269 return i;
270}
271
272static void ath9k_rx_prepare(struct ath_softc *sc,
273 struct sk_buff *skb,
274 struct ath_recv_status *status,
275 struct ieee80211_rx_status *rx_status)
276{
277 struct ieee80211_hw *hw = sc->hw;
278 struct ieee80211_channel *curchan = hw->conf.channel;
279
280 memset(rx_status, 0, sizeof(struct ieee80211_rx_status));
281
282 rx_status->mactime = status->tsf;
283 rx_status->band = curchan->band;
284 rx_status->freq = curchan->center_freq;
285 rx_status->noise = ATH_DEFAULT_NOISE_FLOOR;
286 rx_status->signal = rx_status->noise + status->rssi;
287 rx_status->rate_idx = ath_rate2idx(sc, (status->rateKbps / 100));
288 rx_status->antenna = status->antenna;
289 rx_status->qual = status->rssi * 100 / 64;
290
291 if (status->flags & ATH_RX_MIC_ERROR)
292 rx_status->flag |= RX_FLAG_MMIC_ERROR;
293 if (status->flags & ATH_RX_FCS_ERROR)
294 rx_status->flag |= RX_FLAG_FAILED_FCS_CRC;
295
296 rx_status->flag |= RX_FLAG_TSFT;
297}
298
299static u8 parse_mpdudensity(u8 mpdudensity)
300{
301 /*
302 * 802.11n D2.0 defined values for "Minimum MPDU Start Spacing":
303 * 0 for no restriction
304 * 1 for 1/4 us
305 * 2 for 1/2 us
306 * 3 for 1 us
307 * 4 for 2 us
308 * 5 for 4 us
309 * 6 for 8 us
310 * 7 for 16 us
311 */
312 switch (mpdudensity) {
313 case 0:
314 return 0;
315 case 1:
316 case 2:
317 case 3:
318 /* Our lower layer calculations limit our precision to
319 1 microsecond */
320 return 1;
321 case 4:
322 return 2;
323 case 5:
324 return 4;
325 case 6:
326 return 8;
327 case 7:
328 return 16;
329 default:
330 return 0;
331 }
332}
333
334static int ath9k_start(struct ieee80211_hw *hw)
335{
336 struct ath_softc *sc = hw->priv;
337 struct ieee80211_channel *curchan = hw->conf.channel;
338 int error = 0, pos;
339
340 DPRINTF(sc, ATH_DBG_CONFIG, "%s: Starting driver with "
341 "initial channel: %d MHz\n", __func__, curchan->center_freq);
342
343 /* setup initial channel */
344
345 pos = ath_get_channel(sc, curchan);
346 if (pos == -1) {
347 DPRINTF(sc, ATH_DBG_FATAL, "%s: Invalid channel\n", __func__);
348 return -EINVAL;
349 }
350
351 sc->sc_ah->ah_channels[pos].chanmode =
352 (curchan->band == IEEE80211_BAND_2GHZ) ? CHANNEL_G : CHANNEL_A;
353
354 /* open ath_dev */
355 error = ath_open(sc, &sc->sc_ah->ah_channels[pos]);
356 if (error) {
357 DPRINTF(sc, ATH_DBG_FATAL,
358 "%s: Unable to complete ath_open\n", __func__);
359 return error;
360 }
361
362 ieee80211_wake_queues(hw);
363 return 0;
364}
365
366static int ath9k_tx(struct ieee80211_hw *hw,
367 struct sk_buff *skb)
368{
369 struct ath_softc *sc = hw->priv;
370 int hdrlen, padsize;
371
372 /* Add the padding after the header if this is not already done */
373 hdrlen = ieee80211_get_hdrlen_from_skb(skb);
374 if (hdrlen & 3) {
375 padsize = hdrlen % 4;
376 if (skb_headroom(skb) < padsize)
377 return -1;
378 skb_push(skb, padsize);
379 memmove(skb->data, skb->data + padsize, hdrlen);
380 }
381
382 DPRINTF(sc, ATH_DBG_XMIT, "%s: transmitting packet, skb: %p\n",
383 __func__,
384 skb);
385
386 if (ath_tx_start(sc, skb) != 0) {
387 DPRINTF(sc, ATH_DBG_XMIT, "%s: TX failed\n", __func__);
388 dev_kfree_skb_any(skb);
389 /* FIXME: Check for proper return value from ATH_DEV */
390 return 0;
391 }
392
393 return 0;
394}
395
396static void ath9k_stop(struct ieee80211_hw *hw)
397{
398 struct ath_softc *sc = hw->priv;
399 int error;
400
401 DPRINTF(sc, ATH_DBG_CONFIG, "%s: Driver halt\n", __func__);
402
403 error = ath_suspend(sc);
404 if (error)
405 DPRINTF(sc, ATH_DBG_CONFIG,
406 "%s: Device is no longer present\n", __func__);
407
408 ieee80211_stop_queues(hw);
409}
410
411static int ath9k_add_interface(struct ieee80211_hw *hw,
412 struct ieee80211_if_init_conf *conf)
413{
414 struct ath_softc *sc = hw->priv;
415 int error, ic_opmode = 0;
416
417 /* Support only vap for now */
418
419 if (sc->sc_nvaps)
420 return -ENOBUFS;
421
422 switch (conf->type) {
423 case IEEE80211_IF_TYPE_STA:
424 ic_opmode = ATH9K_M_STA;
425 break;
426 case IEEE80211_IF_TYPE_IBSS:
427 ic_opmode = ATH9K_M_IBSS;
428 break;
429 default:
430 DPRINTF(sc, ATH_DBG_FATAL,
431 "%s: Only STA and IBSS are supported currently\n",
432 __func__);
433 return -EOPNOTSUPP;
434 }
435
436 DPRINTF(sc, ATH_DBG_CONFIG, "%s: Attach a VAP of type: %d\n",
437 __func__,
438 ic_opmode);
439
440 error = ath_vap_attach(sc, 0, conf->vif, ic_opmode);
441 if (error) {
442 DPRINTF(sc, ATH_DBG_FATAL,
443 "%s: Unable to attach vap, error: %d\n",
444 __func__, error);
445 return error;
446 }
447
448 return 0;
449}
450
451static void ath9k_remove_interface(struct ieee80211_hw *hw,
452 struct ieee80211_if_init_conf *conf)
453{
454 struct ath_softc *sc = hw->priv;
455 struct ath_vap *avp;
456 int error;
457
458 DPRINTF(sc, ATH_DBG_CONFIG, "%s: Detach VAP\n", __func__);
459
460 avp = sc->sc_vaps[0];
461 if (avp == NULL) {
462 DPRINTF(sc, ATH_DBG_FATAL, "%s: Invalid interface\n",
463 __func__);
464 return;
465 }
466
467#ifdef CONFIG_SLOW_ANT_DIV
468 ath_slow_ant_div_stop(&sc->sc_antdiv);
469#endif
470
471 /* Update ratectrl */
472 ath_rate_newstate(sc, avp);
473
474 /* Reclaim beacon resources */
475 if (sc->sc_opmode == ATH9K_M_HOSTAP || sc->sc_opmode == ATH9K_M_IBSS) {
476 ath9k_hw_stoptxdma(sc->sc_ah, sc->sc_bhalq);
477 ath_beacon_return(sc, avp);
478 }
479
480 /* Set interrupt mask */
481 sc->sc_imask &= ~(ATH9K_INT_SWBA | ATH9K_INT_BMISS);
482 ath9k_hw_set_interrupts(sc->sc_ah, sc->sc_imask & ~ATH9K_INT_GLOBAL);
483 sc->sc_beacons = 0;
484
485 error = ath_vap_detach(sc, 0);
486 if (error)
487 DPRINTF(sc, ATH_DBG_FATAL,
488 "%s: Unable to detach vap, error: %d\n",
489 __func__, error);
490}
491
492static int ath9k_config(struct ieee80211_hw *hw,
493 struct ieee80211_conf *conf)
494{
495 struct ath_softc *sc = hw->priv;
496 struct ieee80211_channel *curchan = hw->conf.channel;
497 int pos;
498
499 DPRINTF(sc, ATH_DBG_CONFIG, "%s: Set channel: %d MHz\n",
500 __func__,
501 curchan->center_freq);
502
503 pos = ath_get_channel(sc, curchan);
504 if (pos == -1) {
505 DPRINTF(sc, ATH_DBG_FATAL, "%s: Invalid channel\n", __func__);
506 return -EINVAL;
507 }
508
509 sc->sc_ah->ah_channels[pos].chanmode =
510 (curchan->band == IEEE80211_BAND_2GHZ) ?
511 CHANNEL_G : CHANNEL_A;
512
513 if (sc->sc_curaid && hw->conf.ht_conf.ht_supported)
514 sc->sc_ah->ah_channels[pos].chanmode =
515 ath_get_extchanmode(sc, curchan);
516
517 sc->sc_config.txpowlimit = 2 * conf->power_level;
518
519 /* set h/w channel */
520 if (ath_set_channel(sc, &sc->sc_ah->ah_channels[pos]) < 0)
521 DPRINTF(sc, ATH_DBG_FATAL, "%s: Unable to set channel\n",
522 __func__);
523
524 return 0;
525}
526
527static int ath9k_config_interface(struct ieee80211_hw *hw,
528 struct ieee80211_vif *vif,
529 struct ieee80211_if_conf *conf)
530{
531 struct ath_softc *sc = hw->priv;
532 struct ath_vap *avp;
533 u32 rfilt = 0;
534 int error, i;
535 DECLARE_MAC_BUF(mac);
536
537 avp = sc->sc_vaps[0];
538 if (avp == NULL) {
539 DPRINTF(sc, ATH_DBG_FATAL, "%s: Invalid interface\n",
540 __func__);
541 return -EINVAL;
542 }
543
544 if ((conf->changed & IEEE80211_IFCC_BSSID) &&
545 !is_zero_ether_addr(conf->bssid)) {
546 switch (vif->type) {
547 case IEEE80211_IF_TYPE_STA:
548 case IEEE80211_IF_TYPE_IBSS:
549 /* Update ratectrl about the new state */
550 ath_rate_newstate(sc, avp);
551
552 /* Set rx filter */
553 rfilt = ath_calcrxfilter(sc);
554 ath9k_hw_setrxfilter(sc->sc_ah, rfilt);
555
556 /* Set BSSID */
557 memcpy(sc->sc_curbssid, conf->bssid, ETH_ALEN);
558 sc->sc_curaid = 0;
559 ath9k_hw_write_associd(sc->sc_ah, sc->sc_curbssid,
560 sc->sc_curaid);
561
562 /* Set aggregation protection mode parameters */
563 sc->sc_config.ath_aggr_prot = 0;
564
565 /*
566 * Reset our TSF so that its value is lower than the
567 * beacon that we are trying to catch.
568	 * Only then will the hw update its TSF register with the
569 * new beacon. Reset the TSF before setting the BSSID
570 * to avoid allowing in any frames that would update
571 * our TSF only to have us clear it
572 * immediately thereafter.
573 */
574 ath9k_hw_reset_tsf(sc->sc_ah);
575
576 /* Disable BMISS interrupt when we're not associated */
577 ath9k_hw_set_interrupts(sc->sc_ah,
578 sc->sc_imask &
579 ~(ATH9K_INT_SWBA | ATH9K_INT_BMISS));
580 sc->sc_imask &= ~(ATH9K_INT_SWBA | ATH9K_INT_BMISS);
581
582 DPRINTF(sc, ATH_DBG_CONFIG,
583 "%s: RX filter 0x%x bssid %s aid 0x%x\n",
584 __func__, rfilt,
585 print_mac(mac, sc->sc_curbssid), sc->sc_curaid);
586
587 /* need to reconfigure the beacon */
588 sc->sc_beacons = 0;
589
590 break;
591 default:
592 break;
593 }
594 }
595
596 if ((conf->changed & IEEE80211_IFCC_BEACON) &&
597 (vif->type == IEEE80211_IF_TYPE_IBSS)) {
598 /*
599 * Allocate and setup the beacon frame.
600 *
601 * Stop any previous beacon DMA. This may be
602 * necessary, for example, when an ibss merge
603 * causes reconfiguration; we may be called
604 * with beacon transmission active.
605 */
606 ath9k_hw_stoptxdma(sc->sc_ah, sc->sc_bhalq);
607
608 error = ath_beacon_alloc(sc, 0);
609 if (error != 0)
610 return error;
611
612 ath_beacon_sync(sc, 0);
613 }
614
615 /* Check for WLAN_CAPABILITY_PRIVACY ? */
616 if ((avp->av_opmode != IEEE80211_IF_TYPE_STA)) {
617 for (i = 0; i < IEEE80211_WEP_NKID; i++)
618 if (ath9k_hw_keyisvalid(sc->sc_ah, (u16)i))
619 ath9k_hw_keysetmac(sc->sc_ah,
620 (u16)i,
621 sc->sc_curbssid);
622 }
623
624 /* Only legacy IBSS for now */
625 if (vif->type == IEEE80211_IF_TYPE_IBSS)
626 ath_update_chainmask(sc, 0);
627
628 return 0;
629}
630
631#define SUPPORTED_FILTERS \
632 (FIF_PROMISC_IN_BSS | \
633 FIF_ALLMULTI | \
634 FIF_CONTROL | \
635 FIF_OTHER_BSS | \
636 FIF_BCN_PRBRESP_PROMISC | \
637 FIF_FCSFAIL)
638
639/* Accept unicast, bcast and mcast frames */
640
641static void ath9k_configure_filter(struct ieee80211_hw *hw,
642 unsigned int changed_flags,
643 unsigned int *total_flags,
644 int mc_count,
645 struct dev_mc_list *mclist)
646{
647 struct ath_softc *sc = hw->priv;
648
649 changed_flags &= SUPPORTED_FILTERS;
650 *total_flags &= SUPPORTED_FILTERS;
651
652 if (changed_flags & FIF_BCN_PRBRESP_PROMISC) {
653 if (*total_flags & FIF_BCN_PRBRESP_PROMISC)
654 ath_scan_start(sc);
655 else
656 ath_scan_end(sc);
657 }
658}
659
660static void ath9k_sta_notify(struct ieee80211_hw *hw,
661 struct ieee80211_vif *vif,
662 enum sta_notify_cmd cmd,
663 const u8 *addr)
664{
665 struct ath_softc *sc = hw->priv;
666 struct ath_node *an;
667 unsigned long flags;
668 DECLARE_MAC_BUF(mac);
669
670 spin_lock_irqsave(&sc->node_lock, flags);
671 an = ath_node_find(sc, (u8 *) addr);
672 spin_unlock_irqrestore(&sc->node_lock, flags);
673
674 switch (cmd) {
675 case STA_NOTIFY_ADD:
676 spin_lock_irqsave(&sc->node_lock, flags);
677 if (!an) {
678 ath_node_attach(sc, (u8 *)addr, 0);
679 DPRINTF(sc, ATH_DBG_CONFIG, "%s: Attach a node: %s\n",
680 __func__,
681 print_mac(mac, addr));
682 } else {
683 ath_node_get(sc, (u8 *)addr);
684 }
685 spin_unlock_irqrestore(&sc->node_lock, flags);
686 break;
687 case STA_NOTIFY_REMOVE:
688 if (!an)
689 DPRINTF(sc, ATH_DBG_FATAL,
690 "%s: Removal of a non-existent node\n",
691 __func__);
692 else {
693 ath_node_put(sc, an, ATH9K_BH_STATUS_INTACT);
694 DPRINTF(sc, ATH_DBG_CONFIG, "%s: Put a node: %s\n",
695 __func__,
696 print_mac(mac, addr));
697 }
698 break;
699 default:
700 break;
701 }
702}
703
704static int ath9k_conf_tx(struct ieee80211_hw *hw,
705 u16 queue,
706 const struct ieee80211_tx_queue_params *params)
707{
708 struct ath_softc *sc = hw->priv;
709 struct ath9k_tx_queue_info qi;
710 int ret = 0, qnum;
711
712 if (queue >= WME_NUM_AC)
713 return 0;
714
715 qi.tqi_aifs = params->aifs;
716 qi.tqi_cwmin = params->cw_min;
717 qi.tqi_cwmax = params->cw_max;
718 qi.tqi_burstTime = params->txop;
719 qnum = ath_get_hal_qnum(queue, sc);
720
721 DPRINTF(sc, ATH_DBG_CONFIG,
722 "%s: Configure tx [queue/halq] [%d/%d], "
723 "aifs: %d, cw_min: %d, cw_max: %d, txop: %d\n",
724 __func__,
725 queue,
726 qnum,
727 params->aifs,
728 params->cw_min,
729 params->cw_max,
730 params->txop);
731
732 ret = ath_txq_update(sc, qnum, &qi);
733 if (ret)
734 DPRINTF(sc, ATH_DBG_FATAL,
735 "%s: TXQ Update failed\n", __func__);
736
737 return ret;
738}
739
740static int ath9k_set_key(struct ieee80211_hw *hw,
741 enum set_key_cmd cmd,
742 const u8 *local_addr,
743 const u8 *addr,
744 struct ieee80211_key_conf *key)
745{
746 struct ath_softc *sc = hw->priv;
747 int ret = 0;
748
749 DPRINTF(sc, ATH_DBG_KEYCACHE, " %s: Set HW Key\n", __func__);
750
751 switch (cmd) {
752 case SET_KEY:
753 ret = ath_key_config(sc, addr, key);
754 if (!ret) {
755 set_bit(key->keyidx, sc->sc_keymap);
756 key->hw_key_idx = key->keyidx;
757 /* push IV and Michael MIC generation to stack */
758 key->flags |= IEEE80211_KEY_FLAG_GENERATE_IV;
759 key->flags |= IEEE80211_KEY_FLAG_GENERATE_MMIC;
760 }
761 break;
762 case DISABLE_KEY:
763 ath_key_delete(sc, key);
764 clear_bit(key->keyidx, sc->sc_keymap);
765 sc->sc_keytype = ATH9K_CIPHER_CLR;
766 break;
767 default:
768 ret = -EINVAL;
769 }
770
771 return ret;
772}
773
774static void ath9k_ht_conf(struct ath_softc *sc,
775 struct ieee80211_bss_conf *bss_conf)
776{
777#define IEEE80211_HT_CAP_40MHZ_INTOLERANT BIT(14)
778 struct ath_ht_info *ht_info = &sc->sc_ht_info;
779
780 if (bss_conf->assoc_ht) {
781 ht_info->ext_chan_offset =
782 bss_conf->ht_bss_conf->bss_cap &
783 IEEE80211_HT_IE_CHA_SEC_OFFSET;
784
785 if (!(bss_conf->ht_conf->cap &
786 IEEE80211_HT_CAP_40MHZ_INTOLERANT) &&
787 (bss_conf->ht_bss_conf->bss_cap &
788 IEEE80211_HT_IE_CHA_WIDTH))
789 ht_info->tx_chan_width = ATH9K_HT_MACMODE_2040;
790 else
791 ht_info->tx_chan_width = ATH9K_HT_MACMODE_20;
792
793 ath9k_hw_set11nmac2040(sc->sc_ah, ht_info->tx_chan_width);
794 ht_info->maxampdu = 1 << (IEEE80211_HTCAP_MAXRXAMPDU_FACTOR +
795 bss_conf->ht_conf->ampdu_factor);
796 ht_info->mpdudensity =
797 parse_mpdudensity(bss_conf->ht_conf->ampdu_density);
798
799 }
800
801#undef IEEE80211_HT_CAP_40MHZ_INTOLERANT
802}
803
804static void ath9k_bss_assoc_info(struct ath_softc *sc,
805 struct ieee80211_bss_conf *bss_conf)
806{
807 struct ieee80211_hw *hw = sc->hw;
808 struct ieee80211_channel *curchan = hw->conf.channel;
809 struct ath_vap *avp;
810 int pos;
811 DECLARE_MAC_BUF(mac);
812
813 if (bss_conf->assoc) {
814 DPRINTF(sc, ATH_DBG_CONFIG, "%s: Bss Info ASSOC %d\n",
815 __func__,
816 bss_conf->aid);
817
818 avp = sc->sc_vaps[0];
819 if (avp == NULL) {
820 DPRINTF(sc, ATH_DBG_FATAL, "%s: Invalid interface\n",
821 __func__);
822 return;
823 }
824
825 /* New association, store aid */
826 if (avp->av_opmode == ATH9K_M_STA) {
827 sc->sc_curaid = bss_conf->aid;
828 ath9k_hw_write_associd(sc->sc_ah, sc->sc_curbssid,
829 sc->sc_curaid);
830 }
831
832 /* Configure the beacon */
833 ath_beacon_config(sc, 0);
834 sc->sc_beacons = 1;
835
836 /* Reset rssi stats */
837 sc->sc_halstats.ns_avgbrssi = ATH_RSSI_DUMMY_MARKER;
838 sc->sc_halstats.ns_avgrssi = ATH_RSSI_DUMMY_MARKER;
839 sc->sc_halstats.ns_avgtxrssi = ATH_RSSI_DUMMY_MARKER;
840 sc->sc_halstats.ns_avgtxrate = ATH_RATE_DUMMY_MARKER;
841
842 /* Update chainmask */
843 ath_update_chainmask(sc, bss_conf->assoc_ht);
844
845 DPRINTF(sc, ATH_DBG_CONFIG,
846 "%s: bssid %s aid 0x%x\n",
847 __func__,
848 print_mac(mac, sc->sc_curbssid), sc->sc_curaid);
849
850 DPRINTF(sc, ATH_DBG_CONFIG, "%s: Set channel: %d MHz\n",
851 __func__,
852 curchan->center_freq);
853
854 pos = ath_get_channel(sc, curchan);
855 if (pos == -1) {
856 DPRINTF(sc, ATH_DBG_FATAL,
857 "%s: Invalid channel\n", __func__);
858 return;
859 }
860
861 if (hw->conf.ht_conf.ht_supported)
862 sc->sc_ah->ah_channels[pos].chanmode =
863 ath_get_extchanmode(sc, curchan);
864 else
865 sc->sc_ah->ah_channels[pos].chanmode =
866 (curchan->band == IEEE80211_BAND_2GHZ) ?
867 CHANNEL_G : CHANNEL_A;
868
869 /* set h/w channel */
870 if (ath_set_channel(sc, &sc->sc_ah->ah_channels[pos]) < 0)
871 DPRINTF(sc, ATH_DBG_FATAL,
872 "%s: Unable to set channel\n",
873 __func__);
874
875 ath_rate_newstate(sc, avp);
876 /* Update ratectrl about the new state */
877 ath_rc_node_update(hw, avp->rc_node);
878 } else {
879 DPRINTF(sc, ATH_DBG_CONFIG,
880 "%s: Bss Info DISSOC\n", __func__);
881 sc->sc_curaid = 0;
882 }
883}
884
885static void ath9k_bss_info_changed(struct ieee80211_hw *hw,
886 struct ieee80211_vif *vif,
887 struct ieee80211_bss_conf *bss_conf,
888 u32 changed)
889{
890 struct ath_softc *sc = hw->priv;
891
892 if (changed & BSS_CHANGED_ERP_PREAMBLE) {
893 DPRINTF(sc, ATH_DBG_CONFIG, "%s: BSS Changed PREAMBLE %d\n",
894 __func__,
895 bss_conf->use_short_preamble);
896 if (bss_conf->use_short_preamble)
897 sc->sc_flags |= ATH_PREAMBLE_SHORT;
898 else
899 sc->sc_flags &= ~ATH_PREAMBLE_SHORT;
900 }
901
902 if (changed & BSS_CHANGED_ERP_CTS_PROT) {
903 DPRINTF(sc, ATH_DBG_CONFIG, "%s: BSS Changed CTS PROT %d\n",
904 __func__,
905 bss_conf->use_cts_prot);
906 if (bss_conf->use_cts_prot &&
907 hw->conf.channel->band != IEEE80211_BAND_5GHZ)
908 sc->sc_flags |= ATH_PROTECT_ENABLE;
909 else
910 sc->sc_flags &= ~ATH_PROTECT_ENABLE;
911 }
912
913 if (changed & BSS_CHANGED_HT) {
914 DPRINTF(sc, ATH_DBG_CONFIG, "%s: BSS Changed HT %d\n",
915 __func__,
916 bss_conf->assoc_ht);
917 ath9k_ht_conf(sc, bss_conf);
918 }
919
920 if (changed & BSS_CHANGED_ASSOC) {
921 DPRINTF(sc, ATH_DBG_CONFIG, "%s: BSS Changed ASSOC %d\n",
922 __func__,
923 bss_conf->assoc);
924 ath9k_bss_assoc_info(sc, bss_conf);
925 }
926}
927
928static u64 ath9k_get_tsf(struct ieee80211_hw *hw)
929{
930 u64 tsf;
931 struct ath_softc *sc = hw->priv;
932 struct ath_hal *ah = sc->sc_ah;
933
934 tsf = ath9k_hw_gettsf64(ah);
935
936 return tsf;
937}
938
939static void ath9k_reset_tsf(struct ieee80211_hw *hw)
940{
941 struct ath_softc *sc = hw->priv;
942 struct ath_hal *ah = sc->sc_ah;
943
944 ath9k_hw_reset_tsf(ah);
945}
946
947static int ath9k_ampdu_action(struct ieee80211_hw *hw,
948 enum ieee80211_ampdu_mlme_action action,
949 const u8 *addr,
950 u16 tid,
951 u16 *ssn)
952{
953 struct ath_softc *sc = hw->priv;
954 int ret = 0;
955
956 switch (action) {
957 case IEEE80211_AMPDU_RX_START:
958 ret = ath_rx_aggr_start(sc, addr, tid, ssn);
959 if (ret < 0)
960 DPRINTF(sc, ATH_DBG_FATAL,
961 "%s: Unable to start RX aggregation\n",
962 __func__);
963 break;
964 case IEEE80211_AMPDU_RX_STOP:
965 ret = ath_rx_aggr_stop(sc, addr, tid);
966 if (ret < 0)
967 DPRINTF(sc, ATH_DBG_FATAL,
968 "%s: Unable to stop RX aggregation\n",
969 __func__);
970 break;
971 case IEEE80211_AMPDU_TX_START:
972 ret = ath_tx_aggr_start(sc, addr, tid, ssn);
973 if (ret < 0)
974 DPRINTF(sc, ATH_DBG_FATAL,
975 "%s: Unable to start TX aggregation\n",
976 __func__);
977 else
978 ieee80211_start_tx_ba_cb_irqsafe(hw, (u8 *)addr, tid);
979 break;
980 case IEEE80211_AMPDU_TX_STOP:
981 ret = ath_tx_aggr_stop(sc, addr, tid);
982 if (ret < 0)
983 DPRINTF(sc, ATH_DBG_FATAL,
984 "%s: Unable to stop TX aggregation\n",
985 __func__);
986
987 ieee80211_stop_tx_ba_cb_irqsafe(hw, (u8 *)addr, tid);
988 break;
989 default:
990 DPRINTF(sc, ATH_DBG_FATAL,
991 "%s: Unknown AMPDU action\n", __func__);
992 }
993
994 return ret;
995}
996
997static struct ieee80211_ops ath9k_ops = {
998 .tx = ath9k_tx,
999 .start = ath9k_start,
1000 .stop = ath9k_stop,
1001 .add_interface = ath9k_add_interface,
1002 .remove_interface = ath9k_remove_interface,
1003 .config = ath9k_config,
1004 .config_interface = ath9k_config_interface,
1005 .configure_filter = ath9k_configure_filter,
1006 .get_stats = NULL,
1007 .sta_notify = ath9k_sta_notify,
1008 .conf_tx = ath9k_conf_tx,
1009 .get_tx_stats = NULL,
1010 .bss_info_changed = ath9k_bss_info_changed,
1011 .set_tim = NULL,
1012 .set_key = ath9k_set_key,
1013 .hw_scan = NULL,
1014 .get_tkip_seq = NULL,
1015 .set_rts_threshold = NULL,
1016 .set_frag_threshold = NULL,
1017 .set_retry_limit = NULL,
1018 .get_tsf = ath9k_get_tsf,
1019 .reset_tsf = ath9k_reset_tsf,
1020 .tx_last_beacon = NULL,
1021 .ampdu_action = ath9k_ampdu_action
1022};
1023
1024void ath_get_beaconconfig(struct ath_softc *sc,
1025 int if_id,
1026 struct ath_beacon_config *conf)
1027{
1028 struct ieee80211_hw *hw = sc->hw;
1029
1030 /* fill in beacon config data */
1031
1032 conf->beacon_interval = hw->conf.beacon_int;
1033 conf->listen_interval = 100;
1034 conf->dtim_count = 1;
1035 conf->bmiss_timeout = ATH_DEFAULT_BMISS_LIMIT * conf->listen_interval;
1036}
1037
1038int ath_update_beacon(struct ath_softc *sc,
1039 int if_id,
1040 struct ath_beacon_offset *bo,
1041 struct sk_buff *skb,
1042 int mcast)
1043{
1044 return 0;
1045}
1046
1047void ath_tx_complete(struct ath_softc *sc, struct sk_buff *skb,
1048 struct ath_xmit_status *tx_status, struct ath_node *an)
1049{
1050 struct ieee80211_hw *hw = sc->hw;
1051 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
1052
1053 DPRINTF(sc, ATH_DBG_XMIT,
1054 "%s: TX complete: skb: %p\n", __func__, skb);
1055
1056 if (tx_info->flags & IEEE80211_TX_CTL_NO_ACK ||
1057 tx_info->flags & IEEE80211_TX_STAT_TX_FILTERED) {
1058 /* free driver's private data area of tx_info */
1059 if (tx_info->driver_data[0] != NULL)
1060 kfree(tx_info->driver_data[0]);
1061 tx_info->driver_data[0] = NULL;
1062 }
1063
1064 if (tx_status->flags & ATH_TX_BAR) {
1065 tx_info->flags |= IEEE80211_TX_STAT_AMPDU_NO_BACK;
1066 tx_status->flags &= ~ATH_TX_BAR;
1067 }
1068 if (tx_status->flags)
1069 tx_info->status.excessive_retries = 1;
1070
1071 tx_info->status.retry_count = tx_status->retries;
1072
1073 ieee80211_tx_status(hw, skb);
1074 if (an)
1075 ath_node_put(sc, an, ATH9K_BH_STATUS_CHANGE);
1076}
1077
1078int ath__rx_indicate(struct ath_softc *sc,
1079 struct sk_buff *skb,
1080 struct ath_recv_status *status,
1081 u16 keyix)
1082{
1083 struct ieee80211_hw *hw = sc->hw;
1084 struct ath_node *an = NULL;
1085 struct ieee80211_rx_status rx_status;
1086 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
1087 int hdrlen = ieee80211_get_hdrlen_from_skb(skb);
1088 int padsize;
1089 enum ATH_RX_TYPE st;
1090
1091 /* see if any padding is done by the hw and remove it */
1092 if (hdrlen & 3) {
1093 padsize = hdrlen % 4;
1094 memmove(skb->data + padsize, skb->data, hdrlen);
1095 skb_pull(skb, padsize);
1096 }
1097
1098 /* remove FCS before passing up to protocol stack */
1099 skb_trim(skb, (skb->len - FCS_LEN));
1100
1101 /* Prepare rx status */
1102 ath9k_rx_prepare(sc, skb, status, &rx_status);
1103
1104	 if ((keyix != ATH9K_RXKEYIX_INVALID) &&
1105 !(status->flags & ATH_RX_DECRYPT_ERROR)) {
1106 rx_status.flag |= RX_FLAG_DECRYPTED;
1107 } else if ((le16_to_cpu(hdr->frame_control) & IEEE80211_FCTL_PROTECTED)
1108 && !(status->flags & ATH_RX_DECRYPT_ERROR)
1109 && skb->len >= hdrlen + 4) {
1110 keyix = skb->data[hdrlen + 3] >> 6;
1111
1112 if (test_bit(keyix, sc->sc_keymap))
1113 rx_status.flag |= RX_FLAG_DECRYPTED;
1114 }
1115
1116 spin_lock_bh(&sc->node_lock);
1117 an = ath_node_find(sc, hdr->addr2);
1118 spin_unlock_bh(&sc->node_lock);
1119
1120 if (an) {
1121 ath_rx_input(sc, an,
1122 hw->conf.ht_conf.ht_supported,
1123 skb, status, &st);
1124 }
1125 if (!an || (st != ATH_RX_CONSUMED))
1126 __ieee80211_rx(hw, skb, &rx_status);
1127
1128 return 0;
1129}
1130
1131int ath_rx_subframe(struct ath_node *an,
1132 struct sk_buff *skb,
1133 struct ath_recv_status *status)
1134{
1135 struct ath_softc *sc = an->an_sc;
1136 struct ieee80211_hw *hw = sc->hw;
1137 struct ieee80211_rx_status rx_status;
1138
1139 /* Prepare rx status */
1140 ath9k_rx_prepare(sc, skb, status, &rx_status);
1141 if (!(status->flags & ATH_RX_DECRYPT_ERROR))
1142 rx_status.flag |= RX_FLAG_DECRYPTED;
1143
1144 __ieee80211_rx(hw, skb, &rx_status);
1145
1146 return 0;
1147}
1148
1149enum ath9k_ht_macmode ath_cwm_macmode(struct ath_softc *sc)
1150{
1151 return sc->sc_ht_info.tx_chan_width;
1152}
1153
1154static int ath_detach(struct ath_softc *sc)
1155{
1156 struct ieee80211_hw *hw = sc->hw;
1157
1158 DPRINTF(sc, ATH_DBG_CONFIG, "%s: Detach ATH hw\n", __func__);
1159
1160 /* Unregister hw */
1161
1162 ieee80211_unregister_hw(hw);
1163
1164 /* unregister Rate control */
1165 ath_rate_control_unregister();
1166
1167 /* tx/rx cleanup */
1168
1169 ath_rx_cleanup(sc);
1170 ath_tx_cleanup(sc);
1171
1172 /* Deinit */
1173
1174 ath_deinit(sc);
1175
1176 return 0;
1177}
1178
1179static int ath_attach(u16 devid,
1180 struct ath_softc *sc)
1181{
1182 struct ieee80211_hw *hw = sc->hw;
1183 int error = 0;
1184
1185 DPRINTF(sc, ATH_DBG_CONFIG, "%s: Attach ATH hw\n", __func__);
1186
1187 error = ath_init(devid, sc);
1188 if (error != 0)
1189 return error;
1190
1191 /* Init nodes */
1192
1193 INIT_LIST_HEAD(&sc->node_list);
1194 spin_lock_init(&sc->node_lock);
1195
1196 /* get mac address from hardware and set in mac80211 */
1197
1198 SET_IEEE80211_PERM_ADDR(hw, sc->sc_myaddr);
1199
1200 /* setup channels and rates */
1201
1202 sc->sbands[IEEE80211_BAND_2GHZ].channels =
1203 sc->channels[IEEE80211_BAND_2GHZ];
1204 sc->sbands[IEEE80211_BAND_2GHZ].bitrates =
1205 sc->rates[IEEE80211_BAND_2GHZ];
1206 sc->sbands[IEEE80211_BAND_2GHZ].band = IEEE80211_BAND_2GHZ;
1207
1208 if (sc->sc_ah->ah_caps.hw_caps & ATH9K_HW_CAP_HT)
1209 /* Setup HT capabilities for 2.4Ghz*/
1210 setup_ht_cap(&sc->sbands[IEEE80211_BAND_2GHZ].ht_info);
1211
1212 hw->wiphy->bands[IEEE80211_BAND_2GHZ] =
1213 &sc->sbands[IEEE80211_BAND_2GHZ];
1214
1215 if (test_bit(ATH9K_MODE_11A, sc->sc_ah->ah_caps.wireless_modes)) {
1216 sc->sbands[IEEE80211_BAND_5GHZ].channels =
1217 sc->channels[IEEE80211_BAND_5GHZ];
1218 sc->sbands[IEEE80211_BAND_5GHZ].bitrates =
1219 sc->rates[IEEE80211_BAND_5GHZ];
1220 sc->sbands[IEEE80211_BAND_5GHZ].band =
1221 IEEE80211_BAND_5GHZ;
1222
1223 if (sc->sc_ah->ah_caps.hw_caps & ATH9K_HW_CAP_HT)
1224 /* Setup HT capabilities for 5Ghz*/
1225 setup_ht_cap(&sc->sbands[IEEE80211_BAND_5GHZ].ht_info);
1226
1227 hw->wiphy->bands[IEEE80211_BAND_5GHZ] =
1228 &sc->sbands[IEEE80211_BAND_5GHZ];
1229 }
1230
1231 /* FIXME: Have to figure out proper hw init values later */
1232
1233 hw->queues = 4;
1234 hw->ampdu_queues = 1;
1235
1236 /* Register rate control */
1237 hw->rate_control_algorithm = "ath9k_rate_control";
1238 error = ath_rate_control_register();
1239 if (error != 0) {
1240 DPRINTF(sc, ATH_DBG_FATAL,
1241 "%s: Unable to register rate control "
1242 "algorithm:%d\n", __func__, error);
1243 ath_rate_control_unregister();
1244 goto bad;
1245 }
1246
1247 error = ieee80211_register_hw(hw);
1248 if (error != 0) {
1249 ath_rate_control_unregister();
1250 goto bad;
1251 }
1252
1253 /* initialize tx/rx engine */
1254
1255 error = ath_tx_init(sc, ATH_TXBUF);
1256 if (error != 0)
1257 goto bad1;
1258
1259 error = ath_rx_init(sc, ATH_RXBUF);
1260 if (error != 0)
1261 goto bad1;
1262
1263 return 0;
1264bad1:
1265 ath_detach(sc);
1266bad:
1267 return error;
1268}
1269
1270static int ath_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
1271{
1272 void __iomem *mem;
1273 struct ath_softc *sc;
1274 struct ieee80211_hw *hw;
1275 const char *athname;
1276 u8 csz;
1277 u32 val;
1278 int ret = 0;
1279
1280 if (pci_enable_device(pdev))
1281 return -EIO;
1282
1283 /* XXX 32-bit addressing only */
1284 if (pci_set_dma_mask(pdev, 0xffffffff)) {
1285 printk(KERN_ERR "ath_pci: 32-bit DMA not available\n");
1286 ret = -ENODEV;
1287 goto bad;
1288 }
1289
1290 /*
1291 * Cache line size is used to size and align various
1292 * structures used to communicate with the hardware.
1293 */
1294 pci_read_config_byte(pdev, PCI_CACHE_LINE_SIZE, &csz);
1295 if (csz == 0) {
1296 /*
1297 * Linux 2.4.18 (at least) writes the cache line size
1298 * register as a 16-bit wide register which is wrong.
1299 * We must have this setup properly for rx buffer
1300 * DMA to work so force a reasonable value here if it
1301 * comes up zero.
1302 */
1303 csz = L1_CACHE_BYTES / sizeof(u32);
1304 pci_write_config_byte(pdev, PCI_CACHE_LINE_SIZE, csz);
1305 }
1306 /*
1307	 * The default setting of the latency timer yields poor results;
1308 * set it to the value used by other systems. It may be worth
1309 * tweaking this setting more.
1310 */
1311 pci_write_config_byte(pdev, PCI_LATENCY_TIMER, 0xa8);
1312
1313 pci_set_master(pdev);
1314
1315 /*
1316 * Disable the RETRY_TIMEOUT register (0x41) to keep
1317 * PCI Tx retries from interfering with C3 CPU state.
1318 */
1319 pci_read_config_dword(pdev, 0x40, &val);
1320 if ((val & 0x0000ff00) != 0)
1321 pci_write_config_dword(pdev, 0x40, val & 0xffff00ff);
1322
1323 ret = pci_request_region(pdev, 0, "ath9k");
1324 if (ret) {
1325 dev_err(&pdev->dev, "PCI memory region reserve error\n");
1326 ret = -ENODEV;
1327 goto bad;
1328 }
1329
1330 mem = pci_iomap(pdev, 0, 0);
1331 if (!mem) {
1332	 printk(KERN_ERR "PCI memory map error\n");
1333 ret = -EIO;
1334 goto bad1;
1335 }
1336
1337 hw = ieee80211_alloc_hw(sizeof(struct ath_softc), &ath9k_ops);
1338 if (hw == NULL) {
1339		printk(KERN_ERR "ath_pci: no memory for ieee80211_hw\n");
1340		ret = -ENOMEM;
		goto bad2;
1341 }
1342
1343 hw->flags = IEEE80211_HW_SIGNAL_DBM |
1344 IEEE80211_HW_NOISE_DBM;
1345
1346 SET_IEEE80211_DEV(hw, &pdev->dev);
1347 pci_set_drvdata(pdev, hw);
1348
1349 sc = hw->priv;
1350 sc->hw = hw;
1351 sc->pdev = pdev;
1352 sc->mem = mem;
1353
1354 if (ath_attach(id->device, sc) != 0) {
1355 ret = -ENODEV;
1356 goto bad3;
1357 }
1358
1359 /* setup interrupt service routine */
1360
1361 if (request_irq(pdev->irq, ath_isr, IRQF_SHARED, "ath", sc)) {
1362 printk(KERN_ERR "%s: request_irq failed\n",
1363 wiphy_name(hw->wiphy));
1364 ret = -EIO;
1365 goto bad4;
1366 }
1367
1368 athname = ath9k_hw_probe(id->vendor, id->device);
1369
1370 printk(KERN_INFO "%s: %s: mem=0x%lx, irq=%d\n",
1371 wiphy_name(hw->wiphy),
1372 athname ? athname : "Atheros ???",
1373 (unsigned long)mem, pdev->irq);
1374
1375 return 0;
1376bad4:
1377 ath_detach(sc);
1378bad3:
1379 ieee80211_free_hw(hw);
1380bad2:
1381 pci_iounmap(pdev, mem);
1382bad1:
1383 pci_release_region(pdev, 0);
1384bad:
1385 pci_disable_device(pdev);
1386 return ret;
1387}
1388
1389static void ath_pci_remove(struct pci_dev *pdev)
1390{
1391 struct ieee80211_hw *hw = pci_get_drvdata(pdev);
1392 struct ath_softc *sc = hw->priv;
1393
1394 if (pdev->irq)
1395 free_irq(pdev->irq, sc);
1396 ath_detach(sc);
1397 pci_iounmap(pdev, sc->mem);
1398 pci_release_region(pdev, 0);
1399 pci_disable_device(pdev);
1400 ieee80211_free_hw(hw);
1401}
1402
1403#ifdef CONFIG_PM
1404
1405static int ath_pci_suspend(struct pci_dev *pdev, pm_message_t state)
1406{
1407 pci_save_state(pdev);
1408 pci_disable_device(pdev);
1409	pci_set_power_state(pdev, PCI_D3hot);
1410
1411 return 0;
1412}
1413
1414static int ath_pci_resume(struct pci_dev *pdev)
1415{
1416 u32 val;
1417 int err;
1418
1419 err = pci_enable_device(pdev);
1420 if (err)
1421 return err;
1422 pci_restore_state(pdev);
1423 /*
1424 * Suspend/Resume resets the PCI configuration space, so we have to
1425 * re-disable the RETRY_TIMEOUT register (0x41) to keep
1426 * PCI Tx retries from interfering with C3 CPU state
1427 */
1428 pci_read_config_dword(pdev, 0x40, &val);
1429 if ((val & 0x0000ff00) != 0)
1430 pci_write_config_dword(pdev, 0x40, val & 0xffff00ff);
1431
1432 return 0;
1433}
1434
1435#endif /* CONFIG_PM */
1436
1437MODULE_DEVICE_TABLE(pci, ath_pci_id_table);
1438
1439static struct pci_driver ath_pci_driver = {
1440 .name = "ath9k",
1441 .id_table = ath_pci_id_table,
1442 .probe = ath_pci_probe,
1443 .remove = ath_pci_remove,
1444#ifdef CONFIG_PM
1445 .suspend = ath_pci_suspend,
1446 .resume = ath_pci_resume,
1447#endif /* CONFIG_PM */
1448};
1449
1450static int __init init_ath_pci(void)
1451{
1452 printk(KERN_INFO "%s: %s\n", dev_info, ATH_PCI_VERSION);
1453
1454 if (pci_register_driver(&ath_pci_driver) < 0) {
1455 printk(KERN_ERR
1456 "ath_pci: No devices found, driver not installed.\n");
1457 pci_unregister_driver(&ath_pci_driver);
1458 return -ENODEV;
1459 }
1460
1461 return 0;
1462}
1463module_init(init_ath_pci);
1464
1465static void __exit exit_ath_pci(void)
1466{
1467 pci_unregister_driver(&ath_pci_driver);
1468 printk(KERN_INFO "%s: driver unloaded\n", dev_info);
1469}
1470module_exit(exit_ath_pci);
diff --git a/drivers/net/wireless/ath9k/phy.c b/drivers/net/wireless/ath9k/phy.c
new file mode 100644
index 000000000000..eb9121fdfd38
--- /dev/null
+++ b/drivers/net/wireless/ath9k/phy.c
@@ -0,0 +1,436 @@
1/*
2 * Copyright (c) 2008 Atheros Communications Inc.
3 *
4 * Permission to use, copy, modify, and/or distribute this software for any
5 * purpose with or without fee is hereby granted, provided that the above
6 * copyright notice and this permission notice appear in all copies.
7 *
8 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
9 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
10 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
11 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
12 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
13 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
14 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
15 */
16
17#include "core.h"
18#include "hw.h"
19#include "reg.h"
20#include "phy.h"
21
22void
23ath9k_hw_write_regs(struct ath_hal *ah, u32 modesIndex, u32 freqIndex,
24 int regWrites)
25{
26 struct ath_hal_5416 *ahp = AH5416(ah);
27
28 REG_WRITE_ARRAY(&ahp->ah_iniBB_RfGain, freqIndex, regWrites);
29}
30
31bool
32ath9k_hw_set_channel(struct ath_hal *ah, struct ath9k_channel *chan)
33{
34 u32 channelSel = 0;
35 u32 bModeSynth = 0;
36 u32 aModeRefSel = 0;
37 u32 reg32 = 0;
38 u16 freq;
39 struct chan_centers centers;
40
41 ath9k_hw_get_channel_centers(ah, chan, &centers);
42 freq = centers.synth_center;
43
44 if (freq < 4800) {
45 u32 txctl;
46
47 if (((freq - 2192) % 5) == 0) {
48 channelSel = ((freq - 672) * 2 - 3040) / 10;
49 bModeSynth = 0;
50 } else if (((freq - 2224) % 5) == 0) {
51 channelSel = ((freq - 704) * 2 - 3040) / 10;
52 bModeSynth = 1;
53 } else {
54 DPRINTF(ah->ah_sc, ATH_DBG_CHANNEL,
55 "%s: invalid channel %u MHz\n", __func__,
56 freq);
57 return false;
58 }
59
60 channelSel = (channelSel << 2) & 0xff;
61 channelSel = ath9k_hw_reverse_bits(channelSel, 8);
62
63 txctl = REG_READ(ah, AR_PHY_CCK_TX_CTRL);
64 if (freq == 2484) {
65
66 REG_WRITE(ah, AR_PHY_CCK_TX_CTRL,
67 txctl | AR_PHY_CCK_TX_CTRL_JAPAN);
68 } else {
69 REG_WRITE(ah, AR_PHY_CCK_TX_CTRL,
70 txctl & ~AR_PHY_CCK_TX_CTRL_JAPAN);
71 }
72
73 } else if ((freq % 20) == 0 && freq >= 5120) {
74 channelSel =
75 ath9k_hw_reverse_bits(((freq - 4800) / 20 << 2), 8);
76 aModeRefSel = ath9k_hw_reverse_bits(1, 2);
77 } else if ((freq % 10) == 0) {
78 channelSel =
79 ath9k_hw_reverse_bits(((freq - 4800) / 10 << 1), 8);
80 if (AR_SREV_9100(ah) || AR_SREV_9160_10_OR_LATER(ah))
81 aModeRefSel = ath9k_hw_reverse_bits(2, 2);
82 else
83 aModeRefSel = ath9k_hw_reverse_bits(1, 2);
84 } else if ((freq % 5) == 0) {
85 channelSel = ath9k_hw_reverse_bits((freq - 4800) / 5, 8);
86 aModeRefSel = ath9k_hw_reverse_bits(1, 2);
87 } else {
88 DPRINTF(ah->ah_sc, ATH_DBG_CHANNEL,
89 "%s: invalid channel %u MHz\n", __func__, freq);
90 return false;
91 }
92
93 reg32 =
94 (channelSel << 8) | (aModeRefSel << 2) | (bModeSynth << 1) |
95 (1 << 5) | 0x1;
96
97 REG_WRITE(ah, AR_PHY(0x37), reg32);
98
99 ah->ah_curchan = chan;
100
101 AH5416(ah)->ah_curchanRadIndex = -1;
102
103 return true;
104}
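/*
 * Worked example for the 2.4 GHz branch above, assuming a plain
 * 20 MHz channel at 2412 MHz (so the synth center is 2412):
 * (2412 - 2192) % 5 == 0, hence bModeSynth = 0 and
 * channelSel = ((2412 - 672) * 2 - 3040) / 10 = 44.  Shifting left
 * by two and masking gives 0xb0, and reversing those eight bits
 * yields 0x0d, which lands in bits [15:8] of the value written to
 * AR_PHY(0x37).
 */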
105
106bool
107ath9k_hw_ar9280_set_channel(struct ath_hal *ah,
108 struct ath9k_channel *chan)
109{
110 u16 bMode, fracMode, aModeRefSel = 0;
111 u32 freq, ndiv, channelSel = 0, channelFrac = 0, reg32 = 0;
112 struct chan_centers centers;
113 u32 refDivA = 24;
114
115 ath9k_hw_get_channel_centers(ah, chan, &centers);
116 freq = centers.synth_center;
117
118 reg32 = REG_READ(ah, AR_PHY_SYNTH_CONTROL);
119 reg32 &= 0xc0000000;
120
121 if (freq < 4800) {
122 u32 txctl;
123
124 bMode = 1;
125 fracMode = 1;
126 aModeRefSel = 0;
127 channelSel = (freq * 0x10000) / 15;
128
129 txctl = REG_READ(ah, AR_PHY_CCK_TX_CTRL);
130 if (freq == 2484) {
131
132 REG_WRITE(ah, AR_PHY_CCK_TX_CTRL,
133 txctl | AR_PHY_CCK_TX_CTRL_JAPAN);
134 } else {
135 REG_WRITE(ah, AR_PHY_CCK_TX_CTRL,
136 txctl & ~AR_PHY_CCK_TX_CTRL_JAPAN);
137 }
138 } else {
139 bMode = 0;
140 fracMode = 0;
141
142 if ((freq % 20) == 0) {
143 aModeRefSel = 3;
144 } else if ((freq % 10) == 0) {
145 aModeRefSel = 2;
146 } else {
147 aModeRefSel = 0;
148
149 fracMode = 1;
150 refDivA = 1;
151 channelSel = (freq * 0x8000) / 15;
152
153 REG_RMW_FIELD(ah, AR_AN_SYNTH9,
154 AR_AN_SYNTH9_REFDIVA, refDivA);
155 }
156 if (!fracMode) {
157 ndiv = (freq * (refDivA >> aModeRefSel)) / 60;
158 channelSel = ndiv & 0x1ff;
159 channelFrac = (ndiv & 0xfffffe00) * 2;
160 channelSel = (channelSel << 17) | channelFrac;
161 }
162 }
163
164 reg32 = reg32 |
165 (bMode << 29) |
166 (fracMode << 28) | (aModeRefSel << 26) | (channelSel);
167
168 REG_WRITE(ah, AR_PHY_SYNTH_CONTROL, reg32);
169
170 ah->ah_curchan = chan;
171
172 AH5416(ah)->ah_curchanRadIndex = -1;
173
174 return true;
175}
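/*
 * Worked example for the integer-N (fracMode == 0) branch above,
 * assuming a 20 MHz channel whose synth center is 5180 MHz:
 * 5180 % 20 == 0, so aModeRefSel = 3 and, with refDivA still 24,
 * ndiv = (5180 * (24 >> 3)) / 60 = 259.  channelFrac is therefore 0
 * and channelSel becomes 259 << 17, which is OR'd into
 * AR_PHY_SYNTH_CONTROL together with bMode = 0 and fracMode = 0.
 */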
176
177static void
178ath9k_phy_modify_rx_buffer(u32 *rfBuf, u32 reg32,
179 u32 numBits, u32 firstBit,
180 u32 column)
181{
182 u32 tmp32, mask, arrayEntry, lastBit;
183 int32_t bitPosition, bitsLeft;
184
185 tmp32 = ath9k_hw_reverse_bits(reg32, numBits);
186 arrayEntry = (firstBit - 1) / 8;
187 bitPosition = (firstBit - 1) % 8;
188 bitsLeft = numBits;
189 while (bitsLeft > 0) {
190 lastBit = (bitPosition + bitsLeft > 8) ?
191 8 : bitPosition + bitsLeft;
192 mask = (((1 << lastBit) - 1) ^ ((1 << bitPosition) - 1)) <<
193 (column * 8);
194 rfBuf[arrayEntry] &= ~mask;
195 rfBuf[arrayEntry] |= ((tmp32 << bitPosition) <<
196 (column * 8)) & mask;
197 bitsLeft -= 8 - bitPosition;
198 tmp32 = tmp32 >> (8 - bitPosition);
199 bitPosition = 0;
200 arrayEntry++;
201 }
202}
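/*
 * Worked example, using the ob2GHz write from ath9k_hw_set_rf_regs()
 * below (numBits = 3, firstBit = 197, column = 0):
 * arrayEntry = (197 - 1) / 8 = 24 and bitPosition = (197 - 1) % 8 = 4,
 * so lastBit = 7, mask = 0x70, and the bit-reversed 3-bit value is
 * placed in bits [6:4] of the column-0 byte of rfBuf[24] in a single
 * pass of the loop.
 */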
203
204bool
205ath9k_hw_set_rf_regs(struct ath_hal *ah, struct ath9k_channel *chan,
206 u16 modesIndex)
207{
208 struct ath_hal_5416 *ahp = AH5416(ah);
209
210 u32 eepMinorRev;
211 u32 ob5GHz = 0, db5GHz = 0;
212 u32 ob2GHz = 0, db2GHz = 0;
213 int regWrites = 0;
214
215 if (AR_SREV_9280_10_OR_LATER(ah))
216 return true;
217
218 eepMinorRev = ath9k_hw_get_eeprom(ahp, EEP_MINOR_REV);
219
220 RF_BANK_SETUP(ahp->ah_analogBank0Data, &ahp->ah_iniBank0, 1);
221
222 RF_BANK_SETUP(ahp->ah_analogBank1Data, &ahp->ah_iniBank1, 1);
223
224 RF_BANK_SETUP(ahp->ah_analogBank2Data, &ahp->ah_iniBank2, 1);
225
226 RF_BANK_SETUP(ahp->ah_analogBank3Data, &ahp->ah_iniBank3,
227 modesIndex);
228 {
229 int i;
230 for (i = 0; i < ahp->ah_iniBank6TPC.ia_rows; i++) {
231 ahp->ah_analogBank6Data[i] =
232 INI_RA(&ahp->ah_iniBank6TPC, i, modesIndex);
233 }
234 }
235
236 if (eepMinorRev >= 2) {
237 if (IS_CHAN_2GHZ(chan)) {
238 ob2GHz = ath9k_hw_get_eeprom(ahp, EEP_OB_2);
239 db2GHz = ath9k_hw_get_eeprom(ahp, EEP_DB_2);
240 ath9k_phy_modify_rx_buffer(ahp->ah_analogBank6Data,
241 ob2GHz, 3, 197, 0);
242 ath9k_phy_modify_rx_buffer(ahp->ah_analogBank6Data,
243 db2GHz, 3, 194, 0);
244 } else {
245 ob5GHz = ath9k_hw_get_eeprom(ahp, EEP_OB_5);
246 db5GHz = ath9k_hw_get_eeprom(ahp, EEP_DB_5);
247 ath9k_phy_modify_rx_buffer(ahp->ah_analogBank6Data,
248 ob5GHz, 3, 203, 0);
249 ath9k_phy_modify_rx_buffer(ahp->ah_analogBank6Data,
250 db5GHz, 3, 200, 0);
251 }
252 }
253
254 RF_BANK_SETUP(ahp->ah_analogBank7Data, &ahp->ah_iniBank7, 1);
255
256 REG_WRITE_RF_ARRAY(&ahp->ah_iniBank0, ahp->ah_analogBank0Data,
257 regWrites);
258 REG_WRITE_RF_ARRAY(&ahp->ah_iniBank1, ahp->ah_analogBank1Data,
259 regWrites);
260 REG_WRITE_RF_ARRAY(&ahp->ah_iniBank2, ahp->ah_analogBank2Data,
261 regWrites);
262 REG_WRITE_RF_ARRAY(&ahp->ah_iniBank3, ahp->ah_analogBank3Data,
263 regWrites);
264 REG_WRITE_RF_ARRAY(&ahp->ah_iniBank6TPC, ahp->ah_analogBank6Data,
265 regWrites);
266 REG_WRITE_RF_ARRAY(&ahp->ah_iniBank7, ahp->ah_analogBank7Data,
267 regWrites);
268
269 return true;
270}
271
272void
273ath9k_hw_rfdetach(struct ath_hal *ah)
274{
275 struct ath_hal_5416 *ahp = AH5416(ah);
276
277 if (ahp->ah_analogBank0Data != NULL) {
278 kfree(ahp->ah_analogBank0Data);
279 ahp->ah_analogBank0Data = NULL;
280 }
281 if (ahp->ah_analogBank1Data != NULL) {
282 kfree(ahp->ah_analogBank1Data);
283 ahp->ah_analogBank1Data = NULL;
284 }
285 if (ahp->ah_analogBank2Data != NULL) {
286 kfree(ahp->ah_analogBank2Data);
287 ahp->ah_analogBank2Data = NULL;
288 }
289 if (ahp->ah_analogBank3Data != NULL) {
290 kfree(ahp->ah_analogBank3Data);
291 ahp->ah_analogBank3Data = NULL;
292 }
293 if (ahp->ah_analogBank6Data != NULL) {
294 kfree(ahp->ah_analogBank6Data);
295 ahp->ah_analogBank6Data = NULL;
296 }
297 if (ahp->ah_analogBank6TPCData != NULL) {
298 kfree(ahp->ah_analogBank6TPCData);
299 ahp->ah_analogBank6TPCData = NULL;
300 }
301 if (ahp->ah_analogBank7Data != NULL) {
302 kfree(ahp->ah_analogBank7Data);
303 ahp->ah_analogBank7Data = NULL;
304 }
305 if (ahp->ah_addac5416_21 != NULL) {
306 kfree(ahp->ah_addac5416_21);
307 ahp->ah_addac5416_21 = NULL;
308 }
309 if (ahp->ah_bank6Temp != NULL) {
310 kfree(ahp->ah_bank6Temp);
311 ahp->ah_bank6Temp = NULL;
312 }
313}
314
315bool ath9k_hw_init_rf(struct ath_hal *ah, int *status)
316{
317 struct ath_hal_5416 *ahp = AH5416(ah);
318
319 if (!AR_SREV_9280_10_OR_LATER(ah)) {
320
321 ahp->ah_analogBank0Data =
322 kzalloc((sizeof(u32) *
323 ahp->ah_iniBank0.ia_rows), GFP_KERNEL);
324 ahp->ah_analogBank1Data =
325 kzalloc((sizeof(u32) *
326 ahp->ah_iniBank1.ia_rows), GFP_KERNEL);
327 ahp->ah_analogBank2Data =
328 kzalloc((sizeof(u32) *
329 ahp->ah_iniBank2.ia_rows), GFP_KERNEL);
330 ahp->ah_analogBank3Data =
331 kzalloc((sizeof(u32) *
332 ahp->ah_iniBank3.ia_rows), GFP_KERNEL);
333 ahp->ah_analogBank6Data =
334 kzalloc((sizeof(u32) *
335 ahp->ah_iniBank6.ia_rows), GFP_KERNEL);
336 ahp->ah_analogBank6TPCData =
337 kzalloc((sizeof(u32) *
338 ahp->ah_iniBank6TPC.ia_rows), GFP_KERNEL);
339 ahp->ah_analogBank7Data =
340 kzalloc((sizeof(u32) *
341 ahp->ah_iniBank7.ia_rows), GFP_KERNEL);
342
343 if (ahp->ah_analogBank0Data == NULL
344 || ahp->ah_analogBank1Data == NULL
345 || ahp->ah_analogBank2Data == NULL
346 || ahp->ah_analogBank3Data == NULL
347 || ahp->ah_analogBank6Data == NULL
348 || ahp->ah_analogBank6TPCData == NULL
349 || ahp->ah_analogBank7Data == NULL) {
350 DPRINTF(ah->ah_sc, ATH_DBG_FATAL,
351 "%s: cannot allocate RF banks\n",
352 __func__);
353 *status = -ENOMEM;
354 return false;
355 }
356
357 ahp->ah_addac5416_21 =
358 kzalloc((sizeof(u32) *
359 ahp->ah_iniAddac.ia_rows *
360 ahp->ah_iniAddac.ia_columns), GFP_KERNEL);
361 if (ahp->ah_addac5416_21 == NULL) {
362 DPRINTF(ah->ah_sc, ATH_DBG_FATAL,
363 "%s: cannot allocate ah_addac5416_21\n",
364 __func__);
365 *status = -ENOMEM;
366 return false;
367 }
368
369 ahp->ah_bank6Temp =
370 kzalloc((sizeof(u32) *
371 ahp->ah_iniBank6.ia_rows), GFP_KERNEL);
372 if (ahp->ah_bank6Temp == NULL) {
373 DPRINTF(ah->ah_sc, ATH_DBG_FATAL,
374 "%s: cannot allocate ah_bank6Temp\n",
375 __func__);
376 *status = -ENOMEM;
377 return false;
378 }
379 }
380
381 return true;
382}
383
384void
385ath9k_hw_decrease_chain_power(struct ath_hal *ah, struct ath9k_channel *chan)
386{
387 int i, regWrites = 0;
388 struct ath_hal_5416 *ahp = AH5416(ah);
389 u32 bank6SelMask;
390 u32 *bank6Temp = ahp->ah_bank6Temp;
391
392 switch (ahp->ah_diversityControl) {
393 case ATH9K_ANT_FIXED_A:
394		bank6SelMask =
395			(ahp->ah_antennaSwitchSwap & ANTSWAP_AB) ?
396			REDUCE_CHAIN_0 :
397			REDUCE_CHAIN_1;
398 break;
399 case ATH9K_ANT_FIXED_B:
400		bank6SelMask =
401			(ahp->ah_antennaSwitchSwap & ANTSWAP_AB) ?
402			REDUCE_CHAIN_1 :
403			REDUCE_CHAIN_0;
404 break;
405 case ATH9K_ANT_VARIABLE:
406 return;
407 break;
408 default:
409 return;
410 break;
411 }
412
413 for (i = 0; i < ahp->ah_iniBank6.ia_rows; i++)
414 bank6Temp[i] = ahp->ah_analogBank6Data[i];
415
416 REG_WRITE(ah, AR_PHY_BASE + 0xD8, bank6SelMask);
417
418 ath9k_phy_modify_rx_buffer(bank6Temp, 1, 1, 189, 0);
419 ath9k_phy_modify_rx_buffer(bank6Temp, 1, 1, 190, 0);
420 ath9k_phy_modify_rx_buffer(bank6Temp, 1, 1, 191, 0);
421 ath9k_phy_modify_rx_buffer(bank6Temp, 1, 1, 192, 0);
422 ath9k_phy_modify_rx_buffer(bank6Temp, 1, 1, 193, 0);
423 ath9k_phy_modify_rx_buffer(bank6Temp, 1, 1, 222, 0);
424 ath9k_phy_modify_rx_buffer(bank6Temp, 1, 1, 245, 0);
425 ath9k_phy_modify_rx_buffer(bank6Temp, 1, 1, 246, 0);
426 ath9k_phy_modify_rx_buffer(bank6Temp, 1, 1, 247, 0);
427
428 REG_WRITE_RF_ARRAY(&ahp->ah_iniBank6, bank6Temp, regWrites);
429
430 REG_WRITE(ah, AR_PHY_BASE + 0xD8, 0x00000053);
431#ifdef ALTER_SWITCH
432 REG_WRITE(ah, PHY_SWITCH_CHAIN_0,
433 (REG_READ(ah, PHY_SWITCH_CHAIN_0) & ~0x38)
434 | ((REG_READ(ah, PHY_SWITCH_CHAIN_0) >> 3) & 0x38));
435#endif
436}
diff --git a/drivers/net/wireless/ath9k/phy.h b/drivers/net/wireless/ath9k/phy.h
new file mode 100644
index 000000000000..0cd399a5344a
--- /dev/null
+++ b/drivers/net/wireless/ath9k/phy.h
@@ -0,0 +1,543 @@
1/*
2 * Copyright (c) 2008 Atheros Communications Inc.
3 *
4 * Permission to use, copy, modify, and/or distribute this software for any
5 * purpose with or without fee is hereby granted, provided that the above
6 * copyright notice and this permission notice appear in all copies.
7 *
8 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
9 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
10 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
11 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
12 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
13 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
14 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
15 */
16
17#ifndef PHY_H
18#define PHY_H
19
20bool ath9k_hw_ar9280_set_channel(struct ath_hal *ah,
21 struct ath9k_channel
22 *chan);
23bool ath9k_hw_set_channel(struct ath_hal *ah,
24 struct ath9k_channel *chan);
25void ath9k_hw_write_regs(struct ath_hal *ah, u32 modesIndex,
26 u32 freqIndex, int regWrites);
27bool ath9k_hw_set_rf_regs(struct ath_hal *ah,
28 struct ath9k_channel *chan,
29 u16 modesIndex);
30void ath9k_hw_decrease_chain_power(struct ath_hal *ah,
31 struct ath9k_channel *chan);
32bool ath9k_hw_init_rf(struct ath_hal *ah,
33 int *status);
34
35#define AR_PHY_BASE 0x9800
36#define AR_PHY(_n) (AR_PHY_BASE + ((_n)<<2))
37
38#define AR_PHY_TEST 0x9800
39#define PHY_AGC_CLR 0x10000000
40#define RFSILENT_BB 0x00002000
41
42#define AR_PHY_TURBO 0x9804
43#define AR_PHY_FC_TURBO_MODE 0x00000001
44#define AR_PHY_FC_TURBO_SHORT 0x00000002
45#define AR_PHY_FC_DYN2040_EN 0x00000004
46#define AR_PHY_FC_DYN2040_PRI_ONLY 0x00000008
47#define AR_PHY_FC_DYN2040_PRI_CH 0x00000010
48#define AR_PHY_FC_DYN2040_EXT_CH 0x00000020
49#define AR_PHY_FC_HT_EN 0x00000040
50#define AR_PHY_FC_SHORT_GI_40 0x00000080
51#define AR_PHY_FC_WALSH 0x00000100
52#define AR_PHY_FC_SINGLE_HT_LTF1 0x00000200
53
54#define AR_PHY_TIMING2 0x9810
55#define AR_PHY_TIMING3 0x9814
56#define AR_PHY_TIMING3_DSC_MAN 0xFFFE0000
57#define AR_PHY_TIMING3_DSC_MAN_S 17
58#define AR_PHY_TIMING3_DSC_EXP 0x0001E000
59#define AR_PHY_TIMING3_DSC_EXP_S 13
60
61#define AR_PHY_CHIP_ID 0x9818
62#define AR_PHY_CHIP_ID_REV_0 0x80
63#define AR_PHY_CHIP_ID_REV_1 0x81
64#define AR_PHY_CHIP_ID_9160_REV_0 0xb0
65
66#define AR_PHY_ACTIVE 0x981C
67#define AR_PHY_ACTIVE_EN 0x00000001
68#define AR_PHY_ACTIVE_DIS 0x00000000
69
70#define AR_PHY_RF_CTL2 0x9824
71#define AR_PHY_TX_END_DATA_START 0x000000FF
72#define AR_PHY_TX_END_DATA_START_S 0
73#define AR_PHY_TX_END_PA_ON 0x0000FF00
74#define AR_PHY_TX_END_PA_ON_S 8
75
76#define AR_PHY_RF_CTL3 0x9828
77#define AR_PHY_TX_END_TO_A2_RX_ON 0x00FF0000
78#define AR_PHY_TX_END_TO_A2_RX_ON_S 16
79
80#define AR_PHY_ADC_CTL 0x982C
81#define AR_PHY_ADC_CTL_OFF_INBUFGAIN 0x00000003
82#define AR_PHY_ADC_CTL_OFF_INBUFGAIN_S 0
83#define AR_PHY_ADC_CTL_OFF_PWDDAC 0x00002000
84#define AR_PHY_ADC_CTL_OFF_PWDBANDGAP 0x00004000
85#define AR_PHY_ADC_CTL_OFF_PWDADC 0x00008000
86#define AR_PHY_ADC_CTL_ON_INBUFGAIN 0x00030000
87#define AR_PHY_ADC_CTL_ON_INBUFGAIN_S 16
88
89#define AR_PHY_ADC_SERIAL_CTL 0x9830
90#define AR_PHY_SEL_INTERNAL_ADDAC 0x00000000
91#define AR_PHY_SEL_EXTERNAL_RADIO 0x00000001
92
93#define AR_PHY_RF_CTL4 0x9834
94#define AR_PHY_RF_CTL4_TX_END_XPAB_OFF 0xFF000000
95#define AR_PHY_RF_CTL4_TX_END_XPAB_OFF_S 24
96#define AR_PHY_RF_CTL4_TX_END_XPAA_OFF 0x00FF0000
97#define AR_PHY_RF_CTL4_TX_END_XPAA_OFF_S 16
98#define AR_PHY_RF_CTL4_FRAME_XPAB_ON 0x0000FF00
99#define AR_PHY_RF_CTL4_FRAME_XPAB_ON_S 8
100#define AR_PHY_RF_CTL4_FRAME_XPAA_ON 0x000000FF
101#define AR_PHY_RF_CTL4_FRAME_XPAA_ON_S 0
102
103#define AR_PHY_SETTLING 0x9844
104#define AR_PHY_SETTLING_SWITCH 0x00003F80
105#define AR_PHY_SETTLING_SWITCH_S 7
106
107#define AR_PHY_RXGAIN 0x9848
108#define AR_PHY_RXGAIN_TXRX_ATTEN 0x0003F000
109#define AR_PHY_RXGAIN_TXRX_ATTEN_S 12
110#define AR_PHY_RXGAIN_TXRX_RF_MAX 0x007C0000
111#define AR_PHY_RXGAIN_TXRX_RF_MAX_S 18
112#define AR9280_PHY_RXGAIN_TXRX_ATTEN 0x00003F80
113#define AR9280_PHY_RXGAIN_TXRX_ATTEN_S 7
114#define AR9280_PHY_RXGAIN_TXRX_MARGIN 0x001FC000
115#define AR9280_PHY_RXGAIN_TXRX_MARGIN_S 14
116
117#define AR_PHY_DESIRED_SZ 0x9850
118#define AR_PHY_DESIRED_SZ_ADC 0x000000FF
119#define AR_PHY_DESIRED_SZ_ADC_S 0
120#define AR_PHY_DESIRED_SZ_PGA 0x0000FF00
121#define AR_PHY_DESIRED_SZ_PGA_S 8
122#define AR_PHY_DESIRED_SZ_TOT_DES 0x0FF00000
123#define AR_PHY_DESIRED_SZ_TOT_DES_S 20
124
125#define AR_PHY_FIND_SIG 0x9858
126#define AR_PHY_FIND_SIG_FIRSTEP 0x0003F000
127#define AR_PHY_FIND_SIG_FIRSTEP_S 12
128#define AR_PHY_FIND_SIG_FIRPWR 0x03FC0000
129#define AR_PHY_FIND_SIG_FIRPWR_S 18
130
131#define AR_PHY_AGC_CTL1 0x985C
132#define AR_PHY_AGC_CTL1_COARSE_LOW 0x00007F80
133#define AR_PHY_AGC_CTL1_COARSE_LOW_S 7
134#define AR_PHY_AGC_CTL1_COARSE_HIGH 0x003F8000
135#define AR_PHY_AGC_CTL1_COARSE_HIGH_S 15
136
137#define AR_PHY_AGC_CONTROL 0x9860
138#define AR_PHY_AGC_CONTROL_CAL 0x00000001
139#define AR_PHY_AGC_CONTROL_NF 0x00000002
140#define AR_PHY_AGC_CONTROL_ENABLE_NF 0x00008000
141#define AR_PHY_AGC_CONTROL_FLTR_CAL 0x00010000
142#define AR_PHY_AGC_CONTROL_NO_UPDATE_NF 0x00020000
143
144#define AR_PHY_CCA 0x9864
145#define AR_PHY_MINCCA_PWR 0x0FF80000
146#define AR_PHY_MINCCA_PWR_S 19
147#define AR_PHY_CCA_THRESH62 0x0007F000
148#define AR_PHY_CCA_THRESH62_S 12
149#define AR9280_PHY_MINCCA_PWR 0x1FF00000
150#define AR9280_PHY_MINCCA_PWR_S 20
151#define AR9280_PHY_CCA_THRESH62 0x000FF000
152#define AR9280_PHY_CCA_THRESH62_S 12
153
154#define AR_PHY_SFCORR_LOW 0x986C
155#define AR_PHY_SFCORR_LOW_USE_SELF_CORR_LOW 0x00000001
156#define AR_PHY_SFCORR_LOW_M2COUNT_THR_LOW 0x00003F00
157#define AR_PHY_SFCORR_LOW_M2COUNT_THR_LOW_S 8
158#define AR_PHY_SFCORR_LOW_M1_THRESH_LOW 0x001FC000
159#define AR_PHY_SFCORR_LOW_M1_THRESH_LOW_S 14
160#define AR_PHY_SFCORR_LOW_M2_THRESH_LOW 0x0FE00000
161#define AR_PHY_SFCORR_LOW_M2_THRESH_LOW_S 21
162
163#define AR_PHY_SFCORR 0x9868
164#define AR_PHY_SFCORR_M2COUNT_THR 0x0000001F
165#define AR_PHY_SFCORR_M2COUNT_THR_S 0
166#define AR_PHY_SFCORR_M1_THRESH 0x00FE0000
167#define AR_PHY_SFCORR_M1_THRESH_S 17
168#define AR_PHY_SFCORR_M2_THRESH 0x7F000000
169#define AR_PHY_SFCORR_M2_THRESH_S 24
170
171#define AR_PHY_SLEEP_CTR_CONTROL 0x9870
172#define AR_PHY_SLEEP_CTR_LIMIT 0x9874
173#define AR_PHY_SYNTH_CONTROL 0x9874
174#define AR_PHY_SLEEP_SCAL 0x9878
175
176#define AR_PHY_PLL_CTL 0x987c
177#define AR_PHY_PLL_CTL_40 0xaa
178#define AR_PHY_PLL_CTL_40_5413 0x04
179#define AR_PHY_PLL_CTL_44 0xab
180#define AR_PHY_PLL_CTL_44_2133 0xeb
181#define AR_PHY_PLL_CTL_40_2133 0xea
182
183#define AR_PHY_RX_DELAY 0x9914
184#define AR_PHY_SEARCH_START_DELAY 0x9918
185#define AR_PHY_RX_DELAY_DELAY 0x00003FFF
186
187#define AR_PHY_TIMING_CTRL4(_i) (0x9920 + ((_i) << 12))
188#define AR_PHY_TIMING_CTRL4_IQCORR_Q_Q_COFF 0x01F
189#define AR_PHY_TIMING_CTRL4_IQCORR_Q_Q_COFF_S 0
190#define AR_PHY_TIMING_CTRL4_IQCORR_Q_I_COFF 0x7E0
191#define AR_PHY_TIMING_CTRL4_IQCORR_Q_I_COFF_S 5
192#define AR_PHY_TIMING_CTRL4_IQCORR_ENABLE 0x800
193#define AR_PHY_TIMING_CTRL4_IQCAL_LOG_COUNT_MAX 0xF000
194#define AR_PHY_TIMING_CTRL4_IQCAL_LOG_COUNT_MAX_S 12
195#define AR_PHY_TIMING_CTRL4_DO_CAL 0x10000
196
197#define AR_PHY_TIMING_CTRL4_ENABLE_SPUR_RSSI 0x80000000
198#define AR_PHY_TIMING_CTRL4_ENABLE_SPUR_FILTER 0x40000000
199#define AR_PHY_TIMING_CTRL4_ENABLE_CHAN_MASK 0x20000000
200#define AR_PHY_TIMING_CTRL4_ENABLE_PILOT_MASK 0x10000000
201
202#define AR_PHY_TIMING5 0x9924
203#define AR_PHY_TIMING5_CYCPWR_THR1 0x000000FE
204#define AR_PHY_TIMING5_CYCPWR_THR1_S 1
205
206#define AR_PHY_POWER_TX_RATE1 0x9934
207#define AR_PHY_POWER_TX_RATE2 0x9938
208#define AR_PHY_POWER_TX_RATE_MAX 0x993c
209#define AR_PHY_POWER_TX_RATE_MAX_TPC_ENABLE 0x00000040
210
211#define AR_PHY_FRAME_CTL 0x9944
212#define AR_PHY_FRAME_CTL_TX_CLIP 0x00000038
213#define AR_PHY_FRAME_CTL_TX_CLIP_S 3
214
215#define AR_PHY_TXPWRADJ 0x994C
216#define AR_PHY_TXPWRADJ_CCK_GAIN_DELTA 0x00000FC0
217#define AR_PHY_TXPWRADJ_CCK_GAIN_DELTA_S 6
218#define AR_PHY_TXPWRADJ_CCK_PCDAC_INDEX 0x00FC0000
219#define AR_PHY_TXPWRADJ_CCK_PCDAC_INDEX_S 18
220
221#define AR_PHY_RADAR_EXT 0x9940
222#define AR_PHY_RADAR_EXT_ENA 0x00004000
223
224#define AR_PHY_RADAR_0 0x9954
225#define AR_PHY_RADAR_0_ENA 0x00000001
226#define AR_PHY_RADAR_0_FFT_ENA 0x80000000
227#define AR_PHY_RADAR_0_INBAND 0x0000003e
228#define AR_PHY_RADAR_0_INBAND_S 1
229#define AR_PHY_RADAR_0_PRSSI 0x00000FC0
230#define AR_PHY_RADAR_0_PRSSI_S 6
231#define AR_PHY_RADAR_0_HEIGHT 0x0003F000
232#define AR_PHY_RADAR_0_HEIGHT_S 12
233#define AR_PHY_RADAR_0_RRSSI 0x00FC0000
234#define AR_PHY_RADAR_0_RRSSI_S 18
235#define AR_PHY_RADAR_0_FIRPWR 0x7F000000
236#define AR_PHY_RADAR_0_FIRPWR_S 24
237
238#define AR_PHY_RADAR_1 0x9958
239#define AR_PHY_RADAR_1_RELPWR_ENA 0x00800000
240#define AR_PHY_RADAR_1_USE_FIR128 0x00400000
241#define AR_PHY_RADAR_1_RELPWR_THRESH 0x003F0000
242#define AR_PHY_RADAR_1_RELPWR_THRESH_S 16
243#define AR_PHY_RADAR_1_BLOCK_CHECK 0x00008000
244#define AR_PHY_RADAR_1_MAX_RRSSI 0x00004000
245#define AR_PHY_RADAR_1_RELSTEP_CHECK 0x00002000
246#define AR_PHY_RADAR_1_RELSTEP_THRESH 0x00001F00
247#define AR_PHY_RADAR_1_RELSTEP_THRESH_S 8
248#define AR_PHY_RADAR_1_MAXLEN 0x000000FF
249#define AR_PHY_RADAR_1_MAXLEN_S 0
250
251#define AR_PHY_SWITCH_CHAIN_0 0x9960
252#define AR_PHY_SWITCH_COM 0x9964
253
254#define AR_PHY_SIGMA_DELTA 0x996C
255#define AR_PHY_SIGMA_DELTA_ADC_SEL 0x00000003
256#define AR_PHY_SIGMA_DELTA_ADC_SEL_S 0
257#define AR_PHY_SIGMA_DELTA_FILT2 0x000000F8
258#define AR_PHY_SIGMA_DELTA_FILT2_S 3
259#define AR_PHY_SIGMA_DELTA_FILT1 0x00001F00
260#define AR_PHY_SIGMA_DELTA_FILT1_S 8
261#define AR_PHY_SIGMA_DELTA_ADC_CLIP 0x01FFE000
262#define AR_PHY_SIGMA_DELTA_ADC_CLIP_S 13
263
264#define AR_PHY_RESTART 0x9970
265#define AR_PHY_RESTART_DIV_GC 0x001C0000
266#define AR_PHY_RESTART_DIV_GC_S 18
267
268#define AR_PHY_RFBUS_REQ 0x997C
269#define AR_PHY_RFBUS_REQ_EN 0x00000001
270
271#define AR_PHY_TIMING7 0x9980
272#define AR_PHY_TIMING8 0x9984
273#define AR_PHY_TIMING8_PILOT_MASK_2 0x000FFFFF
274#define AR_PHY_TIMING8_PILOT_MASK_2_S 0
275
276#define AR_PHY_BIN_MASK2_1 0x9988
277#define AR_PHY_BIN_MASK2_2 0x998c
278#define AR_PHY_BIN_MASK2_3 0x9990
279#define AR_PHY_BIN_MASK2_4 0x9994
280
281#define AR_PHY_BIN_MASK_1 0x9900
282#define AR_PHY_BIN_MASK_2 0x9904
283#define AR_PHY_BIN_MASK_3 0x9908
284
285#define AR_PHY_MASK_CTL 0x990c
286
287#define AR_PHY_BIN_MASK2_4_MASK_4 0x00003FFF
288#define AR_PHY_BIN_MASK2_4_MASK_4_S 0
289
290#define AR_PHY_TIMING9 0x9998
291#define AR_PHY_TIMING10 0x999c
292#define AR_PHY_TIMING10_PILOT_MASK_2 0x000FFFFF
293#define AR_PHY_TIMING10_PILOT_MASK_2_S 0
294
295#define AR_PHY_TIMING11 0x99a0
296#define AR_PHY_TIMING11_SPUR_DELTA_PHASE 0x000FFFFF
297#define AR_PHY_TIMING11_SPUR_DELTA_PHASE_S 0
298#define AR_PHY_TIMING11_SPUR_FREQ_SD 0x3FF00000
299#define AR_PHY_TIMING11_SPUR_FREQ_SD_S 20
300#define AR_PHY_TIMING11_USE_SPUR_IN_AGC 0x40000000
301#define AR_PHY_TIMING11_USE_SPUR_IN_SELFCOR 0x80000000
302
303#define AR_PHY_RX_CHAINMASK 0x99a4
304#define AR_PHY_NEW_ADC_DC_GAIN_CORR(_i) (0x99b4 + ((_i) << 12))
305#define AR_PHY_NEW_ADC_GAIN_CORR_ENABLE 0x40000000
306#define AR_PHY_NEW_ADC_DC_OFFSET_CORR_ENABLE 0x80000000
307#define AR_PHY_MULTICHAIN_GAIN_CTL 0x99ac
308
309#define AR_PHY_EXT_CCA0 0x99b8
310#define AR_PHY_EXT_CCA0_THRESH62 0x000000FF
311#define AR_PHY_EXT_CCA0_THRESH62_S 0
312
313#define AR_PHY_EXT_CCA 0x99bc
314#define AR_PHY_EXT_CCA_CYCPWR_THR1 0x0000FE00
315#define AR_PHY_EXT_CCA_CYCPWR_THR1_S 9
316#define AR_PHY_EXT_CCA_THRESH62 0x007F0000
317#define AR_PHY_EXT_CCA_THRESH62_S 16
318#define AR_PHY_EXT_MINCCA_PWR 0xFF800000
319#define AR_PHY_EXT_MINCCA_PWR_S 23
320#define AR9280_PHY_EXT_MINCCA_PWR 0x01FF0000
321#define AR9280_PHY_EXT_MINCCA_PWR_S 16
322
323#define AR_PHY_SFCORR_EXT 0x99c0
324#define AR_PHY_SFCORR_EXT_M1_THRESH 0x0000007F
325#define AR_PHY_SFCORR_EXT_M1_THRESH_S 0
326#define AR_PHY_SFCORR_EXT_M2_THRESH 0x00003F80
327#define AR_PHY_SFCORR_EXT_M2_THRESH_S 7
328#define AR_PHY_SFCORR_EXT_M1_THRESH_LOW 0x001FC000
329#define AR_PHY_SFCORR_EXT_M1_THRESH_LOW_S 14
330#define AR_PHY_SFCORR_EXT_M2_THRESH_LOW 0x0FE00000
331#define AR_PHY_SFCORR_EXT_M2_THRESH_LOW_S 21
332#define AR_PHY_SFCORR_SPUR_SUBCHNL_SD_S 28
333
334#define AR_PHY_HALFGI 0x99D0
335#define AR_PHY_HALFGI_DSC_MAN 0x0007FFF0
336#define AR_PHY_HALFGI_DSC_MAN_S 4
337#define AR_PHY_HALFGI_DSC_EXP 0x0000000F
338#define AR_PHY_HALFGI_DSC_EXP_S 0
339
340#define AR_PHY_CHAN_INFO_MEMORY 0x99DC
341#define AR_PHY_CHAN_INFO_MEMORY_CAPTURE_MASK 0x0001
342
343#define AR_PHY_HEAVY_CLIP_ENABLE 0x99E0
344
345#define AR_PHY_M_SLEEP 0x99f0
346#define AR_PHY_REFCLKDLY 0x99f4
347#define AR_PHY_REFCLKPD 0x99f8
348
349#define AR_PHY_CALMODE 0x99f0
350
351#define AR_PHY_CALMODE_IQ 0x00000000
352#define AR_PHY_CALMODE_ADC_GAIN 0x00000001
353#define AR_PHY_CALMODE_ADC_DC_PER 0x00000002
354#define AR_PHY_CALMODE_ADC_DC_INIT 0x00000003
355
356#define AR_PHY_CAL_MEAS_0(_i) (0x9c10 + ((_i) << 12))
357#define AR_PHY_CAL_MEAS_1(_i) (0x9c14 + ((_i) << 12))
358#define AR_PHY_CAL_MEAS_2(_i) (0x9c18 + ((_i) << 12))
359#define AR_PHY_CAL_MEAS_3(_i) (0x9c1c + ((_i) << 12))
360
361#define AR_PHY_CURRENT_RSSI 0x9c1c
362#define AR9280_PHY_CURRENT_RSSI 0x9c3c
363
364#define AR_PHY_RFBUS_GRANT 0x9C20
365#define AR_PHY_RFBUS_GRANT_EN 0x00000001
366
367#define AR_PHY_CHAN_INFO_GAIN_DIFF 0x9CF4
368#define AR_PHY_CHAN_INFO_GAIN_DIFF_UPPER_LIMIT 320
369
370#define AR_PHY_CHAN_INFO_GAIN 0x9CFC
371
372#define AR_PHY_MODE 0xA200
373#define AR_PHY_MODE_AR2133 0x08
374#define AR_PHY_MODE_AR5111 0x00
375#define AR_PHY_MODE_AR5112 0x08
376#define AR_PHY_MODE_DYNAMIC 0x04
377#define AR_PHY_MODE_RF2GHZ 0x02
378#define AR_PHY_MODE_RF5GHZ 0x00
379#define AR_PHY_MODE_CCK 0x01
380#define AR_PHY_MODE_OFDM 0x00
381#define AR_PHY_MODE_DYN_CCK_DISABLE 0x100
382
383#define AR_PHY_CCK_TX_CTRL 0xA204
384#define AR_PHY_CCK_TX_CTRL_JAPAN 0x00000010
385
386#define AR_PHY_CCK_DETECT 0xA208
387#define AR_PHY_CCK_DETECT_WEAK_SIG_THR_CCK 0x0000003F
388#define AR_PHY_CCK_DETECT_WEAK_SIG_THR_CCK_S 0
389/* [12:6] settling time for antenna switch */
390#define AR_PHY_CCK_DETECT_ANT_SWITCH_TIME 0x00001FC0
391#define AR_PHY_CCK_DETECT_ANT_SWITCH_TIME_S 6
392#define AR_PHY_CCK_DETECT_BB_ENABLE_ANT_FAST_DIV 0x2000
393
394#define AR_PHY_GAIN_2GHZ 0xA20C
395#define AR_PHY_GAIN_2GHZ_RXTX_MARGIN 0x00FC0000
396#define AR_PHY_GAIN_2GHZ_RXTX_MARGIN_S 18
397#define AR_PHY_GAIN_2GHZ_BSW_MARGIN 0x00003C00
398#define AR_PHY_GAIN_2GHZ_BSW_MARGIN_S 10
399#define AR_PHY_GAIN_2GHZ_BSW_ATTEN 0x0000001F
400#define AR_PHY_GAIN_2GHZ_BSW_ATTEN_S 0
401
402#define AR_PHY_GAIN_2GHZ_XATTEN2_MARGIN 0x003E0000
403#define AR_PHY_GAIN_2GHZ_XATTEN2_MARGIN_S 17
404#define AR_PHY_GAIN_2GHZ_XATTEN1_MARGIN 0x0001F000
405#define AR_PHY_GAIN_2GHZ_XATTEN1_MARGIN_S 12
406#define AR_PHY_GAIN_2GHZ_XATTEN2_DB 0x00000FC0
407#define AR_PHY_GAIN_2GHZ_XATTEN2_DB_S 6
408#define AR_PHY_GAIN_2GHZ_XATTEN1_DB 0x0000003F
409#define AR_PHY_GAIN_2GHZ_XATTEN1_DB_S 0
410
411#define AR_PHY_CCK_RXCTRL4 0xA21C
412#define AR_PHY_CCK_RXCTRL4_FREQ_EST_SHORT 0x01F80000
413#define AR_PHY_CCK_RXCTRL4_FREQ_EST_SHORT_S 19
414
415#define AR_PHY_DAG_CTRLCCK 0xA228
416#define AR_PHY_DAG_CTRLCCK_EN_RSSI_THR 0x00000200
417#define AR_PHY_DAG_CTRLCCK_RSSI_THR 0x0001FC00
418#define AR_PHY_DAG_CTRLCCK_RSSI_THR_S 10
419
420#define AR_PHY_FORCE_CLKEN_CCK 0xA22C
421#define AR_PHY_FORCE_CLKEN_CCK_MRC_MUX 0x00000040
422
423#define AR_PHY_POWER_TX_RATE3 0xA234
424#define AR_PHY_POWER_TX_RATE4 0xA238
425
426#define AR_PHY_SCRM_SEQ_XR 0xA23C
427#define AR_PHY_HEADER_DETECT_XR 0xA240
428#define AR_PHY_CHIRP_DETECTED_XR 0xA244
429#define AR_PHY_BLUETOOTH 0xA254
430
431#define AR_PHY_TPCRG1 0xA258
432#define AR_PHY_TPCRG1_NUM_PD_GAIN 0x0000c000
433#define AR_PHY_TPCRG1_NUM_PD_GAIN_S 14
434
435#define AR_PHY_TPCRG1_PD_GAIN_1 0x00030000
436#define AR_PHY_TPCRG1_PD_GAIN_1_S 16
437#define AR_PHY_TPCRG1_PD_GAIN_2 0x000C0000
438#define AR_PHY_TPCRG1_PD_GAIN_2_S 18
439#define AR_PHY_TPCRG1_PD_GAIN_3 0x00300000
440#define AR_PHY_TPCRG1_PD_GAIN_3_S 20
441
442#define AR_PHY_VIT_MASK2_M_46_61 0xa3a0
443#define AR_PHY_MASK2_M_31_45 0xa3a4
444#define AR_PHY_MASK2_M_16_30 0xa3a8
445#define AR_PHY_MASK2_M_00_15 0xa3ac
446#define AR_PHY_MASK2_P_15_01 0xa3b8
447#define AR_PHY_MASK2_P_30_16 0xa3bc
448#define AR_PHY_MASK2_P_45_31 0xa3c0
449#define AR_PHY_MASK2_P_61_45 0xa3c4
450#define AR_PHY_SPUR_REG 0x994c
451
452#define AR_PHY_SPUR_REG_MASK_RATE_CNTL (0xFF << 18)
453#define AR_PHY_SPUR_REG_MASK_RATE_CNTL_S 18
454
455#define AR_PHY_SPUR_REG_ENABLE_MASK_PPM 0x20000
456#define AR_PHY_SPUR_REG_MASK_RATE_SELECT (0xFF << 9)
457#define AR_PHY_SPUR_REG_MASK_RATE_SELECT_S 9
458#define AR_PHY_SPUR_REG_ENABLE_VIT_SPUR_RSSI 0x100
459#define AR_PHY_SPUR_REG_SPUR_RSSI_THRESH 0x7F
460#define AR_PHY_SPUR_REG_SPUR_RSSI_THRESH_S 0
461
462#define AR_PHY_PILOT_MASK_01_30 0xa3b0
463#define AR_PHY_PILOT_MASK_31_60 0xa3b4
464
465#define AR_PHY_CHANNEL_MASK_01_30 0x99d4
466#define AR_PHY_CHANNEL_MASK_31_60 0x99d8
467
468#define AR_PHY_ANALOG_SWAP 0xa268
469#define AR_PHY_SWAP_ALT_CHAIN 0x00000040
470
471#define AR_PHY_TPCRG5 0xA26C
472#define AR_PHY_TPCRG5_PD_GAIN_OVERLAP 0x0000000F
473#define AR_PHY_TPCRG5_PD_GAIN_OVERLAP_S 0
474#define AR_PHY_TPCRG5_PD_GAIN_BOUNDARY_1 0x000003F0
475#define AR_PHY_TPCRG5_PD_GAIN_BOUNDARY_1_S 4
476#define AR_PHY_TPCRG5_PD_GAIN_BOUNDARY_2 0x0000FC00
477#define AR_PHY_TPCRG5_PD_GAIN_BOUNDARY_2_S 10
478#define AR_PHY_TPCRG5_PD_GAIN_BOUNDARY_3 0x003F0000
479#define AR_PHY_TPCRG5_PD_GAIN_BOUNDARY_3_S 16
480#define AR_PHY_TPCRG5_PD_GAIN_BOUNDARY_4 0x0FC00000
481#define AR_PHY_TPCRG5_PD_GAIN_BOUNDARY_4_S 22
482
483#define AR_PHY_POWER_TX_RATE5 0xA38C
484#define AR_PHY_POWER_TX_RATE6 0xA390
485
486#define AR_PHY_CAL_CHAINMASK 0xA39C
487
488#define AR_PHY_POWER_TX_SUB 0xA3C8
489#define AR_PHY_POWER_TX_RATE7 0xA3CC
490#define AR_PHY_POWER_TX_RATE8 0xA3D0
491#define AR_PHY_POWER_TX_RATE9 0xA3D4
492
493#define AR_PHY_XPA_CFG 0xA3D8
494#define AR_PHY_FORCE_XPA_CFG 0x000000001
495#define AR_PHY_FORCE_XPA_CFG_S 0
496
497#define AR_PHY_CH1_CCA 0xa864
498#define AR_PHY_CH1_MINCCA_PWR 0x0FF80000
499#define AR_PHY_CH1_MINCCA_PWR_S 19
500#define AR9280_PHY_CH1_MINCCA_PWR 0x1FF00000
501#define AR9280_PHY_CH1_MINCCA_PWR_S 20
502
503#define AR_PHY_CH2_CCA 0xb864
504#define AR_PHY_CH2_MINCCA_PWR 0x0FF80000
505#define AR_PHY_CH2_MINCCA_PWR_S 19
506
507#define AR_PHY_CH1_EXT_CCA 0xa9bc
508#define AR_PHY_CH1_EXT_MINCCA_PWR 0xFF800000
509#define AR_PHY_CH1_EXT_MINCCA_PWR_S 23
510#define AR9280_PHY_CH1_EXT_MINCCA_PWR 0x01FF0000
511#define AR9280_PHY_CH1_EXT_MINCCA_PWR_S 16
512
513#define AR_PHY_CH2_EXT_CCA 0xb9bc
514#define AR_PHY_CH2_EXT_MINCCA_PWR 0xFF800000
515#define AR_PHY_CH2_EXT_MINCCA_PWR_S 23
516
517#define REG_WRITE_RF_ARRAY(iniarray, regData, regWr) do { \
518 int r; \
519 for (r = 0; r < ((iniarray)->ia_rows); r++) { \
520 REG_WRITE(ah, INI_RA((iniarray), r, 0), (regData)[r]); \
521 DPRINTF(ah->ah_sc, ATH_DBG_CHANNEL, \
522 "RF 0x%x V 0x%x\n", \
523 INI_RA((iniarray), r, 0), (regData)[r]); \
524 DO_DELAY(regWr); \
525 } \
526 } while (0)
527
528#define ATH9K_KEY_XOR 0xaa
529
530#define ATH9K_IS_MIC_ENABLED(ah) \
531 (AH5416(ah)->ah_staId1Defaults & AR_STA_ID1_CRPT_MIC_ENABLE)
532
533#define ANTSWAP_AB 0x0001
534#define REDUCE_CHAIN_0 0x00000050
535#define REDUCE_CHAIN_1 0x00000051
536
537#define RF_BANK_SETUP(_bank, _iniarray, _col) do { \
538 int i; \
539 for (i = 0; i < (_iniarray)->ia_rows; i++) \
540		(_bank)[i] = INI_RA((_iniarray), i, _col); \
541 } while (0)
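/*
 * A typical invocation, as used by ath9k_hw_set_rf_regs() in phy.c:
 *
 *	RF_BANK_SETUP(ahp->ah_analogBank3Data, &ahp->ah_iniBank3,
 *		      modesIndex);
 *
 * copies column modesIndex of the initvals array into the bank
 * buffer, one u32 per row, before the buffers are flushed to the
 * radio with REG_WRITE_RF_ARRAY().
 */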
542
543#endif
diff --git a/drivers/net/wireless/ath9k/rc.c b/drivers/net/wireless/ath9k/rc.c
new file mode 100644
index 000000000000..73c460ad355f
--- /dev/null
+++ b/drivers/net/wireless/ath9k/rc.c
@@ -0,0 +1,2126 @@
1/*
2 * Copyright (c) 2004 Video54 Technologies, Inc.
3 * Copyright (c) 2004-2008 Atheros Communications, Inc.
4 *
5 * Permission to use, copy, modify, and/or distribute this software for any
6 * purpose with or without fee is hereby granted, provided that the above
7 * copyright notice and this permission notice appear in all copies.
8 *
9 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
10 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
11 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
12 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
13 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
14 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
15 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
16 */
17
18/*
19 * Atheros rate control algorithm
20 */
21
22#include "core.h"
23#include "../net/mac80211/rate.h"
24
25static u32 tx_triglevel_max;
26
27static struct ath_rate_table ar5416_11na_ratetable = {
28 42,
29 {
30 { TRUE, TRUE, WLAN_PHY_OFDM, 6000, /* 6 Mb */
31 5400, 0x0b, 0x00, 12,
32 0, 2, 1, 0, 0, 0, 0, 0 },
33 { TRUE, TRUE, WLAN_PHY_OFDM, 9000, /* 9 Mb */
34 7800, 0x0f, 0x00, 18,
35 0, 3, 1, 1, 1, 1, 1, 0 },
36 { TRUE, TRUE, WLAN_PHY_OFDM, 12000, /* 12 Mb */
37 10000, 0x0a, 0x00, 24,
38 2, 4, 2, 2, 2, 2, 2, 0 },
39 { TRUE, TRUE, WLAN_PHY_OFDM, 18000, /* 18 Mb */
40 13900, 0x0e, 0x00, 36,
41 2, 6, 2, 3, 3, 3, 3, 0 },
42 { TRUE, TRUE, WLAN_PHY_OFDM, 24000, /* 24 Mb */
43 17300, 0x09, 0x00, 48,
44 4, 10, 3, 4, 4, 4, 4, 0 },
45 { TRUE, TRUE, WLAN_PHY_OFDM, 36000, /* 36 Mb */
46 23000, 0x0d, 0x00, 72,
47 4, 14, 3, 5, 5, 5, 5, 0 },
48 { TRUE, TRUE, WLAN_PHY_OFDM, 48000, /* 48 Mb */
49 27400, 0x08, 0x00, 96,
50 4, 20, 3, 6, 6, 6, 6, 0 },
51 { TRUE, TRUE, WLAN_PHY_OFDM, 54000, /* 54 Mb */
52 29300, 0x0c, 0x00, 108,
53 4, 23, 3, 7, 7, 7, 7, 0 },
54 { TRUE_20, TRUE_20, WLAN_PHY_HT_20_SS, 6500, /* 6.5 Mb */
55 6400, 0x80, 0x00, 0,
56 0, 2, 3, 8, 24, 8, 24, 3216 },
57 { TRUE_20, TRUE_20, WLAN_PHY_HT_20_SS, 13000, /* 13 Mb */
58 12700, 0x81, 0x00, 1,
59 2, 4, 3, 9, 25, 9, 25, 6434 },
60 { TRUE_20, TRUE_20, WLAN_PHY_HT_20_SS, 19500, /* 19.5 Mb */
61 18800, 0x82, 0x00, 2,
62 2, 6, 3, 10, 26, 10, 26, 9650 },
63 { TRUE_20, TRUE_20, WLAN_PHY_HT_20_SS, 26000, /* 26 Mb */
64 25000, 0x83, 0x00, 3,
65 4, 10, 3, 11, 27, 11, 27, 12868 },
66 { TRUE_20, TRUE_20, WLAN_PHY_HT_20_SS, 39000, /* 39 Mb */
67 36700, 0x84, 0x00, 4,
68 4, 14, 3, 12, 28, 12, 28, 19304 },
69 { FALSE, TRUE_20, WLAN_PHY_HT_20_SS, 52000, /* 52 Mb */
70 48100, 0x85, 0x00, 5,
71 4, 20, 3, 13, 29, 13, 29, 25740 },
72 { FALSE, TRUE_20, WLAN_PHY_HT_20_SS, 58500, /* 58.5 Mb */
73 53500, 0x86, 0x00, 6,
74 4, 23, 3, 14, 30, 14, 30, 28956 },
75 { FALSE, TRUE_20, WLAN_PHY_HT_20_SS, 65000, /* 65 Mb */
76 59000, 0x87, 0x00, 7,
77 4, 25, 3, 15, 31, 15, 32, 32180 },
78 { FALSE, FALSE, WLAN_PHY_HT_20_DS, 13000, /* 13 Mb */
79		12700, 0x88, 0x00, 8,
80		0, 2, 3, 16, 33, 16, 33, 6430 },
81 { FALSE, FALSE, WLAN_PHY_HT_20_DS, 26000, /* 26 Mb */
82 24800, 0x89, 0x00, 9,
83 2, 4, 3, 17, 34, 17, 34, 12860 },
84 { FALSE, FALSE, WLAN_PHY_HT_20_DS, 39000, /* 39 Mb */
85 36600, 0x8a, 0x00, 10,
86 2, 6, 3, 18, 35, 18, 35, 19300 },
87 { TRUE_20, FALSE, WLAN_PHY_HT_20_DS, 52000, /* 52 Mb */
88 48100, 0x8b, 0x00, 11,
89 4, 10, 3, 19, 36, 19, 36, 25736 },
90 { TRUE_20, FALSE, WLAN_PHY_HT_20_DS, 78000, /* 78 Mb */
91 69500, 0x8c, 0x00, 12,
92 4, 14, 3, 20, 37, 20, 37, 38600 },
93 { TRUE_20, FALSE, WLAN_PHY_HT_20_DS, 104000, /* 104 Mb */
94 89500, 0x8d, 0x00, 13,
95 4, 20, 3, 21, 38, 21, 38, 51472 },
96 { TRUE_20, FALSE, WLAN_PHY_HT_20_DS, 117000, /* 117 Mb */
97 98900, 0x8e, 0x00, 14,
98 4, 23, 3, 22, 39, 22, 39, 57890 },
99 { TRUE_20, FALSE, WLAN_PHY_HT_20_DS, 130000, /* 130 Mb */
100 108300, 0x8f, 0x00, 15,
101 4, 25, 3, 23, 40, 23, 41, 64320 },
102 { TRUE_40, TRUE_40, WLAN_PHY_HT_40_SS, 13500, /* 13.5 Mb */
103 13200, 0x80, 0x00, 0,
104 0, 2, 3, 8, 24, 24, 24, 6684 },
105 { TRUE_40, TRUE_40, WLAN_PHY_HT_40_SS, 27500, /* 27.0 Mb */
106 25900, 0x81, 0x00, 1,
107 2, 4, 3, 9, 25, 25, 25, 13368 },
108 { TRUE_40, TRUE_40, WLAN_PHY_HT_40_SS, 40500, /* 40.5 Mb */
109 38600, 0x82, 0x00, 2,
110 2, 6, 3, 10, 26, 26, 26, 20052 },
111 { TRUE_40, TRUE_40, WLAN_PHY_HT_40_SS, 54000, /* 54 Mb */
112 49800, 0x83, 0x00, 3,
113 4, 10, 3, 11, 27, 27, 27, 26738 },
114 { TRUE_40, TRUE_40, WLAN_PHY_HT_40_SS, 81500, /* 81 Mb */
115 72200, 0x84, 0x00, 4,
116 4, 14, 3, 12, 28, 28, 28, 40104 },
117 { FALSE, TRUE_40, WLAN_PHY_HT_40_SS, 108000, /* 108 Mb */
118 92900, 0x85, 0x00, 5,
119 4, 20, 3, 13, 29, 29, 29, 53476 },
120 { FALSE, TRUE_40, WLAN_PHY_HT_40_SS, 121500, /* 121.5 Mb */
121 102700, 0x86, 0x00, 6,
122 4, 23, 3, 14, 30, 30, 30, 60156 },
123 { FALSE, TRUE_40, WLAN_PHY_HT_40_SS, 135000, /* 135 Mb */
124 112000, 0x87, 0x00, 7,
125 4, 25, 3, 15, 31, 32, 32, 66840 },
126 { FALSE, TRUE_40, WLAN_PHY_HT_40_SS_HGI, 150000, /* 150 Mb */
127 122000, 0x87, 0x00, 7,
128 4, 25, 3, 15, 31, 32, 32, 74200 },
129 { FALSE, FALSE, WLAN_PHY_HT_40_DS, 27000, /* 27 Mb */
130 25800, 0x88, 0x00, 8,
131 0, 2, 3, 16, 33, 33, 33, 13360 },
132 { FALSE, FALSE, WLAN_PHY_HT_40_DS, 54000, /* 54 Mb */
133 49800, 0x89, 0x00, 9,
134 2, 4, 3, 17, 34, 34, 34, 26720 },
135 { FALSE, FALSE, WLAN_PHY_HT_40_DS, 81000, /* 81 Mb */
136 71900, 0x8a, 0x00, 10,
137 2, 6, 3, 18, 35, 35, 35, 40080 },
138 { TRUE_40, FALSE, WLAN_PHY_HT_40_DS, 108000, /* 108 Mb */
139 92500, 0x8b, 0x00, 11,
140 4, 10, 3, 19, 36, 36, 36, 53440 },
141 { TRUE_40, FALSE, WLAN_PHY_HT_40_DS, 162000, /* 162 Mb */
142 130300, 0x8c, 0x00, 12,
143 4, 14, 3, 20, 37, 37, 37, 80160 },
144 { TRUE_40, FALSE, WLAN_PHY_HT_40_DS, 216000, /* 216 Mb */
145 162800, 0x8d, 0x00, 13,
146 4, 20, 3, 21, 38, 38, 38, 106880 },
147 { TRUE_40, FALSE, WLAN_PHY_HT_40_DS, 243000, /* 243 Mb */
148 178200, 0x8e, 0x00, 14,
149 4, 23, 3, 22, 39, 39, 39, 120240 },
150 { TRUE_40, FALSE, WLAN_PHY_HT_40_DS, 270000, /* 270 Mb */
151 192100, 0x8f, 0x00, 15,
152 4, 25, 3, 23, 40, 41, 41, 133600 },
153 { TRUE_40, FALSE, WLAN_PHY_HT_40_DS_HGI, 300000, /* 300 Mb */
154 207000, 0x8f, 0x00, 15,
155 4, 25, 3, 23, 40, 41, 41, 148400 },
156 },
157 50, /* probe interval */
158 50, /* rssi reduce interval */
159 WLAN_RC_HT_FLAG, /* Phy rates allowed initially */
160};
161
162/* TRUE_ALL - valid for 20/40/Legacy,
163 * TRUE - Legacy only,
164 * TRUE_20 - HT 20 only,
165 * TRUE_40 - HT 40 only */
166
167/* 4ms frame limit not used for NG mode. The values filled
168 * for HT are the 64K max aggregate limit */
169
170static struct ath_rate_table ar5416_11ng_ratetable = {
171 46,
172 {
173 { TRUE_ALL, TRUE_ALL, WLAN_PHY_CCK, 1000, /* 1 Mb */
174 900, 0x1b, 0x00, 2,
175 0, 0, 1, 0, 0, 0, 0, 0 },
176 { TRUE_ALL, TRUE_ALL, WLAN_PHY_CCK, 2000, /* 2 Mb */
177 1900, 0x1a, 0x04, 4,
178 1, 1, 1, 1, 1, 1, 1, 0 },
179 { TRUE_ALL, TRUE_ALL, WLAN_PHY_CCK, 5500, /* 5.5 Mb */
180 4900, 0x19, 0x04, 11,
181 2, 2, 2, 2, 2, 2, 2, 0 },
182 { TRUE_ALL, TRUE_ALL, WLAN_PHY_CCK, 11000, /* 11 Mb */
183 8100, 0x18, 0x04, 22,
184 3, 3, 2, 3, 3, 3, 3, 0 },
185 { FALSE, FALSE, WLAN_PHY_OFDM, 6000, /* 6 Mb */
186 5400, 0x0b, 0x00, 12,
187 4, 2, 1, 4, 4, 4, 4, 0 },
188 { FALSE, FALSE, WLAN_PHY_OFDM, 9000, /* 9 Mb */
189 7800, 0x0f, 0x00, 18,
190 4, 3, 1, 5, 5, 5, 5, 0 },
191 { TRUE, TRUE, WLAN_PHY_OFDM, 12000, /* 12 Mb */
192 10100, 0x0a, 0x00, 24,
193 6, 4, 1, 6, 6, 6, 6, 0 },
194 { TRUE, TRUE, WLAN_PHY_OFDM, 18000, /* 18 Mb */
195 14100, 0x0e, 0x00, 36,
196 6, 6, 2, 7, 7, 7, 7, 0 },
197 { TRUE, TRUE, WLAN_PHY_OFDM, 24000, /* 24 Mb */
198 17700, 0x09, 0x00, 48,
199 8, 10, 3, 8, 8, 8, 8, 0 },
200 { TRUE, TRUE, WLAN_PHY_OFDM, 36000, /* 36 Mb */
201 23700, 0x0d, 0x00, 72,
202 8, 14, 3, 9, 9, 9, 9, 0 },
203 { TRUE, TRUE, WLAN_PHY_OFDM, 48000, /* 48 Mb */
204 27400, 0x08, 0x00, 96,
205 8, 20, 3, 10, 10, 10, 10, 0 },
206 { TRUE, TRUE, WLAN_PHY_OFDM, 54000, /* 54 Mb */
207 30900, 0x0c, 0x00, 108,
208 8, 23, 3, 11, 11, 11, 11, 0 },
209 { FALSE, FALSE, WLAN_PHY_HT_20_SS, 6500, /* 6.5 Mb */
210 6400, 0x80, 0x00, 0,
211 4, 2, 3, 12, 28, 12, 28, 3216 },
212 { TRUE_20, TRUE_20, WLAN_PHY_HT_20_SS, 13000, /* 13 Mb */
213 12700, 0x81, 0x00, 1,
214 6, 4, 3, 13, 29, 13, 29, 6434 },
215 { TRUE_20, TRUE_20, WLAN_PHY_HT_20_SS, 19500, /* 19.5 Mb */
216 18800, 0x82, 0x00, 2,
217 6, 6, 3, 14, 30, 14, 30, 9650 },
218 { TRUE_20, TRUE_20, WLAN_PHY_HT_20_SS, 26000, /* 26 Mb */
219 25000, 0x83, 0x00, 3,
220 8, 10, 3, 15, 31, 15, 31, 12868 },
221 { TRUE_20, TRUE_20, WLAN_PHY_HT_20_SS, 39000, /* 39 Mb */
222 36700, 0x84, 0x00, 4,
223 8, 14, 3, 16, 32, 16, 32, 19304 },
224 { FALSE, TRUE_20, WLAN_PHY_HT_20_SS, 52000, /* 52 Mb */
225 48100, 0x85, 0x00, 5,
226 8, 20, 3, 17, 33, 17, 33, 25740 },
227 { FALSE, TRUE_20, WLAN_PHY_HT_20_SS, 58500, /* 58.5 Mb */
228 53500, 0x86, 0x00, 6,
229 8, 23, 3, 18, 34, 18, 34, 28956 },
230 { FALSE, TRUE_20, WLAN_PHY_HT_20_SS, 65000, /* 65 Mb */
231 59000, 0x87, 0x00, 7,
232 8, 25, 3, 19, 35, 19, 36, 32180 },
233 { FALSE, FALSE, WLAN_PHY_HT_20_DS, 13000, /* 13 Mb */
234 12700, 0x88, 0x00, 8,
235 4, 2, 3, 20, 37, 20, 37, 6430 },
236 { FALSE, FALSE, WLAN_PHY_HT_20_DS, 26000, /* 26 Mb */
237 24800, 0x89, 0x00, 9,
238 6, 4, 3, 21, 38, 21, 38, 12860 },
239 { FALSE, FALSE, WLAN_PHY_HT_20_DS, 39000, /* 39 Mb */
240 36600, 0x8a, 0x00, 10,
241 6, 6, 3, 22, 39, 22, 39, 19300 },
242 { TRUE_20, FALSE, WLAN_PHY_HT_20_DS, 52000, /* 52 Mb */
243 48100, 0x8b, 0x00, 11,
244 8, 10, 3, 23, 40, 23, 40, 25736 },
245 { TRUE_20, FALSE, WLAN_PHY_HT_20_DS, 78000, /* 78 Mb */
246 69500, 0x8c, 0x00, 12,
247 8, 14, 3, 24, 41, 24, 41, 38600 },
248 { TRUE_20, FALSE, WLAN_PHY_HT_20_DS, 104000, /* 104 Mb */
249 89500, 0x8d, 0x00, 13,
250 8, 20, 3, 25, 42, 25, 42, 51472 },
251 { TRUE_20, FALSE, WLAN_PHY_HT_20_DS, 117000, /* 117 Mb */
252 98900, 0x8e, 0x00, 14,
253 8, 23, 3, 26, 43, 26, 44, 57890 },
254 { TRUE_20, FALSE, WLAN_PHY_HT_20_DS, 130000, /* 130 Mb */
255 108300, 0x8f, 0x00, 15,
256 8, 25, 3, 27, 44, 27, 45, 64320 },
257 { TRUE_40, TRUE_40, WLAN_PHY_HT_40_SS, 13500, /* 13.5 Mb */
258 13200, 0x80, 0x00, 0,
259 8, 2, 3, 12, 28, 28, 28, 6684 },
260 { TRUE_40, TRUE_40, WLAN_PHY_HT_40_SS, 27500, /* 27.0 Mb */
261 25900, 0x81, 0x00, 1,
262 8, 4, 3, 13, 29, 29, 29, 13368 },
263 { TRUE_40, TRUE_40, WLAN_PHY_HT_40_SS, 40500, /* 40.5 Mb */
264 38600, 0x82, 0x00, 2,
265 8, 6, 3, 14, 30, 30, 30, 20052 },
266 { TRUE_40, TRUE_40, WLAN_PHY_HT_40_SS, 54000, /* 54 Mb */
267 49800, 0x83, 0x00, 3,
268 8, 10, 3, 15, 31, 31, 31, 26738 },
269 { TRUE_40, TRUE_40, WLAN_PHY_HT_40_SS, 81500, /* 81 Mb */
270 72200, 0x84, 0x00, 4,
271 8, 14, 3, 16, 32, 32, 32, 40104 },
272 { FALSE, TRUE_40, WLAN_PHY_HT_40_SS, 108000, /* 108 Mb */
273 92900, 0x85, 0x00, 5,
274 8, 20, 3, 17, 33, 33, 33, 53476 },
275 { FALSE, TRUE_40, WLAN_PHY_HT_40_SS, 121500, /* 121.5 Mb */
276 102700, 0x86, 0x00, 6,
277 8, 23, 3, 18, 34, 34, 34, 60156 },
278 { FALSE, TRUE_40, WLAN_PHY_HT_40_SS, 135000, /* 135 Mb */
279 112000, 0x87, 0x00, 7,
280 8, 23, 3, 19, 35, 36, 36, 66840 },
281 { FALSE, TRUE_40, WLAN_PHY_HT_40_SS_HGI, 150000, /* 150 Mb */
282 122000, 0x87, 0x00, 7,
283 8, 25, 3, 19, 35, 36, 36, 74200 },
284 { FALSE, FALSE, WLAN_PHY_HT_40_DS, 27000, /* 27 Mb */
285 25800, 0x88, 0x00, 8,
286 8, 2, 3, 20, 37, 37, 37, 13360 },
287 { FALSE, FALSE, WLAN_PHY_HT_40_DS, 54000, /* 54 Mb */
288 49800, 0x89, 0x00, 9,
289 8, 4, 3, 21, 38, 38, 38, 26720 },
290 { FALSE, FALSE, WLAN_PHY_HT_40_DS, 81000, /* 81 Mb */
291 71900, 0x8a, 0x00, 10,
292 8, 6, 3, 22, 39, 39, 39, 40080 },
293 { TRUE_40, FALSE, WLAN_PHY_HT_40_DS, 108000, /* 108 Mb */
294 92500, 0x8b, 0x00, 11,
295 8, 10, 3, 23, 40, 40, 40, 53440 },
296 { TRUE_40, FALSE, WLAN_PHY_HT_40_DS, 162000, /* 162 Mb */
297 130300, 0x8c, 0x00, 12,
298 8, 14, 3, 24, 41, 41, 41, 80160 },
299 { TRUE_40, FALSE, WLAN_PHY_HT_40_DS, 216000, /* 216 Mb */
300 162800, 0x8d, 0x00, 13,
301 8, 20, 3, 25, 42, 42, 42, 106880 },
302 { TRUE_40, FALSE, WLAN_PHY_HT_40_DS, 243000, /* 243 Mb */
303 178200, 0x8e, 0x00, 14,
304 8, 23, 3, 26, 43, 43, 43, 120240 },
305 { TRUE_40, FALSE, WLAN_PHY_HT_40_DS, 270000, /* 270 Mb */
306 192100, 0x8f, 0x00, 15,
307 8, 23, 3, 27, 44, 45, 45, 133600 },
308 { TRUE_40, FALSE, WLAN_PHY_HT_40_DS_HGI, 300000, /* 300 Mb */
309 207000, 0x8f, 0x00, 15,
310 8, 25, 3, 27, 44, 45, 45, 148400 },
311 },
312 50, /* probe interval */
313 50, /* rssi reduce interval */
314 WLAN_RC_HT_FLAG, /* Phy rates allowed initially */
315};
316
317static struct ath_rate_table ar5416_11a_ratetable = {
318 8,
319 {
320 { TRUE, TRUE, WLAN_PHY_OFDM, 6000, /* 6 Mb */
321 5400, 0x0b, 0x00, (0x80|12),
322 0, 2, 1, 0, 0 },
323 { TRUE, TRUE, WLAN_PHY_OFDM, 9000, /* 9 Mb */
324 7800, 0x0f, 0x00, 18,
325 0, 3, 1, 1, 0 },
326 { TRUE, TRUE, WLAN_PHY_OFDM, 12000, /* 12 Mb */
327 10000, 0x0a, 0x00, (0x80|24),
328 2, 4, 2, 2, 0 },
329 { TRUE, TRUE, WLAN_PHY_OFDM, 18000, /* 18 Mb */
330 13900, 0x0e, 0x00, 36,
331 2, 6, 2, 3, 0 },
332 { TRUE, TRUE, WLAN_PHY_OFDM, 24000, /* 24 Mb */
333 17300, 0x09, 0x00, (0x80|48),
334 4, 10, 3, 4, 0 },
335 { TRUE, TRUE, WLAN_PHY_OFDM, 36000, /* 36 Mb */
336 23000, 0x0d, 0x00, 72,
337 4, 14, 3, 5, 0 },
338 { TRUE, TRUE, WLAN_PHY_OFDM, 48000, /* 48 Mb */
339 27400, 0x08, 0x00, 96,
340 4, 19, 3, 6, 0 },
341 { TRUE, TRUE, WLAN_PHY_OFDM, 54000, /* 54 Mb */
342 29300, 0x0c, 0x00, 108,
343 4, 23, 3, 7, 0 },
344 },
345 50, /* probe interval */
346 50, /* rssi reduce interval */
347 0, /* Phy rates allowed initially */
348};
349
350static struct ath_rate_table ar5416_11a_ratetable_Half = {
351 8,
352 {
353 { TRUE, TRUE, WLAN_PHY_OFDM, 3000, /* 6 Mb */
354 2700, 0x0b, 0x00, (0x80|6),
355 0, 2, 1, 0, 0},
356 { TRUE, TRUE, WLAN_PHY_OFDM, 4500, /* 9 Mb */
357 3900, 0x0f, 0x00, 9,
358 0, 3, 1, 1, 0 },
359 { TRUE, TRUE, WLAN_PHY_OFDM, 6000, /* 12 Mb */
360 5000, 0x0a, 0x00, (0x80|12),
361 2, 4, 2, 2, 0 },
362 { TRUE, TRUE, WLAN_PHY_OFDM, 9000, /* 18 Mb */
363 6950, 0x0e, 0x00, 18,
364 2, 6, 2, 3, 0 },
365 { TRUE, TRUE, WLAN_PHY_OFDM, 12000, /* 24 Mb */
366 8650, 0x09, 0x00, (0x80|24),
367 4, 10, 3, 4, 0 },
368 { TRUE, TRUE, WLAN_PHY_OFDM, 18000, /* 36 Mb */
369 11500, 0x0d, 0x00, 36,
370 4, 14, 3, 5, 0 },
371 { TRUE, TRUE, WLAN_PHY_OFDM, 24000, /* 48 Mb */
372 13700, 0x08, 0x00, 48,
373 4, 19, 3, 6, 0 },
374 { TRUE, TRUE, WLAN_PHY_OFDM, 27000, /* 54 Mb */
375 14650, 0x0c, 0x00, 54,
376 4, 23, 3, 7, 0 },
377 },
378 50, /* probe interval */
379 50, /* rssi reduce interval */
380 0, /* Phy rates allowed initially */
381};
382
383static struct ath_rate_table ar5416_11a_ratetable_Quarter = {
384 8,
385 {
386 { TRUE, TRUE, WLAN_PHY_OFDM, 1500, /* 6 Mb */
387 1350, 0x0b, 0x00, (0x80|3),
388 0, 2, 1, 0, 0 },
389 { TRUE, TRUE, WLAN_PHY_OFDM, 2250, /* 9 Mb */
390 1950, 0x0f, 0x00, 4,
391 0, 3, 1, 1, 0 },
392 { TRUE, TRUE, WLAN_PHY_OFDM, 3000, /* 12 Mb */
393 2500, 0x0a, 0x00, (0x80|6),
394 2, 4, 2, 2, 0 },
395 { TRUE, TRUE, WLAN_PHY_OFDM, 4500, /* 18 Mb */
396 3475, 0x0e, 0x00, 9,
397 2, 6, 2, 3, 0 },
398		{ TRUE, TRUE, WLAN_PHY_OFDM, 6000, /* 24 Mb */
399 4325, 0x09, 0x00, (0x80|12),
400 4, 10, 3, 4, 0 },
401 { TRUE, TRUE, WLAN_PHY_OFDM, 9000, /* 36 Mb */
402 5750, 0x0d, 0x00, 18,
403 4, 14, 3, 5, 0 },
404 { TRUE, TRUE, WLAN_PHY_OFDM, 12000, /* 48 Mb */
405 6850, 0x08, 0x00, 24,
406 4, 19, 3, 6, 0 },
407 { TRUE, TRUE, WLAN_PHY_OFDM, 13500, /* 54 Mb */
408 7325, 0x0c, 0x00, 27,
409 4, 23, 3, 7, 0 },
410 },
411 50, /* probe interval */
412 50, /* rssi reduce interval */
413 0, /* Phy rates allowed initially */
414};
415
416static struct ath_rate_table ar5416_11g_ratetable = {
417 12,
418 {
419 { TRUE, TRUE, WLAN_PHY_CCK, 1000, /* 1 Mb */
420 900, 0x1b, 0x00, 2,
421 0, 0, 1, 0, 0 },
422 { TRUE, TRUE, WLAN_PHY_CCK, 2000, /* 2 Mb */
423 1900, 0x1a, 0x04, 4,
424 1, 1, 1, 1, 0 },
425 { TRUE, TRUE, WLAN_PHY_CCK, 5500, /* 5.5 Mb */
426 4900, 0x19, 0x04, 11,
427 2, 2, 2, 2, 0 },
428 { TRUE, TRUE, WLAN_PHY_CCK, 11000, /* 11 Mb */
429 8100, 0x18, 0x04, 22,
430 3, 3, 2, 3, 0 },
431 { FALSE, FALSE, WLAN_PHY_OFDM, 6000, /* 6 Mb */
432 5400, 0x0b, 0x00, 12,
433 4, 2, 1, 4, 0 },
434 { FALSE, FALSE, WLAN_PHY_OFDM, 9000, /* 9 Mb */
435 7800, 0x0f, 0x00, 18,
436 4, 3, 1, 5, 0 },
437 { TRUE, TRUE, WLAN_PHY_OFDM, 12000, /* 12 Mb */
438 10000, 0x0a, 0x00, 24,
439 6, 4, 1, 6, 0 },
440 { TRUE, TRUE, WLAN_PHY_OFDM, 18000, /* 18 Mb */
441 13900, 0x0e, 0x00, 36,
442 6, 6, 2, 7, 0 },
443 { TRUE, TRUE, WLAN_PHY_OFDM, 24000, /* 24 Mb */
444 17300, 0x09, 0x00, 48,
445 8, 10, 3, 8, 0 },
446 { TRUE, TRUE, WLAN_PHY_OFDM, 36000, /* 36 Mb */
447 23000, 0x0d, 0x00, 72,
448 8, 14, 3, 9, 0 },
449 { TRUE, TRUE, WLAN_PHY_OFDM, 48000, /* 48 Mb */
450 27400, 0x08, 0x00, 96,
451 8, 19, 3, 10, 0 },
452 { TRUE, TRUE, WLAN_PHY_OFDM, 54000, /* 54 Mb */
453 29300, 0x0c, 0x00, 108,
454 8, 23, 3, 11, 0 },
455 },
456 50, /* probe interval */
457 50, /* rssi reduce interval */
458 0, /* Phy rates allowed initially */
459};
460
461static struct ath_rate_table ar5416_11b_ratetable = {
462 4,
463 {
464 { TRUE, TRUE, WLAN_PHY_CCK, 1000, /* 1 Mb */
465 900, 0x1b, 0x00, (0x80|2),
466 0, 0, 1, 0, 0 },
467 { TRUE, TRUE, WLAN_PHY_CCK, 2000, /* 2 Mb */
468 1800, 0x1a, 0x04, (0x80|4),
469 1, 1, 1, 1, 0 },
470 { TRUE, TRUE, WLAN_PHY_CCK, 5500, /* 5.5 Mb */
471 4300, 0x19, 0x04, (0x80|11),
472 1, 2, 2, 2, 0 },
473 { TRUE, TRUE, WLAN_PHY_CCK, 11000, /* 11 Mb */
474 7100, 0x18, 0x04, (0x80|22),
475 1, 4, 100, 3, 0 },
476 },
477 100, /* probe interval */
478 100, /* rssi reduce interval */
479 0, /* Phy rates allowed initially */
480};
481
482static void ar5416_attach_ratetables(struct ath_rate_softc *sc)
483{
484 /*
485 * Attach rate tables.
486 */
487 sc->hw_rate_table[ATH9K_MODE_11B] = &ar5416_11b_ratetable;
488 sc->hw_rate_table[ATH9K_MODE_11A] = &ar5416_11a_ratetable;
489 sc->hw_rate_table[ATH9K_MODE_11G] = &ar5416_11g_ratetable;
490
491 sc->hw_rate_table[ATH9K_MODE_11NA_HT20] = &ar5416_11na_ratetable;
492 sc->hw_rate_table[ATH9K_MODE_11NG_HT20] = &ar5416_11ng_ratetable;
493 sc->hw_rate_table[ATH9K_MODE_11NA_HT40PLUS] =
494 &ar5416_11na_ratetable;
495 sc->hw_rate_table[ATH9K_MODE_11NA_HT40MINUS] =
496 &ar5416_11na_ratetable;
497 sc->hw_rate_table[ATH9K_MODE_11NG_HT40PLUS] =
498 &ar5416_11ng_ratetable;
499 sc->hw_rate_table[ATH9K_MODE_11NG_HT40MINUS] =
500 &ar5416_11ng_ratetable;
501}
502
503static void ar5416_setquarter_ratetable(struct ath_rate_softc *sc)
504{
505 sc->hw_rate_table[ATH9K_MODE_11A] = &ar5416_11a_ratetable_Quarter;
506 return;
507}
508
509static void ar5416_sethalf_ratetable(struct ath_rate_softc *sc)
510{
511 sc->hw_rate_table[ATH9K_MODE_11A] = &ar5416_11a_ratetable_Half;
512 return;
513}
514
515static void ar5416_setfull_ratetable(struct ath_rate_softc *sc)
516{
517 sc->hw_rate_table[ATH9K_MODE_11A] = &ar5416_11a_ratetable;
518 return;
519}
520
521/*
522 * Return the median of three numbers
523 */
524static inline int8_t median(int8_t a, int8_t b, int8_t c)
525{
526 if (a >= b) {
527 if (b >= c)
528 return b;
529 else if (a > c)
530 return c;
531 else
532 return a;
533 } else {
534 if (a >= c)
535 return a;
536 else if (b >= c)
537 return c;
538 else
539 return b;
540 }
541}
542
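/*
 * Simple in-place bubble sort: on return, valid_rate_index[] holds
 * the valid rate indices in ascending ratekbps order.
 */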
543static void ath_rc_sort_validrates(const struct ath_rate_table *rate_table,
544 struct ath_tx_ratectrl *rate_ctrl)
545{
546 u8 i, j, idx, idx_next;
547
548 for (i = rate_ctrl->max_valid_rate - 1; i > 0; i--) {
549 for (j = 0; j <= i-1; j++) {
550 idx = rate_ctrl->valid_rate_index[j];
551 idx_next = rate_ctrl->valid_rate_index[j+1];
552
553 if (rate_table->info[idx].ratekbps >
554 rate_table->info[idx_next].ratekbps) {
555 rate_ctrl->valid_rate_index[j] = idx_next;
556 rate_ctrl->valid_rate_index[j+1] = idx;
557 }
558 }
559 }
560}
561
562/* Access functions for valid_txrate_mask */
563
564static void ath_rc_init_valid_txmask(struct ath_tx_ratectrl *rate_ctrl)
565{
566 u8 i;
567
568 for (i = 0; i < rate_ctrl->rate_table_size; i++)
569 rate_ctrl->valid_rate_index[i] = FALSE;
570}
571
572static inline void ath_rc_set_valid_txmask(struct ath_tx_ratectrl *rate_ctrl,
573 u8 index, int valid_tx_rate)
574{
575 ASSERT(index <= rate_ctrl->rate_table_size);
576 rate_ctrl->valid_rate_index[index] = valid_tx_rate ? TRUE : FALSE;
577}
578
579static inline int ath_rc_isvalid_txmask(struct ath_tx_ratectrl *rate_ctrl,
580 u8 index)
581{
582 ASSERT(index <= rate_ctrl->rate_table_size);
583 return rate_ctrl->valid_rate_index[index];
584}
585
586/* Iterators for valid_txrate_mask */
587static inline int
588ath_rc_get_nextvalid_txrate(const struct ath_rate_table *rate_table,
589 struct ath_tx_ratectrl *rate_ctrl,
590 u8 cur_valid_txrate,
591 u8 *next_idx)
592{
593 u8 i;
594
595 for (i = 0; i < rate_ctrl->max_valid_rate - 1; i++) {
596 if (rate_ctrl->valid_rate_index[i] == cur_valid_txrate) {
597 *next_idx = rate_ctrl->valid_rate_index[i+1];
598 return TRUE;
599 }
600 }
601
602 /* No more valid rates */
603 *next_idx = 0;
604 return FALSE;
605}
606
607/* Return true only for single stream */
608
609static int ath_rc_valid_phyrate(u32 phy, u32 capflag, int ignore_cw)
610{
611	if (WLAN_RC_PHY_HT(phy) && !(capflag & WLAN_RC_HT_FLAG))
612 return FALSE;
613 if (WLAN_RC_PHY_DS(phy) && !(capflag & WLAN_RC_DS_FLAG))
614 return FALSE;
615 if (WLAN_RC_PHY_SGI(phy) && !(capflag & WLAN_RC_SGI_FLAG))
616 return FALSE;
617 if (!ignore_cw && WLAN_RC_PHY_HT(phy))
618 if (WLAN_RC_PHY_40(phy) && !(capflag & WLAN_RC_40_FLAG))
619 return FALSE;
620 if (!WLAN_RC_PHY_40(phy) && (capflag & WLAN_RC_40_FLAG))
621 return FALSE;
622 return TRUE;
623}
624
625static inline int
626ath_rc_get_nextlowervalid_txrate(const struct ath_rate_table *rate_table,
627 struct ath_tx_ratectrl *rate_ctrl,
628 u8 cur_valid_txrate, u8 *next_idx)
629{
630 int8_t i;
631
632 for (i = 1; i < rate_ctrl->max_valid_rate ; i++) {
633 if (rate_ctrl->valid_rate_index[i] == cur_valid_txrate) {
634 *next_idx = rate_ctrl->valid_rate_index[i-1];
635 return TRUE;
636 }
637 }
638 return FALSE;
639}
640
641/*
642 * Initialize the Valid Rate Index from valid entries in Rate Table
643 */
644static u8
645ath_rc_sib_init_validrates(struct ath_rate_node *ath_rc_priv,
646 const struct ath_rate_table *rate_table,
647 u32 capflag)
648{
649 struct ath_tx_ratectrl *rate_ctrl;
650 u8 i, hi = 0;
651 u32 valid;
652
653 rate_ctrl = (struct ath_tx_ratectrl *)(ath_rc_priv);
654 for (i = 0; i < rate_table->rate_cnt; i++) {
655 valid = (ath_rc_priv->single_stream ?
656 rate_table->info[i].valid_single_stream :
657 rate_table->info[i].valid);
658 if (valid == TRUE) {
659 u32 phy = rate_table->info[i].phy;
660 u8 valid_rate_count = 0;
661
662 if (!ath_rc_valid_phyrate(phy, capflag, FALSE))
663 continue;
664
665 valid_rate_count = rate_ctrl->valid_phy_ratecnt[phy];
666
667 rate_ctrl->valid_phy_rateidx[phy][valid_rate_count] = i;
668 rate_ctrl->valid_phy_ratecnt[phy] += 1;
669 ath_rc_set_valid_txmask(rate_ctrl, i, TRUE);
670 hi = A_MAX(hi, i);
671 }
672 }
673 return hi;
674}
675
676/*
677 * Initialize the Valid Rate Index from Rate Set
678 */
679static u8
680ath_rc_sib_setvalid_rates(struct ath_rate_node *ath_rc_priv,
681 const struct ath_rate_table *rate_table,
682 struct ath_rateset *rateset,
683 u32 capflag)
684{
685	/* XXX: Clean me up and make indentation friendly */
686 u8 i, j, hi = 0;
687 struct ath_tx_ratectrl *rate_ctrl =
688 (struct ath_tx_ratectrl *)(ath_rc_priv);
689
690 /* Use intersection of working rates and valid rates */
691 for (i = 0; i < rateset->rs_nrates; i++) {
692 for (j = 0; j < rate_table->rate_cnt; j++) {
693 u32 phy = rate_table->info[j].phy;
694 u32 valid = (ath_rc_priv->single_stream ?
695 rate_table->info[j].valid_single_stream :
696 rate_table->info[j].valid);
697
698			/* We allow a rate only if it is valid and the
699 * capflag matches one of the validity
700 * (TRUE/TRUE_20/TRUE_40) flags */
701
702 /* XXX: catch the negative of this branch
703 * first and then continue */
704 if (((rateset->rs_rates[i] & 0x7F) ==
705 (rate_table->info[j].dot11rate & 0x7F)) &&
706 ((valid & WLAN_RC_CAP_MODE(capflag)) ==
707 WLAN_RC_CAP_MODE(capflag)) &&
708 !WLAN_RC_PHY_HT(phy)) {
709
710 u8 valid_rate_count = 0;
711
712 if (!ath_rc_valid_phyrate(phy, capflag, FALSE))
713 continue;
714
715 valid_rate_count =
716 rate_ctrl->valid_phy_ratecnt[phy];
717
718 rate_ctrl->valid_phy_rateidx[phy]
719 [valid_rate_count] = j;
720 rate_ctrl->valid_phy_ratecnt[phy] += 1;
721 ath_rc_set_valid_txmask(rate_ctrl, j, TRUE);
722 hi = A_MAX(hi, j);
723 }
724 }
725 }
726 return hi;
727}
728
729static u8
730ath_rc_sib_setvalid_htrates(struct ath_rate_node *ath_rc_priv,
731 const struct ath_rate_table *rate_table,
732 u8 *mcs_set, u32 capflag)
733{
734 u8 i, j, hi = 0;
735 struct ath_tx_ratectrl *rate_ctrl =
736 (struct ath_tx_ratectrl *)(ath_rc_priv);
737
738 /* Use intersection of working rates and valid rates */
739 for (i = 0; i < ((struct ath_rateset *)mcs_set)->rs_nrates; i++) {
740 for (j = 0; j < rate_table->rate_cnt; j++) {
741 u32 phy = rate_table->info[j].phy;
742 u32 valid = (ath_rc_priv->single_stream ?
743 rate_table->info[j].valid_single_stream :
744 rate_table->info[j].valid);
745
746 if (((((struct ath_rateset *)
747 mcs_set)->rs_rates[i] & 0x7F) !=
748 (rate_table->info[j].dot11rate & 0x7F)) ||
749 !WLAN_RC_PHY_HT(phy) ||
750 !WLAN_RC_PHY_HT_VALID(valid, capflag))
751 continue;
752
753 if (!ath_rc_valid_phyrate(phy, capflag, FALSE))
754 continue;
755
756 rate_ctrl->valid_phy_rateidx[phy]
757 [rate_ctrl->valid_phy_ratecnt[phy]] = j;
758 rate_ctrl->valid_phy_ratecnt[phy] += 1;
759 ath_rc_set_valid_txmask(rate_ctrl, j, TRUE);
760 hi = A_MAX(hi, j);
761 }
762 }
763 return hi;
764}
765
766/*
767 * Attach to a device instance. Setup the public definition
768 * of how much per-node space we need and setup the private
769 * phy tables that have rate control parameters.
770 */
771struct ath_rate_softc *ath_rate_attach(struct ath_hal *ah)
772{
773 struct ath_rate_softc *asc;
774
775 /* we are only in user context so we can sleep for memory */
776 asc = kzalloc(sizeof(struct ath_rate_softc), GFP_KERNEL);
777 if (asc == NULL)
778 return NULL;
779
780 ar5416_attach_ratetables(asc);
781
782 /* Save Maximum TX Trigger Level (used for 11n) */
783 tx_triglevel_max = ah->ah_caps.tx_triglevel_max;
784 /* return alias for ath_rate_softc * */
785 return asc;
786}
787
788static struct ath_rate_node *ath_rate_node_alloc(struct ath_vap *avp,
789 struct ath_rate_softc *rsc,
790 gfp_t gfp)
791{
792 struct ath_rate_node *anode;
793
794 anode = kzalloc(sizeof(struct ath_rate_node), gfp);
795 if (anode == NULL)
796 return NULL;
797
798 anode->avp = avp;
799 anode->asc = rsc;
800 avp->rc_node = anode;
801
802 return anode;
803}
804
805static void ath_rate_node_free(struct ath_rate_node *anode)
806{
807 if (anode != NULL)
808 kfree(anode);
809}
810
811void ath_rate_detach(struct ath_rate_softc *asc)
812{
813 if (asc != NULL)
814 kfree(asc);
815}
816
817u8 ath_rate_findrateix(struct ath_softc *sc,
818 u8 dot11rate)
819{
820 const struct ath_rate_table *ratetable;
821 struct ath_rate_softc *rsc = sc->sc_rc;
822 int i;
823
824 ratetable = rsc->hw_rate_table[sc->sc_curmode];
825
826 if (WARN_ON(!ratetable))
827 return 0;
828
829 for (i = 0; i < ratetable->rate_cnt; i++) {
830 if ((ratetable->info[i].dot11rate & 0x7f) == (dot11rate & 0x7f))
831 return i;
832 }
833
834 return 0;
835}
836
837/*
838 * Update rate-control state on a device state change. When
839 * operating as a station this includes associate/reassociate
840 * with an AP. Otherwise this gets called, for example, when
841 * we transition to the run state when operating as an AP.
842 */
843void ath_rate_newstate(struct ath_softc *sc, struct ath_vap *avp)
844{
845 struct ath_rate_softc *asc = sc->sc_rc;
846
847	/* For half and quarter rate channels use different
848 * rate tables
849 */
850 if (sc->sc_curchan.channelFlags & CHANNEL_HALF)
851 ar5416_sethalf_ratetable(asc);
852 else if (sc->sc_curchan.channelFlags & CHANNEL_QUARTER)
853 ar5416_setquarter_ratetable(asc);
854 else /* full rate */
855 ar5416_setfull_ratetable(asc);
856
857 if (avp->av_config.av_fixed_rateset != IEEE80211_FIXED_RATE_NONE) {
858 asc->fixedrix =
859 sc->sc_rixmap[avp->av_config.av_fixed_rateset & 0xff];
860 /* NB: check the fixed rate exists */
861 if (asc->fixedrix == 0xff)
862 asc->fixedrix = IEEE80211_FIXED_RATE_NONE;
863 } else {
864 asc->fixedrix = IEEE80211_FIXED_RATE_NONE;
865 }
866}
867
868static u8 ath_rc_ratefind_ht(struct ath_softc *sc,
869 struct ath_rate_node *ath_rc_priv,
870 const struct ath_rate_table *rate_table,
871 int probe_allowed, int *is_probing,
872 int is_retry)
873{
874 u32 dt, best_thruput, this_thruput, now_msec;
875 u8 rate, next_rate, best_rate, maxindex, minindex;
876 int8_t rssi_last, rssi_reduce = 0, index = 0;
877 struct ath_tx_ratectrl *rate_ctrl = NULL;
878
879 rate_ctrl = (struct ath_tx_ratectrl *)(ath_rc_priv ?
880 (ath_rc_priv) : NULL);
881
882 *is_probing = FALSE;
883
884 rssi_last = median(rate_ctrl->rssi_last,
885 rate_ctrl->rssi_last_prev,
886 rate_ctrl->rssi_last_prev2);
887
888 /*
889 * Age (reduce) last ack rssi based on how old it is.
890 * The bizarre numbers are so the delta is 160msec,
891 * meaning we divide by 16.
892 * 0msec <= dt <= 25msec: don't derate
893 * 25msec <= dt <= 185msec: derate linearly from 0 to 10dB
894 * 185msec <= dt: derate by 10dB
895 */
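	/*
	 * Worked example of the derating below: dt = 105 msec gives
	 * rssi_reduce = (105 - 25) >> 4 = 5 dB; any dt >= 185 msec is
	 * capped at the full 10 dB reduction.
	 */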
896
897 now_msec = jiffies_to_msecs(jiffies);
898 dt = now_msec - rate_ctrl->rssi_time;
899
900 if (dt >= 185)
901 rssi_reduce = 10;
902 else if (dt >= 25)
903 rssi_reduce = (u8)((dt - 25) >> 4);
904
905 /* Now reduce rssi_last by rssi_reduce */
906 if (rssi_last < rssi_reduce)
907 rssi_last = 0;
908 else
909 rssi_last -= rssi_reduce;
910
911 /*
912 * Now look up the rate in the rssi table and return it.
913 * If no rates match then we return 0 (lowest rate)
914 */
915
916 best_thruput = 0;
917 maxindex = rate_ctrl->max_valid_rate-1;
918
919 minindex = 0;
920 best_rate = minindex;
921
922 /*
923 * Try the higher rate first. It will reduce memory moving time
924 * if we have very good channel characteristics.
925 */
926 for (index = maxindex; index >= minindex ; index--) {
927 u8 per_thres;
928
929 rate = rate_ctrl->valid_rate_index[index];
930 if (rate > rate_ctrl->rate_max_phy)
931 continue;
932
933 /*
934 * For TCP the average collision rate is around 11%,
935 * so we ignore PERs less than this. This is to
936 * prevent the rate we are currently using (whose
937 * PER might be in the 10-15 range because of TCP
938 * collisions) looking worse than the next lower
939 * rate whose PER has decayed close to 0. If we
940		 * moved to the next lower rate, its PER would grow to
941		 * 10-15 and we would be worse off than staying
942 * at the current rate.
943 */
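		/* Example of the 12% floor applied below: rates whose PER is
		 * 8 and 12 both end up with per_thres = 12, so they compete
		 * on user_ratekbps alone. */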
944 per_thres = rate_ctrl->state[rate].per;
945 if (per_thres < 12)
946 per_thres = 12;
947
948 this_thruput = rate_table->info[rate].user_ratekbps *
949 (100 - per_thres);
950
951 if (best_thruput <= this_thruput) {
952 best_thruput = this_thruput;
953 best_rate = rate;
954 }
955 }
956
957 rate = best_rate;
958
959 /* if we are retrying for more than half the number
960 * of max retries, use the min rate for the next retry
961 */
962 if (is_retry)
963 rate = rate_ctrl->valid_rate_index[minindex];
964
965 rate_ctrl->rssi_last_lookup = rssi_last;
966
967 /*
968 * Must check the actual rate (ratekbps) to account for
969	 * non-monotonicity of 11g's rate table
970 */
971
972 if (rate >= rate_ctrl->rate_max_phy && probe_allowed) {
973 rate = rate_ctrl->rate_max_phy;
974
975 /* Probe the next allowed phy state */
976		/* FIXME:XXXX Check to make sure rate_max_phy is checked properly */
977 if (ath_rc_get_nextvalid_txrate(rate_table,
978 rate_ctrl, rate, &next_rate) &&
979 (now_msec - rate_ctrl->probe_time >
980 rate_table->probe_interval) &&
981 (rate_ctrl->hw_maxretry_pktcnt >= 1)) {
982 rate = next_rate;
983 rate_ctrl->probe_rate = rate;
984 rate_ctrl->probe_time = now_msec;
985 rate_ctrl->hw_maxretry_pktcnt = 0;
986 *is_probing = TRUE;
987 }
988 }
989
990 /*
991 * Make sure rate is not higher than the allowed maximum.
992 * We should also enforce the min, but I suspect the min is
993 * normally 1 rather than 0 because of the rate 9 vs 6 issue
994 * in the old code.
995 */
996 if (rate > (rate_ctrl->rate_table_size - 1))
997 rate = rate_ctrl->rate_table_size - 1;
998
999 ASSERT((rate_table->info[rate].valid && !ath_rc_priv->single_stream) ||
1000 (rate_table->info[rate].valid_single_stream &&
1001 ath_rc_priv->single_stream));
1002
1003 return rate;
1004}
1005
1006static void ath_rc_rate_set_series(const struct ath_rate_table *rate_table,
1007 struct ath_rc_series *series,
1008 u8 tries,
1009 u8 rix,
1010 int rtsctsenable)
1011{
1012 series->tries = tries;
1013 series->flags = (rtsctsenable ? ATH_RC_RTSCTS_FLAG : 0) |
1014 (WLAN_RC_PHY_DS(rate_table->info[rix].phy) ?
1015 ATH_RC_DS_FLAG : 0) |
1016 (WLAN_RC_PHY_40(rate_table->info[rix].phy) ?
1017 ATH_RC_CW40_FLAG : 0) |
1018 (WLAN_RC_PHY_SGI(rate_table->info[rix].phy) ?
1019 ATH_RC_SGI_FLAG : 0);
1020
1021 series->rix = rate_table->info[rix].base_index;
1022 series->max_4ms_framelen = rate_table->info[rix].max_4ms_framelen;
1023}
1024
1025static u8 ath_rc_rate_getidx(struct ath_softc *sc,
1026 struct ath_rate_node *ath_rc_priv,
1027 const struct ath_rate_table *rate_table,
1028 u8 rix, u16 stepdown,
1029 u16 min_rate)
1030{
1031 u32 j;
1032 u8 nextindex;
1033 struct ath_tx_ratectrl *rate_ctrl =
1034 (struct ath_tx_ratectrl *)(ath_rc_priv);
1035
1036 if (min_rate) {
1037 for (j = RATE_TABLE_SIZE; j > 0; j--) {
1038 if (ath_rc_get_nextlowervalid_txrate(rate_table,
1039 rate_ctrl, rix, &nextindex))
1040 rix = nextindex;
1041 else
1042 break;
1043 }
1044 } else {
1045 for (j = stepdown; j > 0; j--) {
1046 if (ath_rc_get_nextlowervalid_txrate(rate_table,
1047 rate_ctrl, rix, &nextindex))
1048 rix = nextindex;
1049 else
1050 break;
1051 }
1052 }
1053 return rix;
1054}
1055
1056static void ath_rc_ratefind(struct ath_softc *sc,
1057 struct ath_rate_node *ath_rc_priv,
1058 int num_tries, int num_rates, unsigned int rcflag,
1059 struct ath_rc_series series[], int *is_probe,
1060 int is_retry)
1061{
1062 u8 try_per_rate = 0, i = 0, rix, nrix;
1063 struct ath_rate_softc *asc = (struct ath_rate_softc *)sc->sc_rc;
1064 struct ath_rate_table *rate_table;
1065
1066 rate_table =
1067 (struct ath_rate_table *)asc->hw_rate_table[sc->sc_curmode];
1068 rix = ath_rc_ratefind_ht(sc, ath_rc_priv, rate_table,
1069 (rcflag & ATH_RC_PROBE_ALLOWED) ? 1 : 0,
1070 is_probe, is_retry);
1071 nrix = rix;
1072
1073 if ((rcflag & ATH_RC_PROBE_ALLOWED) && (*is_probe)) {
1074		/* Set one try for probe rates. Don't
1075		 * enable RTS for the probes. */
1076 ath_rc_rate_set_series(rate_table,
1077 &series[i++], 1, nrix, FALSE);
1078
1079 try_per_rate = (num_tries/num_rates);
1080 /* Get the next tried/allowed rate. No RTS for the next series
1081 * after the probe rate
1082 */
1083 nrix = ath_rc_rate_getidx(sc,
1084 ath_rc_priv, rate_table, nrix, 1, FALSE);
1085 ath_rc_rate_set_series(rate_table,
1086 &series[i++], try_per_rate, nrix, 0);
1087 } else {
1088 try_per_rate = (num_tries/num_rates);
1089		/* Set the chosen rate. No RTS for first series entry. */
1090 ath_rc_rate_set_series(rate_table,
1091 &series[i++], try_per_rate, nrix, FALSE);
1092 }
1093
1094 /* Fill in the other rates for multirate retry */
1095 for ( ; i < num_rates; i++) {
1096 u8 try_num;
1097 u8 min_rate;
1098
1099 try_num = ((i + 1) == num_rates) ?
1100 num_tries - (try_per_rate * i) : try_per_rate ;
1101 min_rate = (((i + 1) == num_rates) &&
1102 (rcflag & ATH_RC_MINRATE_LASTRATE)) ? 1 : 0;
1103
1104 nrix = ath_rc_rate_getidx(sc, ath_rc_priv,
1105 rate_table, nrix, 1, min_rate);
1106 /* All other rates in the series have RTS enabled */
1107 ath_rc_rate_set_series(rate_table,
1108 &series[i], try_num, nrix, TRUE);
1109 }
1110
1111 /*
1112 * NB:Change rate series to enable aggregation when operating
1113 * at lower MCS rates. When first rate in series is MCS2
1114 * in HT40 @ 2.4GHz, series should look like:
1115 *
1116 * {MCS2, MCS1, MCS0, MCS0}.
1117 *
1118 * When first rate in series is MCS3 in HT20 @ 2.4GHz, series should
1119 * look like:
1120 *
1121 * {MCS3, MCS2, MCS1, MCS1}
1122 *
1123 * So, set fourth rate in series to be same as third one for
1124 * above conditions.
1125 */
1126 if ((sc->sc_curmode == ATH9K_MODE_11NG_HT20) ||
1127 (sc->sc_curmode == ATH9K_MODE_11NG_HT40PLUS) ||
1128 (sc->sc_curmode == ATH9K_MODE_11NG_HT40MINUS)) {
1129 u8 dot11rate = rate_table->info[rix].dot11rate;
1130 u8 phy = rate_table->info[rix].phy;
1131 if (i == 4 &&
1132 ((dot11rate == 2 && phy == WLAN_RC_PHY_HT_40_SS) ||
1133 (dot11rate == 3 && phy == WLAN_RC_PHY_HT_20_SS))) {
1134 series[3].rix = series[2].rix;
1135 series[3].flags = series[2].flags;
1136 series[3].max_4ms_framelen = series[2].max_4ms_framelen;
1137 }
1138 }
1139}
1140
1141/*
1142 * Return the Tx rate series.
1143 */
1144void ath_rate_findrate(struct ath_softc *sc,
1145 struct ath_rate_node *ath_rc_priv,
1146 int num_tries,
1147 int num_rates,
1148 unsigned int rcflag,
1149 struct ath_rc_series series[],
1150 int *is_probe,
1151 int is_retry)
1152{
1153 struct ath_vap *avp = ath_rc_priv->avp;
1154
1155 DPRINTF(sc, ATH_DBG_RATE, "%s", __func__);
1156 if (!num_rates || !num_tries)
1157 return;
1158
1159 if (avp->av_config.av_fixed_rateset == IEEE80211_FIXED_RATE_NONE) {
1160 ath_rc_ratefind(sc, ath_rc_priv, num_tries, num_rates,
1161 rcflag, series, is_probe, is_retry);
1162 } else {
1163 /* Fixed rate */
1164 int idx;
1165 u8 flags;
1166 u32 rix;
1167 struct ath_rate_softc *asc = ath_rc_priv->asc;
1168 struct ath_rate_table *rate_table;
1169
1170 rate_table = (struct ath_rate_table *)
1171 asc->hw_rate_table[sc->sc_curmode];
1172
1173 for (idx = 0; idx < 4; idx++) {
1174 unsigned int mcs;
1175 u8 series_rix = 0;
1176
1177 series[idx].tries =
1178 IEEE80211_RATE_IDX_ENTRY(
1179 avp->av_config.av_fixed_retryset, idx);
1180
1181 mcs = IEEE80211_RATE_IDX_ENTRY(
1182 avp->av_config.av_fixed_rateset, idx);
1183
1184 if (idx == 3 && (mcs & 0xf0) == 0x70)
1185 mcs = (mcs & ~0xf0)|0x80;
1186
1187 if (!(mcs & 0x80))
1188 flags = 0;
1189 else
1190 flags = ((ath_rc_priv->ht_cap &
1191 WLAN_RC_DS_FLAG) ?
1192 ATH_RC_DS_FLAG : 0) |
1193 ((ath_rc_priv->ht_cap &
1194 WLAN_RC_40_FLAG) ?
1195 ATH_RC_CW40_FLAG : 0) |
1196 ((ath_rc_priv->ht_cap &
1197 WLAN_RC_SGI_FLAG) ?
1198 ((ath_rc_priv->ht_cap &
1199 WLAN_RC_40_FLAG) ?
1200 ATH_RC_SGI_FLAG : 0) : 0);
1201
1202 series[idx].rix = sc->sc_rixmap[mcs];
1203 series_rix = series[idx].rix;
1204
1205 /* XXX: Give me some cleanup love */
1206 if ((flags & ATH_RC_CW40_FLAG) &&
1207 (flags & ATH_RC_SGI_FLAG))
1208 rix = rate_table->info[series_rix].ht_index;
1209 else if (flags & ATH_RC_SGI_FLAG)
1210 rix = rate_table->info[series_rix].sgi_index;
1211 else if (flags & ATH_RC_CW40_FLAG)
1212 rix = rate_table->info[series_rix].cw40index;
1213 else
1214 rix = rate_table->info[series_rix].base_index;
1215 series[idx].max_4ms_framelen =
1216 rate_table->info[rix].max_4ms_framelen;
1217 series[idx].flags = flags;
1218 }
1219 }
1220}
1221
1222static void ath_rc_update_ht(struct ath_softc *sc,
1223 struct ath_rate_node *ath_rc_priv,
1224 struct ath_tx_info_priv *info_priv,
1225 int tx_rate, int xretries, int retries)
1226{
1227 struct ath_tx_ratectrl *rate_ctrl;
1228 u32 now_msec = jiffies_to_msecs(jiffies);
1229 int state_change = FALSE, rate, count;
1230 u8 last_per;
1231 struct ath_rate_softc *asc = (struct ath_rate_softc *)sc->sc_rc;
1232 struct ath_rate_table *rate_table =
1233 (struct ath_rate_table *)asc->hw_rate_table[sc->sc_curmode];
1234
1235 static u32 nretry_to_per_lookup[10] = {
1236 100 * 0 / 1,
1237 100 * 1 / 4,
1238 100 * 1 / 2,
1239 100 * 3 / 4,
1240 100 * 4 / 5,
1241 100 * 5 / 6,
1242 100 * 6 / 7,
1243 100 * 7 / 8,
1244 100 * 8 / 9,
1245 100 * 9 / 10
1246 };
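	/* The table above maps the retry count of a completed frame to an
	 * instantaneous PER estimate, e.g. 3 retries means 3 failures in
	 * 4 attempts: 100 * 3 / 4 = 75%. */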
1247
1248 if (!ath_rc_priv)
1249 return;
1250
1251 rate_ctrl = (struct ath_tx_ratectrl *)(ath_rc_priv);
1252
1253 ASSERT(tx_rate >= 0);
1254 if (tx_rate < 0)
1255 return;
1256
1257 /* To compensate for some imbalance between ctrl and ext. channel */
1258
1259 if (WLAN_RC_PHY_40(rate_table->info[tx_rate].phy))
1260 info_priv->tx.ts_rssi =
1261 info_priv->tx.ts_rssi < 3 ? 0 :
1262 info_priv->tx.ts_rssi - 3;
1263
1264 last_per = rate_ctrl->state[tx_rate].per;
1265
1266 if (xretries) {
1267 /* Update the PER. */
1268 if (xretries == 1) {
1269 rate_ctrl->state[tx_rate].per += 30;
1270 if (rate_ctrl->state[tx_rate].per > 100)
1271 rate_ctrl->state[tx_rate].per = 100;
1272 } else {
1273 /* xretries == 2 */
1274 count = sizeof(nretry_to_per_lookup) /
1275 sizeof(nretry_to_per_lookup[0]);
1276 if (retries >= count)
1277 retries = count - 1;
1278 /* new_PER = 7/8*old_PER + 1/8*(currentPER) */
1279 rate_ctrl->state[tx_rate].per =
1280 (u8)(rate_ctrl->state[tx_rate].per -
1281 (rate_ctrl->state[tx_rate].per >> 3) +
1282 ((100) >> 3));
1283 }
1284
1285 /* xretries == 1 or 2 */
1286
1287 if (rate_ctrl->probe_rate == tx_rate)
1288 rate_ctrl->probe_rate = 0;
1289
1290 } else { /* xretries == 0 */
1291 /* Update the PER. */
1292 /* Make sure it doesn't index out of array's bounds. */
1293 count = sizeof(nretry_to_per_lookup) /
1294 sizeof(nretry_to_per_lookup[0]);
1295 if (retries >= count)
1296 retries = count - 1;
1297 if (info_priv->n_bad_frames) {
1298 /* new_PER = 7/8*old_PER + 1/8*(currentPER) */
1299 /*
1300 * Assuming that n_frames is not 0. The current PER
1301 * from the retries is 100 * retries / (retries+1),
1302 * since the first retries attempts failed, and the
1303 * next one worked. For the one that worked,
1304			 * n_bad_frames subframes out of n_frames were bad,
1305 * so the PER for that part is
1306 * 100 * n_bad_frames / n_frames, and it contributes
1307 * 100 * n_bad_frames / (n_frames * (retries+1)) to
1308 * the above PER. The expression below is a
1309 * simplified version of the sum of these two terms.
1310 */
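			/* Worked example: retries = 1, n_frames = 10,
			 * n_bad_frames = 2 gives an instantaneous PER of
			 * 100 * (1 * 10 + 2) / (10 * 2) = 60%, which the
			 * expression below folds into the running PER with
			 * weight 1/8. */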
1311 if (info_priv->n_frames > 0)
1312 rate_ctrl->state[tx_rate].per
1313 = (u8)
1314 (rate_ctrl->state[tx_rate].per -
1315 (rate_ctrl->state[tx_rate].per >> 3) +
1316 ((100*(retries*info_priv->n_frames +
1317 info_priv->n_bad_frames) /
1318 (info_priv->n_frames *
1319 (retries+1))) >> 3));
1320 } else {
1321 /* new_PER = 7/8*old_PER + 1/8*(currentPER) */
1322
1323 rate_ctrl->state[tx_rate].per = (u8)
1324 (rate_ctrl->state[tx_rate].per -
1325 (rate_ctrl->state[tx_rate].per >> 3) +
1326 (nretry_to_per_lookup[retries] >> 3));
1327 }
1328
1329 rate_ctrl->rssi_last_prev2 = rate_ctrl->rssi_last_prev;
1330 rate_ctrl->rssi_last_prev = rate_ctrl->rssi_last;
1331 rate_ctrl->rssi_last = info_priv->tx.ts_rssi;
1332 rate_ctrl->rssi_time = now_msec;
1333
1334 /*
1335 * If we got at most one retry then increase the max rate if
1336 * this was a probe. Otherwise, ignore the probe.
1337 */
1338
1339 if (rate_ctrl->probe_rate && rate_ctrl->probe_rate == tx_rate) {
1340 if (retries > 0 || 2 * info_priv->n_bad_frames >
1341 info_priv->n_frames) {
1342 /*
1343 * Since we probed with just a single attempt,
1344 * any retries means the probe failed. Also,
1345 * if the attempt worked, but more than half
1346 * the subframes were bad then also consider
1347 * the probe a failure.
1348 */
1349 rate_ctrl->probe_rate = 0;
1350 } else {
1351 u8 probe_rate = 0;
1352
1353 rate_ctrl->rate_max_phy = rate_ctrl->probe_rate;
1354 probe_rate = rate_ctrl->probe_rate;
1355
1356 if (rate_ctrl->state[probe_rate].per > 30)
1357 rate_ctrl->state[probe_rate].per = 20;
1358
1359 rate_ctrl->probe_rate = 0;
1360
1361 /*
1362 * Since this probe succeeded, we allow the next
1363 * probe twice as soon. This allows the maxRate
1364 * to move up faster if the probes are
1365				 * successful.
1366 */
1367 rate_ctrl->probe_time = now_msec -
1368 rate_table->probe_interval / 2;
1369 }
1370 }
1371
1372 if (retries > 0) {
1373 /*
1374 * Don't update anything. We don't know if
1375 * this was because of collisions or poor signal.
1376 *
1377 * Later: if rssi_ack is close to
1378 * rate_ctrl->state[txRate].rssi_thres and we see lots
1379 * of retries, then we could increase
1380 * rate_ctrl->state[txRate].rssi_thres.
1381 */
1382 rate_ctrl->hw_maxretry_pktcnt = 0;
1383 } else {
1384 /*
1385 * It worked with no retries. First ignore bogus (small)
1386 * rssi_ack values.
1387 */
1388 if (tx_rate == rate_ctrl->rate_max_phy &&
1389 rate_ctrl->hw_maxretry_pktcnt < 255) {
1390 rate_ctrl->hw_maxretry_pktcnt++;
1391 }
1392
1393 if (info_priv->tx.ts_rssi >=
1394 rate_table->info[tx_rate].rssi_ack_validmin) {
1395 /* Average the rssi */
1396 if (tx_rate != rate_ctrl->rssi_sum_rate) {
1397 rate_ctrl->rssi_sum_rate = tx_rate;
1398 rate_ctrl->rssi_sum =
1399 rate_ctrl->rssi_sum_cnt = 0;
1400 }
1401
1402 rate_ctrl->rssi_sum += info_priv->tx.ts_rssi;
1403 rate_ctrl->rssi_sum_cnt++;
1404
1405 if (rate_ctrl->rssi_sum_cnt > 4) {
1406 int32_t rssi_ackAvg =
1407 (rate_ctrl->rssi_sum + 2) / 4;
1408 int8_t rssi_thres =
1409 rate_ctrl->state[tx_rate].
1410 rssi_thres;
1411 int8_t rssi_ack_vmin =
1412 rate_table->info[tx_rate].
1413 rssi_ack_validmin;
1414
1415 rate_ctrl->rssi_sum =
1416 rate_ctrl->rssi_sum_cnt = 0;
1417
1418 /* Now reduce the current
1419 * rssi threshold. */
1420 if ((rssi_ackAvg < rssi_thres + 2) &&
1421 (rssi_thres > rssi_ack_vmin)) {
1422 rate_ctrl->state[tx_rate].
1423 rssi_thres--;
1424 }
1425
1426 state_change = TRUE;
1427 }
1428 }
1429 }
1430 }
1431
1432 /* For all cases */
1433
1434 /*
1435 * If this rate looks bad (high PER) then stop using it for
1436 * a while (except if we are probing).
1437 */
1438 if (rate_ctrl->state[tx_rate].per >= 55 && tx_rate > 0 &&
1439 rate_table->info[tx_rate].ratekbps <=
1440 rate_table->info[rate_ctrl->rate_max_phy].ratekbps) {
1441 ath_rc_get_nextlowervalid_txrate(rate_table, rate_ctrl,
1442 (u8) tx_rate, &rate_ctrl->rate_max_phy);
1443
1444 /* Don't probe for a little while. */
1445 rate_ctrl->probe_time = now_msec;
1446 }
1447
1448 if (state_change) {
1449 /*
1450 * Make sure the rates above this have higher rssi thresholds.
1451 * (Note: Monotonicity is kept within the OFDM rates and
1452 * within the CCK rates. However, no adjustment is
1453 * made to keep the rssi thresholds monotonically
1454 * increasing between the CCK and OFDM rates.)
1455 */
1456 for (rate = tx_rate; rate <
1457 rate_ctrl->rate_table_size - 1; rate++) {
1458 if (rate_table->info[rate+1].phy !=
1459 rate_table->info[tx_rate].phy)
1460 break;
1461
1462 if (rate_ctrl->state[rate].rssi_thres +
1463 rate_table->info[rate].rssi_ack_deltamin >
1464 rate_ctrl->state[rate+1].rssi_thres) {
1465 rate_ctrl->state[rate+1].rssi_thres =
1466 rate_ctrl->state[rate].
1467 rssi_thres +
1468 rate_table->info[rate].
1469 rssi_ack_deltamin;
1470 }
1471 }
1472
1473 /* Make sure the rates below this have lower rssi thresholds. */
1474 for (rate = tx_rate - 1; rate >= 0; rate--) {
1475 if (rate_table->info[rate].phy !=
1476 rate_table->info[tx_rate].phy)
1477 break;
1478
1479 if (rate_ctrl->state[rate].rssi_thres +
1480 rate_table->info[rate].rssi_ack_deltamin >
1481 rate_ctrl->state[rate+1].rssi_thres) {
1482 if (rate_ctrl->state[rate+1].rssi_thres <
1483 rate_table->info[rate].
1484 rssi_ack_deltamin)
1485 rate_ctrl->state[rate].rssi_thres = 0;
1486 else {
1487 rate_ctrl->state[rate].rssi_thres =
1488 rate_ctrl->state[rate+1].
1489 rssi_thres -
1490 rate_table->info[rate].
1491 rssi_ack_deltamin;
1492 }
1493
1494 if (rate_ctrl->state[rate].rssi_thres <
1495 rate_table->info[rate].
1496 rssi_ack_validmin) {
1497 rate_ctrl->state[rate].rssi_thres =
1498 rate_table->info[rate].
1499 rssi_ack_validmin;
1500 }
1501 }
1502 }
1503 }
1504
1505 /* Make sure the rates below this have lower PER */
1506 /* Monotonicity is kept only for rates below the current rate. */
1507 if (rate_ctrl->state[tx_rate].per < last_per) {
1508 for (rate = tx_rate - 1; rate >= 0; rate--) {
1509 if (rate_table->info[rate].phy !=
1510 rate_table->info[tx_rate].phy)
1511 break;
1512
1513 if (rate_ctrl->state[rate].per >
1514 rate_ctrl->state[rate+1].per) {
1515 rate_ctrl->state[rate].per =
1516 rate_ctrl->state[rate+1].per;
1517 }
1518 }
1519 }
1520
1521 /* Maintain monotonicity for rates above the current rate */
1522 for (rate = tx_rate; rate < rate_ctrl->rate_table_size - 1; rate++) {
1523 if (rate_ctrl->state[rate+1].per < rate_ctrl->state[rate].per)
1524 rate_ctrl->state[rate+1].per =
1525 rate_ctrl->state[rate].per;
1526 }
1527
1528	/* Every so often, we reduce the rssi thresholds back
1529	 * towards their per-rate minimum. */
1530 if (now_msec - rate_ctrl->rssi_down_time >=
1531 rate_table->rssi_reduce_interval) {
1532
1533 for (rate = 0; rate < rate_ctrl->rate_table_size; rate++) {
1534 if (rate_ctrl->state[rate].rssi_thres >
1535 rate_table->info[rate].rssi_ack_validmin)
1536 rate_ctrl->state[rate].rssi_thres -= 1;
1537 }
1538 rate_ctrl->rssi_down_time = now_msec;
1539 }
1540
1541	/* Every so often, we decay the PER estimate
1542	 * of every rate (multiply by 7/8). */
1543 if (now_msec - rate_ctrl->per_down_time >=
1544 rate_table->rssi_reduce_interval) {
1545 for (rate = 0; rate < rate_ctrl->rate_table_size; rate++) {
1546 rate_ctrl->state[rate].per =
1547 7 * rate_ctrl->state[rate].per / 8;
1548 }
1549
1550 rate_ctrl->per_down_time = now_msec;
1551 }
1552}
1553
1554/*
1555 * This routine is called in rate control callback tx_status() to give
1556 * the status of previous frames.
1557 */
1558static void ath_rc_update(struct ath_softc *sc,
1559 struct ath_rate_node *ath_rc_priv,
1560 struct ath_tx_info_priv *info_priv, int final_ts_idx,
1561 int xretries, int long_retry)
1562{
1563 struct ath_rate_softc *asc = (struct ath_rate_softc *)sc->sc_rc;
1564 struct ath_rate_table *rate_table;
1565 struct ath_tx_ratectrl *rate_ctrl;
1566 struct ath_rc_series rcs[4];
1567 u8 flags;
1568 u32 series = 0, rix;
1569
1570 memcpy(rcs, info_priv->rcs, 4 * sizeof(rcs[0]));
1571 rate_table = (struct ath_rate_table *)
1572 asc->hw_rate_table[sc->sc_curmode];
1573 rate_ctrl = (struct ath_tx_ratectrl *)(ath_rc_priv);
1574 ASSERT(rcs[0].tries != 0);
1575
1576 /*
1577 * If the first rate is not the final index, there
1578 * are intermediate rate failures to be processed.
1579 */
1580 if (final_ts_idx != 0) {
1581 /* Process intermediate rates that failed.*/
1582 for (series = 0; series < final_ts_idx ; series++) {
1583 if (rcs[series].tries != 0) {
1584 flags = rcs[series].flags;
1585 /* If HT40 and we have switched mode from
1586 * 40 to 20 => don't update */
1587 if ((flags & ATH_RC_CW40_FLAG) &&
1588 (rate_ctrl->rc_phy_mode !=
1589 (flags & ATH_RC_CW40_FLAG)))
1590 return;
1591 if ((flags & ATH_RC_CW40_FLAG) &&
1592 (flags & ATH_RC_SGI_FLAG))
1593 rix = rate_table->info[
1594 rcs[series].rix].ht_index;
1595 else if (flags & ATH_RC_SGI_FLAG)
1596 rix = rate_table->info[
1597 rcs[series].rix].sgi_index;
1598 else if (flags & ATH_RC_CW40_FLAG)
1599 rix = rate_table->info[
1600 rcs[series].rix].cw40index;
1601 else
1602 rix = rate_table->info[
1603 rcs[series].rix].base_index;
1604 ath_rc_update_ht(sc, ath_rc_priv,
1605 info_priv, rix,
1606 xretries ? 1 : 2,
1607 rcs[series].tries);
1608 }
1609 }
1610 } else {
1611 /*
1612 * Handle the special case of MIMO PS burst, where the second
1613 * aggregate is sent out with only one rate and one try.
1614 * Treating it as an excessive retry penalizes the rate
1615 * inordinately.
1616 */
1617 if (rcs[0].tries == 1 && xretries == 1)
1618 xretries = 2;
1619 }
1620
1621 flags = rcs[series].flags;
1622 /* If HT40 and we have switched mode from 40 to 20 => don't update */
1623 if ((flags & ATH_RC_CW40_FLAG) &&
1624 (rate_ctrl->rc_phy_mode != (flags & ATH_RC_CW40_FLAG)))
1625 return;
1626
1627 if ((flags & ATH_RC_CW40_FLAG) && (flags & ATH_RC_SGI_FLAG))
1628 rix = rate_table->info[rcs[series].rix].ht_index;
1629 else if (flags & ATH_RC_SGI_FLAG)
1630 rix = rate_table->info[rcs[series].rix].sgi_index;
1631 else if (flags & ATH_RC_CW40_FLAG)
1632 rix = rate_table->info[rcs[series].rix].cw40index;
1633 else
1634 rix = rate_table->info[rcs[series].rix].base_index;
1635
1636 ath_rc_update_ht(sc, ath_rc_priv, info_priv, rix,
1637 xretries, long_retry);
1638}
1639
1640
1641/*
1642 * Process a tx descriptor for a completed transmit (success or failure).
1643 */
1644static void ath_rate_tx_complete(struct ath_softc *sc,
1645 struct ath_node *an,
1646 struct ath_rate_node *rc_priv,
1647 struct ath_tx_info_priv *info_priv)
1648{
1649 int final_ts_idx = info_priv->tx.ts_rateindex;
1650 int tx_status = 0, is_underrun = 0;
1651 struct ath_vap *avp;
1652
1653 avp = rc_priv->avp;
1654 if ((avp->av_config.av_fixed_rateset != IEEE80211_FIXED_RATE_NONE)
1655 || info_priv->tx.ts_status & ATH9K_TXERR_FILT)
1656 return;
1657
1658 if (info_priv->tx.ts_rssi > 0) {
1659 ATH_RSSI_LPF(an->an_chainmask_sel.tx_avgrssi,
1660 info_priv->tx.ts_rssi);
1661 }
1662
1663 /*
1664	 * If an underrun error is seen, treat it as an excessive retry only
1665	 * if the prefetch trigger level has reached the max (0x3f for 5416).
1666 * Adjust the long retry as if the frame was tried ATH_11N_TXMAXTRY
1667 * times. This affects how ratectrl updates PER for the failed rate.
1668 */
1669 if (info_priv->tx.ts_flags &
1670 (ATH9K_TX_DATA_UNDERRUN | ATH9K_TX_DELIM_UNDERRUN) &&
1671 ((sc->sc_ah->ah_txTrigLevel) >= tx_triglevel_max)) {
1672 tx_status = 1;
1673 is_underrun = 1;
1674 }
1675
1676 if ((info_priv->tx.ts_status & ATH9K_TXERR_XRETRY) ||
1677 (info_priv->tx.ts_status & ATH9K_TXERR_FIFO))
1678 tx_status = 1;
1679
1680 ath_rc_update(sc, rc_priv, info_priv, final_ts_idx, tx_status,
1681 (is_underrun) ? ATH_11N_TXMAXTRY :
1682 info_priv->tx.ts_longretry);
1683}
1684
1685
1686/*
1687 * Update the SIB's rate control information
1688 *
1689 * This should be called when the supported rates change
1690 * (e.g. SME operation, wireless mode change)
1691 *
1692 * It will determine which rates are valid for use.
1693 */
1694static void ath_rc_sib_update(struct ath_softc *sc,
1695 struct ath_rate_node *ath_rc_priv,
1696 u32 capflag, int keep_state,
1697 struct ath_rateset *negotiated_rates,
1698 struct ath_rateset *negotiated_htrates)
1699{
1700 struct ath_rate_table *rate_table = NULL;
1701 struct ath_rate_softc *asc = (struct ath_rate_softc *)sc->sc_rc;
1702 struct ath_rateset *rateset = negotiated_rates;
1703 u8 *ht_mcs = (u8 *)negotiated_htrates;
1704 struct ath_tx_ratectrl *rate_ctrl = (struct ath_tx_ratectrl *)
1705 (ath_rc_priv);
1706 u8 i, j, k, hi = 0, hthi = 0;
1707
1708 rate_table = (struct ath_rate_table *)
1709 asc->hw_rate_table[sc->sc_curmode];
1710
1711 /* Initial rate table size. Will change depending
1712 * on the working rate set */
1713 rate_ctrl->rate_table_size = MAX_TX_RATE_TBL;
1714
1715 /* Initialize thresholds according to the global rate table */
1716 for (i = 0 ; (i < rate_ctrl->rate_table_size) && (!keep_state); i++) {
1717 rate_ctrl->state[i].rssi_thres =
1718 rate_table->info[i].rssi_ack_validmin;
1719 rate_ctrl->state[i].per = 0;
1720 }
1721
1722 /* Determine the valid rates */
1723 ath_rc_init_valid_txmask(rate_ctrl);
1724
1725 for (i = 0; i < WLAN_RC_PHY_MAX; i++) {
1726 for (j = 0; j < MAX_TX_RATE_PHY; j++)
1727 rate_ctrl->valid_phy_rateidx[i][j] = 0;
1728 rate_ctrl->valid_phy_ratecnt[i] = 0;
1729 }
1730 rate_ctrl->rc_phy_mode = (capflag & WLAN_RC_40_FLAG);
1731
1732 /* Set stream capability */
1733 ath_rc_priv->single_stream = (capflag & WLAN_RC_DS_FLAG) ? 0 : 1;
1734
1735 if (!rateset->rs_nrates) {
1736 /* No working rate, just initialize valid rates */
1737 hi = ath_rc_sib_init_validrates(ath_rc_priv, rate_table,
1738 capflag);
1739 } else {
1740 /* Use intersection of working rates and valid rates */
1741 hi = ath_rc_sib_setvalid_rates(ath_rc_priv, rate_table,
1742 rateset, capflag);
1743 if (capflag & WLAN_RC_HT_FLAG) {
1744 hthi = ath_rc_sib_setvalid_htrates(ath_rc_priv,
1745 rate_table,
1746 ht_mcs,
1747 capflag);
1748 }
1749 hi = A_MAX(hi, hthi);
1750 }
1751
1752 rate_ctrl->rate_table_size = hi + 1;
1753 rate_ctrl->rate_max_phy = 0;
1754 ASSERT(rate_ctrl->rate_table_size <= MAX_TX_RATE_TBL);
1755
1756 for (i = 0, k = 0; i < WLAN_RC_PHY_MAX; i++) {
1757 for (j = 0; j < rate_ctrl->valid_phy_ratecnt[i]; j++) {
1758 rate_ctrl->valid_rate_index[k++] =
1759 rate_ctrl->valid_phy_rateidx[i][j];
1760 }
1761
1762 if (!ath_rc_valid_phyrate(i, rate_table->initial_ratemax, TRUE)
1763 || !rate_ctrl->valid_phy_ratecnt[i])
1764 continue;
1765
1766 rate_ctrl->rate_max_phy = rate_ctrl->valid_phy_rateidx[i][j-1];
1767 }
1768 ASSERT(rate_ctrl->rate_table_size <= MAX_TX_RATE_TBL);
1769 ASSERT(k <= MAX_TX_RATE_TBL);
1770
1771 rate_ctrl->max_valid_rate = k;
1772 /*
1773 * Some third party vendors don't send the supported rate series in
1774	 * order, so sort to make sure it is in order; otherwise our rate-find
1775	 * algorithm will select the wrong rates.
1776 */
1777 ath_rc_sort_validrates(rate_table, rate_ctrl);
1778 rate_ctrl->rate_max_phy = rate_ctrl->valid_rate_index[k-4];
1779}
1780
1781/*
1782 * Update rate-control state on station associate/reassociate.
1783 */
1784static int ath_rate_newassoc(struct ath_softc *sc,
1785 struct ath_rate_node *ath_rc_priv,
1786 unsigned int capflag,
1787 struct ath_rateset *negotiated_rates,
1788 struct ath_rateset *negotiated_htrates)
1789{
1790
1791
1792 ath_rc_priv->ht_cap =
1793 ((capflag & ATH_RC_DS_FLAG) ? WLAN_RC_DS_FLAG : 0) |
1794 ((capflag & ATH_RC_SGI_FLAG) ? WLAN_RC_SGI_FLAG : 0) |
1795 ((capflag & ATH_RC_HT_FLAG) ? WLAN_RC_HT_FLAG : 0) |
1796 ((capflag & ATH_RC_CW40_FLAG) ? WLAN_RC_40_FLAG : 0);
1797
1798 ath_rc_sib_update(sc, ath_rc_priv, ath_rc_priv->ht_cap, 0,
1799 negotiated_rates, negotiated_htrates);
1800
1801 return 0;
1802}
1803
1804/*
1805 * This routine is called to initialize the rate control parameters
1806 * in the SIB. It is called initially during system initialization
1807 * or when a station is associated with the AP.
1808 */
1809static void ath_rc_sib_init(struct ath_rate_node *ath_rc_priv)
1810{
1811 struct ath_tx_ratectrl *rate_ctrl;
1812
1813 rate_ctrl = (struct ath_tx_ratectrl *)(ath_rc_priv);
1814 rate_ctrl->rssi_down_time = jiffies_to_msecs(jiffies);
1815}
1816
1817
1818static void ath_setup_rates(struct ieee80211_local *local, struct sta_info *sta)
1819
1820{
1821 struct ieee80211_supported_band *sband;
1822 struct ieee80211_hw *hw = local_to_hw(local);
1823 struct ath_softc *sc = hw->priv;
1824 struct ath_rate_node *rc_priv = sta->rate_ctrl_priv;
1825 int i, j = 0;
1826
1827 DPRINTF(sc, ATH_DBG_RATE, "%s", __func__);
1828 sband = local->hw.wiphy->bands[local->hw.conf.channel->band];
1829 for (i = 0; i < sband->n_bitrates; i++) {
1830 if (sta->supp_rates[local->hw.conf.channel->band] & BIT(i)) {
1831 rc_priv->neg_rates.rs_rates[j]
1832 = (sband->bitrates[i].bitrate * 2) / 10;
1833 j++;
1834 }
1835 }
1836 rc_priv->neg_rates.rs_nrates = j;
1837}
1838
1839void ath_rc_node_update(struct ieee80211_hw *hw, struct ath_rate_node *rc_priv)
1840{
1841 struct ath_softc *sc = hw->priv;
1842 u32 capflag = 0;
1843
1844 if (hw->conf.ht_conf.ht_supported) {
1845 capflag |= ATH_RC_HT_FLAG | ATH_RC_DS_FLAG;
1846 if (sc->sc_ht_info.tx_chan_width == ATH9K_HT_MACMODE_2040)
1847 capflag |= ATH_RC_CW40_FLAG;
1848 }
1849
1850 ath_rate_newassoc(sc, rc_priv, capflag,
1851 &rc_priv->neg_rates,
1852 &rc_priv->neg_ht_rates);
1853
1854}
1855
1856/* Rate Control callbacks */
1857static void ath_tx_status(void *priv, struct net_device *dev,
1858 struct sk_buff *skb)
1859{
1860 struct ath_softc *sc = priv;
1861 struct ath_tx_info_priv *tx_info_priv;
1862 struct ath_node *an;
1863 struct sta_info *sta;
1864 struct ieee80211_local *local;
1865 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
1866 struct ieee80211_hdr *hdr;
1867 __le16 fc;
1868
1869 local = hw_to_local(sc->hw);
1870 hdr = (struct ieee80211_hdr *)skb->data;
1871 fc = hdr->frame_control;
1872 tx_info_priv = (struct ath_tx_info_priv *)tx_info->driver_data[0];
1873
1874 spin_lock_bh(&sc->node_lock);
1875 an = ath_node_find(sc, hdr->addr1);
1876 spin_unlock_bh(&sc->node_lock);
1877
1878 sta = sta_info_get(local, hdr->addr1);
1879 if (!an || !sta || !ieee80211_is_data(fc)) {
1880 if (tx_info->driver_data[0] != NULL) {
1881 kfree(tx_info->driver_data[0]);
1882 tx_info->driver_data[0] = NULL;
1883 }
1884 return;
1885 }
1886 if (tx_info->driver_data[0] != NULL) {
1887 ath_rate_tx_complete(sc, an, sta->rate_ctrl_priv, tx_info_priv);
1888 kfree(tx_info->driver_data[0]);
1889 tx_info->driver_data[0] = NULL;
1890 }
1891}
1892
1893static void ath_tx_aggr_resp(struct ath_softc *sc,
1894 struct sta_info *sta,
1895 struct ath_node *an,
1896 u8 tidno)
1897{
1898 struct ieee80211_hw *hw = sc->hw;
1899 struct ieee80211_local *local;
1900 struct ath_atx_tid *txtid;
1901 struct ieee80211_supported_band *sband;
1902 u16 buffersize = 0;
1903 int state;
1904 DECLARE_MAC_BUF(mac);
1905
1906 if (!sc->sc_txaggr)
1907 return;
1908
1909 txtid = ATH_AN_2_TID(an, tidno);
1910 if (!txtid->paused)
1911 return;
1912
1913 local = hw_to_local(sc->hw);
1914 sband = hw->wiphy->bands[hw->conf.channel->band];
1915 buffersize = IEEE80211_MIN_AMPDU_BUF <<
1916 sband->ht_info.ampdu_factor; /* FIXME */
1917 state = sta->ampdu_mlme.tid_state_tx[tidno];
1918
1919 if (state & HT_ADDBA_RECEIVED_MSK) {
1920 txtid->addba_exchangecomplete = 1;
1921 txtid->addba_exchangeinprogress = 0;
1922 txtid->baw_size = buffersize;
1923
1924 DPRINTF(sc, ATH_DBG_AGGR,
1925 "%s: Resuming tid, buffersize: %d\n",
1926 __func__,
1927 buffersize);
1928
1929 ath_tx_resume_tid(sc, txtid);
1930 }
1931}
1932
1933static void ath_get_rate(void *priv, struct net_device *dev,
1934 struct ieee80211_supported_band *sband,
1935 struct sk_buff *skb,
1936 struct rate_selection *sel)
1937{
1938 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
1939 struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr);
1940 struct sta_info *sta;
1941 struct ath_softc *sc = (struct ath_softc *)priv;
1942 struct ieee80211_hw *hw = sc->hw;
1943 struct ath_tx_info_priv *tx_info_priv;
1944 struct ath_rate_node *ath_rc_priv;
1945 struct ath_node *an;
1946 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
1947 int is_probe, chk, ret;
1948 s8 lowest_idx;
1949 __le16 fc = hdr->frame_control;
1950 u8 *qc, tid;
1951 DECLARE_MAC_BUF(mac);
1952
1953 DPRINTF(sc, ATH_DBG_RATE, "%s\n", __func__);
1954
1955 /* allocate driver private area of tx_info */
1956 tx_info->driver_data[0] = kzalloc(sizeof(*tx_info_priv), GFP_ATOMIC);
1957 ASSERT(tx_info->driver_data[0] != NULL);
1958 tx_info_priv = (struct ath_tx_info_priv *)tx_info->driver_data[0];
1959
1960 sta = sta_info_get(local, hdr->addr1);
1961 lowest_idx = rate_lowest_index(local, sband, sta);
1962 tx_info_priv->min_rate = (sband->bitrates[lowest_idx].bitrate * 2) / 10;
1963 /* lowest rate for management and multicast/broadcast frames */
1964 if (!ieee80211_is_data(fc) ||
1965 is_multicast_ether_addr(hdr->addr1) || !sta) {
1966 sel->rate_idx = lowest_idx;
1967 return;
1968 }
1969
1970 ath_rc_priv = sta->rate_ctrl_priv;
1971
1972 /* Find tx rate for unicast frames */
1973 ath_rate_findrate(sc, ath_rc_priv,
1974 ATH_11N_TXMAXTRY, 4,
1975 ATH_RC_PROBE_ALLOWED,
1976 tx_info_priv->rcs,
1977 &is_probe,
1978 false);
1979 if (is_probe)
1980 sel->probe_idx = ((struct ath_tx_ratectrl *)
1981 sta->rate_ctrl_priv)->probe_rate;
1982
1983 /* Ratecontrol sometimes returns invalid rate index */
1984 if (tx_info_priv->rcs[0].rix != 0xff)
1985 ath_rc_priv->prev_data_rix = tx_info_priv->rcs[0].rix;
1986 else
1987 tx_info_priv->rcs[0].rix = ath_rc_priv->prev_data_rix;
1988
1989 sel->rate_idx = tx_info_priv->rcs[0].rix;
1990
1991 /* Check if aggregation has to be enabled for this tid */
1992
1993 if (hw->conf.ht_conf.ht_supported) {
1994 if (ieee80211_is_data_qos(fc)) {
1995 qc = ieee80211_get_qos_ctl(hdr);
1996 tid = qc[0] & 0xf;
1997
1998 spin_lock_bh(&sc->node_lock);
1999 an = ath_node_find(sc, hdr->addr1);
2000 spin_unlock_bh(&sc->node_lock);
2001
2002 if (!an) {
2003 DPRINTF(sc, ATH_DBG_AGGR,
2004 "%s: Node not found to "
2005 "init/chk TX aggr\n", __func__);
2006 return;
2007 }
2008
2009 chk = ath_tx_aggr_check(sc, an, tid);
2010 if (chk == AGGR_REQUIRED) {
2011 ret = ieee80211_start_tx_ba_session(hw,
2012 hdr->addr1, tid);
2013 if (ret)
2014 DPRINTF(sc, ATH_DBG_AGGR,
2015 "%s: Unable to start tx "
2016 "aggr for: %s\n",
2017 __func__,
2018 print_mac(mac, hdr->addr1));
2019 else
2020 DPRINTF(sc, ATH_DBG_AGGR,
2021 "%s: Started tx aggr for: %s\n",
2022 __func__,
2023 print_mac(mac, hdr->addr1));
2024 } else if (chk == AGGR_EXCHANGE_PROGRESS)
2025 ath_tx_aggr_resp(sc, sta, an, tid);
2026 }
2027 }
2028}
2029
2030static void ath_rate_init(void *priv, void *priv_sta,
2031 struct ieee80211_local *local,
2032 struct sta_info *sta)
2033{
2034 struct ieee80211_supported_band *sband;
2035 struct ieee80211_hw *hw = local_to_hw(local);
2036 struct ieee80211_conf *conf = &local->hw.conf;
2037 struct ath_softc *sc = hw->priv;
2038 int i, j = 0;
2039
2040 DPRINTF(sc, ATH_DBG_RATE, "%s\n", __func__);
2041
2042 sband = local->hw.wiphy->bands[local->hw.conf.channel->band];
2043 sta->txrate_idx = rate_lowest_index(local, sband, sta);
2044
2045 ath_setup_rates(local, sta);
2046 if (conf->flags & IEEE80211_CONF_SUPPORT_HT_MODE) {
2047 for (i = 0; i < MCS_SET_SIZE; i++) {
2048 if (conf->ht_conf.supp_mcs_set[i/8] & (1<<(i%8)))
2049 ((struct ath_rate_node *)
2050 priv_sta)->neg_ht_rates.rs_rates[j++] = i;
2051 if (j == ATH_RATE_MAX)
2052 break;
2053 }
2054 ((struct ath_rate_node *)priv_sta)->neg_ht_rates.rs_nrates = j;
2055 }
2056 ath_rc_node_update(hw, priv_sta);
2057}
2058
2059static void ath_rate_clear(void *priv)
2060{
2061 return;
2062}
2063
2064static void *ath_rate_alloc(struct ieee80211_local *local)
2065{
2066 struct ieee80211_hw *hw = local_to_hw(local);
2067 struct ath_softc *sc = hw->priv;
2068
2069 DPRINTF(sc, ATH_DBG_RATE, "%s", __func__);
2070 return local->hw.priv;
2071}
2072
2073static void ath_rate_free(void *priv)
2074{
2075 return;
2076}
2077
2078static void *ath_rate_alloc_sta(void *priv, gfp_t gfp)
2079{
2080 struct ath_softc *sc = priv;
2081 struct ath_vap *avp = sc->sc_vaps[0];
2082 struct ath_rate_node *rate_priv;
2083
2084 DPRINTF(sc, ATH_DBG_RATE, "%s", __func__);
2085 rate_priv = ath_rate_node_alloc(avp, sc->sc_rc, gfp);
2086 if (!rate_priv) {
2087		DPRINTF(sc, ATH_DBG_FATAL, "%s: Unable to allocate "
2088			"private rate control structure", __func__);
2089 return NULL;
2090 }
2091 ath_rc_sib_init(rate_priv);
2092 return rate_priv;
2093}
2094
2095static void ath_rate_free_sta(void *priv, void *priv_sta)
2096{
2097 struct ath_rate_node *rate_priv = priv_sta;
2098 struct ath_softc *sc = priv;
2099
2100 DPRINTF(sc, ATH_DBG_RATE, "%s", __func__);
2101 ath_rate_node_free(rate_priv);
2102}
2103
2104static struct rate_control_ops ath_rate_ops = {
2105 .module = NULL,
2106 .name = "ath9k_rate_control",
2107 .tx_status = ath_tx_status,
2108 .get_rate = ath_get_rate,
2109 .rate_init = ath_rate_init,
2110 .clear = ath_rate_clear,
2111 .alloc = ath_rate_alloc,
2112 .free = ath_rate_free,
2113 .alloc_sta = ath_rate_alloc_sta,
2114 .free_sta = ath_rate_free_sta
2115};
2116
2117int ath_rate_control_register(void)
2118{
2119 return ieee80211_rate_control_register(&ath_rate_ops);
2120}
2121
2122void ath_rate_control_unregister(void)
2123{
2124 ieee80211_rate_control_unregister(&ath_rate_ops);
2125}
2126
diff --git a/drivers/net/wireless/ath9k/rc.h b/drivers/net/wireless/ath9k/rc.h
new file mode 100644
index 000000000000..71aef9c75232
--- /dev/null
+++ b/drivers/net/wireless/ath9k/rc.h
@@ -0,0 +1,316 @@
1/*
2 * Copyright (c) 2004 Sam Leffler, Errno Consulting
3 * Copyright (c) 2004 Video54 Technologies, Inc.
4 * Copyright (c) 2008 Atheros Communications Inc.
5 *
6 * Permission to use, copy, modify, and/or distribute this software for any
7 * purpose with or without fee is hereby granted, provided that the above
8 * copyright notice and this permission notice appear in all copies.
9 *
10 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
11 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
12 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
13 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
14 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
15 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
16 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
17 */
18
19#ifndef RC_H
20#define RC_H
21
22#include "ath9k.h"
23/*
24 * Interface definitions for transmit rate control modules for the
25 * Atheros driver.
26 *
27 * A rate control module is responsible for choosing the transmit rate
28 * for each data frame. Management+control frames are always sent at
29 * a fixed rate.
30 *
31 * Only one module may be present at a time; the driver references
32 * rate control interfaces by symbol name. If multiple modules are
33 * to be supported we'll need to switch to a registration-based scheme
34 * as is currently done, for example, for authentication modules.
35 *
36 * An instance of the rate control module is attached to each device
37 * at attach time and detached when the device is destroyed. The module
38 * may associate data with each device and each node (station). Both
39 * sets of storage are opaque except for the size of the per-node storage
40 * which must be provided when the module is attached.
41 *
42 * The rate control module is notified for each state transition and
43 * station association/reassociation. Otherwise it is queried for a
44 * rate for each outgoing frame and provided status from each transmitted
45 * frame. Any ancillary processing is the responsibility of the module
46 * (e.g. if periodic processing is required then the module should setup
47 * its own timer).
48 *
49 * In addition to the transmit rate for each frame the module must also
50 * indicate the number of attempts to make at the specified rate. If this
51 * number is != ATH_TXMAXTRY then an additional callback is made to setup
52 * additional transmit state. The rate control code is assumed to write
53 * this additional data directly to the transmit descriptor.
54 */
55
56struct ath_softc;
57
58#define TRUE 1
59#define FALSE 0
60
61#define ATH_RATE_MAX 30
62#define MCS_SET_SIZE 128
63
64enum ieee80211_fixed_rate_mode {
65 IEEE80211_FIXED_RATE_NONE = 0,
66 IEEE80211_FIXED_RATE_MCS = 1 /* HT rates */
67};
68
69/*
70 * Use the hal os glue code to get ms time
71 */
72#define IEEE80211_RATE_IDX_ENTRY(val, idx) (((val&(0xff<<(idx*8)))>>(idx*8)))
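/* Extracts the idx-th byte of a packed four-entry rate/retry set,
 * e.g. IEEE80211_RATE_IDX_ENTRY(0x04030201, 2) == 0x03. */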
73
74#define SHORT_PRE 1
75#define LONG_PRE 0
76
77#define WLAN_PHY_HT_20_SS WLAN_RC_PHY_HT_20_SS
78#define WLAN_PHY_HT_20_DS WLAN_RC_PHY_HT_20_DS
79#define WLAN_PHY_HT_20_DS_HGI WLAN_RC_PHY_HT_20_DS_HGI
80#define WLAN_PHY_HT_40_SS WLAN_RC_PHY_HT_40_SS
81#define WLAN_PHY_HT_40_SS_HGI WLAN_RC_PHY_HT_40_SS_HGI
82#define WLAN_PHY_HT_40_DS WLAN_RC_PHY_HT_40_DS
83#define WLAN_PHY_HT_40_DS_HGI WLAN_RC_PHY_HT_40_DS_HGI
84
85#define WLAN_PHY_OFDM PHY_OFDM
86#define WLAN_PHY_CCK PHY_CCK
87
88#define TRUE_20 0x2
89#define TRUE_40 0x4
90#define TRUE_2040 (TRUE_20|TRUE_40)
91#define TRUE_ALL (TRUE_2040|TRUE)
92
93enum {
94 WLAN_RC_PHY_HT_20_SS = 4,
95 WLAN_RC_PHY_HT_20_DS,
96 WLAN_RC_PHY_HT_40_SS,
97 WLAN_RC_PHY_HT_40_DS,
98 WLAN_RC_PHY_HT_20_SS_HGI,
99 WLAN_RC_PHY_HT_20_DS_HGI,
100 WLAN_RC_PHY_HT_40_SS_HGI,
101 WLAN_RC_PHY_HT_40_DS_HGI,
102 WLAN_RC_PHY_MAX
103};
104
105#define WLAN_RC_PHY_DS(_phy) ((_phy == WLAN_RC_PHY_HT_20_DS) \
106 || (_phy == WLAN_RC_PHY_HT_40_DS) \
107 || (_phy == WLAN_RC_PHY_HT_20_DS_HGI) \
108 || (_phy == WLAN_RC_PHY_HT_40_DS_HGI))
109#define WLAN_RC_PHY_40(_phy) ((_phy == WLAN_RC_PHY_HT_40_SS) \
110 || (_phy == WLAN_RC_PHY_HT_40_DS) \
111 || (_phy == WLAN_RC_PHY_HT_40_SS_HGI) \
112 || (_phy == WLAN_RC_PHY_HT_40_DS_HGI))
113#define WLAN_RC_PHY_SGI(_phy) ((_phy == WLAN_RC_PHY_HT_20_SS_HGI) \
114 || (_phy == WLAN_RC_PHY_HT_20_DS_HGI) \
115 || (_phy == WLAN_RC_PHY_HT_40_SS_HGI) \
116 || (_phy == WLAN_RC_PHY_HT_40_DS_HGI))
117
118#define WLAN_RC_PHY_HT(_phy) (_phy >= WLAN_RC_PHY_HT_20_SS)
119
120/* Returns the capflag mode */
121#define WLAN_RC_CAP_MODE(capflag) (((capflag & WLAN_RC_HT_FLAG) ? \
122 (capflag & WLAN_RC_40_FLAG) ? TRUE_40 : TRUE_20 : TRUE))
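/* e.g. an HT station on a 40 MHz channel (WLAN_RC_HT_FLAG | WLAN_RC_40_FLAG)
 * maps to TRUE_40; a legacy (non-HT) station maps to plain TRUE. */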
123
124/* Return TRUE if flag supports HT20 && client supports HT20 or
125 * return TRUE if flag supports HT40 && client supports HT40.
126 * This is used because some rates overlap between HT20/HT40.
127 */
128
129#define WLAN_RC_PHY_HT_VALID(flag, capflag) (((flag & TRUE_20) && !(capflag \
130 & WLAN_RC_40_FLAG)) || ((flag & TRUE_40) && \
131 (capflag & WLAN_RC_40_FLAG)))
132
133#define WLAN_RC_DS_FLAG (0x01)
134#define WLAN_RC_40_FLAG (0x02)
135#define WLAN_RC_SGI_FLAG (0x04)
136#define WLAN_RC_HT_FLAG (0x08)
137
138/* Index into the rate table */
139#define INIT_RATE_MAX_20 23
140#define INIT_RATE_MAX_40 40
141
142#define RATE_TABLE_SIZE 64
143
144/* XXX: Convert to kdoc */
145struct ath_rate_table {
146 int rate_cnt;
147 struct {
148 int valid; /* Valid for use in rate control */
149 int valid_single_stream;/* Valid for use in rate control
150 for single stream operation */
151 u8 phy; /* CCK/OFDM/TURBO/XR */
152 u32 ratekbps; /* Rate in Kbits per second */
153 u32 user_ratekbps; /* User rate in KBits per second */
154 u8 ratecode; /* rate that goes into
155 hw descriptors */
156 u8 short_preamble; /* Mask for enabling short preamble
157 in rate code for CCK */
158 u8 dot11rate; /* Value that goes into supported
159 rates info element of MLME */
160 u8 ctrl_rate; /* Index of next lower basic rate,
161 used for duration computation */
162 int8_t rssi_ack_validmin; /* Rate control related */
163 int8_t rssi_ack_deltamin; /* Rate control related */
164 u8 base_index; /* base rate index */
165 u8 cw40index; /* 40cap rate index */
166 u8 sgi_index; /* shortgi rate index */
167		u8 ht_index;		/* cw40 + shortgi rate index */
168 u32 max_4ms_framelen; /* Maximum frame length(bytes)
169 for 4ms tx duration */
170 } info[RATE_TABLE_SIZE];
171 u32 probe_interval; /* interval for ratectrl to
172 probe for other rates */
173 u32 rssi_reduce_interval; /* interval for ratectrl
174 to reduce RSSI */
175 u8 initial_ratemax; /* the initial ratemax value used
176 in ath_rc_sib_update() */
177};
178
179#define ATH_RC_PROBE_ALLOWED 0x00000001
180#define ATH_RC_MINRATE_LASTRATE 0x00000002
181#define ATH_RC_SHORT_PREAMBLE 0x00000004
182
183struct ath_rc_series {
184 u8 rix;
185 u8 tries;
186 u8 flags;
187 u32 max_4ms_framelen;
188};
189
190/* rcs_flags definition */
191#define ATH_RC_DS_FLAG 0x01
192#define ATH_RC_CW40_FLAG 0x02 /* CW 40 */
193#define ATH_RC_SGI_FLAG 0x04 /* Short Guard Interval */
194#define ATH_RC_HT_FLAG 0x08 /* HT */
195#define ATH_RC_RTSCTS_FLAG 0x10 /* RTS-CTS */
196
197/*
198 * State structures for new rate adaptation code
199 */
200#define MAX_TX_RATE_TBL 64
201#define MAX_TX_RATE_PHY 48
202
203struct ath_tx_ratectrl_state {
204 int8_t rssi_thres; /* required rssi for this rate (dB) */
205 u8 per; /* recent estimate of packet error rate (%) */
206};
207
208struct ath_tx_ratectrl {
209 struct ath_tx_ratectrl_state state[MAX_TX_RATE_TBL]; /* state */
210 int8_t rssi_last; /* last ack rssi */
211 int8_t rssi_last_lookup; /* last ack rssi used for lookup */
212 int8_t rssi_last_prev; /* previous last ack rssi */
213 int8_t rssi_last_prev2; /* 2nd previous last ack rssi */
214 int32_t rssi_sum_cnt; /* count of rssi_sum for averaging */
215 int32_t rssi_sum_rate; /* rate that we are averaging */
216 int32_t rssi_sum; /* running sum of rssi for averaging */
217 u32 valid_txrate_mask; /* mask of valid rates */
218 u8 rate_table_size; /* rate table size */
219 u8 rate_max; /* max rate that has recently worked */
220 u8 probe_rate; /* rate we are probing at */
221 u32 rssi_time; /* msec timestamp for last ack rssi */
222 u32 rssi_down_time; /* msec timestamp for last down step */
223 u32 probe_time; /* msec timestamp for last probe */
224 u8 hw_maxretry_pktcnt; /* num packets since we got
225 HW max retry error */
226 u8 max_valid_rate; /* maximum number of valid rate */
227 u8 valid_rate_index[MAX_TX_RATE_TBL]; /* valid rate index */
228	u32 per_down_time;		/* msec timestamp for last
229 PER down step */
230
231 /* 11n state */
232 u8 valid_phy_ratecnt[WLAN_RC_PHY_MAX]; /* valid rate count */
233 u8 valid_phy_rateidx[WLAN_RC_PHY_MAX][MAX_TX_RATE_TBL];
234 u8 rc_phy_mode;
235 u8 rate_max_phy; /* Phy index for the max rate */
236	u32 rate_max_lastused;		/* msec timestamp of when we
237 last used rateMaxPhy */
238 u32 probe_interval; /* interval for ratectrl to probe
239 for other rates */
240};
241
242struct ath_rateset {
243 u8 rs_nrates;
244 u8 rs_rates[ATH_RATE_MAX];
245};
246
247/* per-device state */
248struct ath_rate_softc {
249 /* phy tables that contain rate control data */
250 const void *hw_rate_table[ATH9K_MODE_MAX];
251 int fixedrix; /* -1 or index of fixed rate */
252};
253
254/* per-node state */
255struct ath_rate_node {
256 struct ath_tx_ratectrl tx_ratectrl; /* rate control state proper */
257 u32 prev_data_rix; /* rate idx of last data frame */
258
259 /* map of rate ix -> negotiated rate set ix */
260 u8 rixmap[MAX_TX_RATE_TBL];
261
262 /* map of ht rate ix -> negotiated rate set ix */
263 u8 ht_rixmap[MAX_TX_RATE_TBL];
264
265 u8 ht_cap; /* ht capabilities */
266 u8 ant_tx; /* current transmit antenna */
267
268 u8 single_stream; /* When TRUE, only single
269 stream Tx possible */
270 struct ath_rateset neg_rates; /* Negotiated rates */
271 struct ath_rateset neg_ht_rates; /* Negotiated HT rates */
272 struct ath_rate_softc *asc; /* back pointer to atheros softc */
273 struct ath_vap *avp; /* back pointer to vap */
274};
275
276/* Driver data of ieee80211_tx_info */
277struct ath_tx_info_priv {
278 struct ath_rc_series rcs[4];
279 struct ath_tx_status tx;
280 int n_frames;
281 int n_bad_frames;
282 u8 min_rate;
283};
284
285/*
286 * Attach/detach a rate control module.
287 */
288struct ath_rate_softc *ath_rate_attach(struct ath_hal *ah);
289void ath_rate_detach(struct ath_rate_softc *asc);
290
291/*
292 * Update/reset rate control state for 802.11 state transitions.
293 * Important mostly as the analog to ath_rate_newassoc when operating
294 * in station mode.
295 */
296void ath_rc_node_update(struct ieee80211_hw *hw, struct ath_rate_node *rc_priv);
297void ath_rate_newstate(struct ath_softc *sc, struct ath_vap *avp);
298
299/*
300 * Return the tx rate series.
301 */
302void ath_rate_findrate(struct ath_softc *sc, struct ath_rate_node *ath_rc_priv,
303 int num_tries, int num_rates,
304 unsigned int rcflag, struct ath_rc_series[],
305 int *is_probe, int isretry);
306/*
307 * Return rate index for given Dot11 Rate.
308 */
309u8 ath_rate_findrateix(struct ath_softc *sc,
310 u8 dot11_rate);
311
312/* Routines to register/unregister rate control algorithm */
313int ath_rate_control_register(void);
314void ath_rate_control_unregister(void);
315
316#endif /* RC_H */
diff --git a/drivers/net/wireless/ath9k/recv.c b/drivers/net/wireless/ath9k/recv.c
new file mode 100644
index 000000000000..2fe806175c01
--- /dev/null
+++ b/drivers/net/wireless/ath9k/recv.c
@@ -0,0 +1,1318 @@
1/*
2 * Copyright (c) 2008 Atheros Communications Inc.
3 *
4 * Permission to use, copy, modify, and/or distribute this software for any
5 * purpose with or without fee is hereby granted, provided that the above
6 * copyright notice and this permission notice appear in all copies.
7 *
8 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
9 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
10 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
11 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
12 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
13 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
14 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
15 */
16
17/*
18 * Implementation of receive path.
19 */
20
21#include "core.h"
22
23/*
24 * Setup and link descriptors.
25 *
26 * 11N: we can no longer afford to self link the last descriptor.
27 * MAC acknowledges BA status as long as it copies frames to host
28 * buffer (or rx fifo). This can incorrectly acknowledge packets
29 * to a sender if last desc is self-linked.
30 *
31 * NOTE: Caller should hold the rxbuf lock.
32 */
33
34static void ath_rx_buf_link(struct ath_softc *sc, struct ath_buf *bf)
35{
36 struct ath_hal *ah = sc->sc_ah;
37 struct ath_desc *ds;
38 struct sk_buff *skb;
39
40 ATH_RXBUF_RESET(bf);
41
42 ds = bf->bf_desc;
43 ds->ds_link = 0; /* link to null */
44 ds->ds_data = bf->bf_buf_addr;
45
46 /* XXX For RADAR?
47 * virtual addr of the beginning of the buffer. */
48 skb = bf->bf_mpdu;
49 ASSERT(skb != NULL);
50 ds->ds_vdata = skb->data;
51
52 /* setup rx descriptors */
53 ath9k_hw_setuprxdesc(ah,
54 ds,
55 skb_tailroom(skb), /* buffer size */
56 0);
57
58 if (sc->sc_rxlink == NULL)
59 ath9k_hw_putrxbuf(ah, bf->bf_daddr);
60 else
61 *sc->sc_rxlink = bf->bf_daddr;
62
63 sc->sc_rxlink = &ds->ds_link;
64 ath9k_hw_rxena(ah);
65}
66
67/* Process received BAR frame */
68
69static int ath_bar_rx(struct ath_softc *sc,
70 struct ath_node *an,
71 struct sk_buff *skb)
72{
73 struct ieee80211_bar *bar;
74 struct ath_arx_tid *rxtid;
75 struct sk_buff *tskb;
76 struct ath_recv_status *rx_status;
77 int tidno, index, cindex;
78 u16 seqno;
79
80 /* look at BAR contents */
81
82 bar = (struct ieee80211_bar *)skb->data;
83 tidno = (le16_to_cpu(bar->control) & IEEE80211_BAR_CTL_TID_M)
84 >> IEEE80211_BAR_CTL_TID_S;
85 seqno = le16_to_cpu(bar->start_seq_num) >> IEEE80211_SEQ_SEQ_SHIFT;
86
87 /* process BAR - indicate all pending RX frames till the BAR seqno */
88
89 rxtid = &an->an_aggr.rx.tid[tidno];
90
91 spin_lock_bh(&rxtid->tidlock);
92
93 /* get relative index */
94
95 index = ATH_BA_INDEX(rxtid->seq_next, seqno);
96
97 /* drop BAR if old sequence (index is too large) */
98
99 if ((index > rxtid->baw_size) &&
100 (index > (IEEE80211_SEQ_MAX - (rxtid->baw_size << 2))))
101 /* discard frame, ieee layer may not treat frame as a dup */
102 goto unlock_and_free;
103
104	/* complete receive processing for all pending frames up to BAR seqno */
105
106 cindex = (rxtid->baw_head + index) & (ATH_TID_MAX_BUFS - 1);
107 while ((rxtid->baw_head != rxtid->baw_tail) &&
108 (rxtid->baw_head != cindex)) {
109 tskb = rxtid->rxbuf[rxtid->baw_head].rx_wbuf;
110 rx_status = &rxtid->rxbuf[rxtid->baw_head].rx_status;
111 rxtid->rxbuf[rxtid->baw_head].rx_wbuf = NULL;
112
113 if (tskb != NULL)
114 ath_rx_subframe(an, tskb, rx_status);
115
116 INCR(rxtid->baw_head, ATH_TID_MAX_BUFS);
117 INCR(rxtid->seq_next, IEEE80211_SEQ_MAX);
118 }
119
120 /* ... and indicate rest of the frames in-order */
121
122 while (rxtid->baw_head != rxtid->baw_tail &&
123 rxtid->rxbuf[rxtid->baw_head].rx_wbuf != NULL) {
124 tskb = rxtid->rxbuf[rxtid->baw_head].rx_wbuf;
125 rx_status = &rxtid->rxbuf[rxtid->baw_head].rx_status;
126 rxtid->rxbuf[rxtid->baw_head].rx_wbuf = NULL;
127
128 ath_rx_subframe(an, tskb, rx_status);
129
130 INCR(rxtid->baw_head, ATH_TID_MAX_BUFS);
131 INCR(rxtid->seq_next, IEEE80211_SEQ_MAX);
132 }
133
134unlock_and_free:
135 spin_unlock_bh(&rxtid->tidlock);
136 /* free bar itself */
137 dev_kfree_skb(skb);
138 return IEEE80211_FTYPE_CTL;
139}
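A minimal user-space sketch of the relative-index arithmetic used above. ATH_BA_INDEX and IEEE80211_SEQ_MAX are defined elsewhere in the driver; they are assumed here to implement the usual modulo-4096 (12-bit) sequence-space arithmetic, and every name and value below is illustrative only:

#include <stdio.h>

#define SEQ_MAX 4096	/* assumed 12-bit 802.11 sequence space */

/* Offset of 'seq' from the start of a block-ack window beginning at 'start'. */
static unsigned int ba_index(unsigned int start, unsigned int seq)
{
	return (seq - start) & (SEQ_MAX - 1);
}

int main(void)
{
	/* Window head at 4090; a BAR asks to release frames up to seqno 3. */
	printf("%u\n", ba_index(4090, 3));	/* prints 9: the wrap is handled */
	return 0;
}

With arithmetic of this form, the "index is too large" check above can tell a genuinely stale BAR from one whose window legitimately wraps the sequence space.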
140
141/* Function to handle a subframe of aggregation when HT is enabled */
142
143static int ath_ampdu_input(struct ath_softc *sc,
144 struct ath_node *an,
145 struct sk_buff *skb,
146 struct ath_recv_status *rx_status)
147{
148 struct ieee80211_hdr *hdr;
149 struct ath_arx_tid *rxtid;
150 struct ath_rxbuf *rxbuf;
151 u8 type, subtype;
152 u16 rxseq;
153 int tid = 0, index, cindex, rxdiff;
154 __le16 fc;
155 u8 *qc;
156
157 hdr = (struct ieee80211_hdr *)skb->data;
158 fc = hdr->frame_control;
159
160	/* drop frames with a non-zero protocol version */
161
162 if ((le16_to_cpu(hdr->frame_control) & IEEE80211_FCTL_VERS) != 0) {
163 dev_kfree_skb(skb);
164 return -1;
165 }
166
167 type = le16_to_cpu(hdr->frame_control) & IEEE80211_FCTL_FTYPE;
168 subtype = le16_to_cpu(hdr->frame_control) & IEEE80211_FCTL_STYPE;
169
170 if (ieee80211_is_back_req(fc))
171 return ath_bar_rx(sc, an, skb);
172
173 /* special aggregate processing only for qos unicast data frames */
174
175 if (!ieee80211_is_data(fc) ||
176 !ieee80211_is_data_qos(fc) ||
177 is_multicast_ether_addr(hdr->addr1))
178 return ath_rx_subframe(an, skb, rx_status);
179
180 /* lookup rx tid state */
181
182 if (ieee80211_is_data_qos(fc)) {
183 qc = ieee80211_get_qos_ctl(hdr);
184 tid = qc[0] & 0xf;
185 }
186
187 if (sc->sc_opmode == ATH9K_M_STA) {
188 /* Drop the frame not belonging to me. */
189 if (memcmp(hdr->addr1, sc->sc_myaddr, ETH_ALEN)) {
190 dev_kfree_skb(skb);
191 return -1;
192 }
193 }
194
195 rxtid = &an->an_aggr.rx.tid[tid];
196
197 spin_lock(&rxtid->tidlock);
198
199 rxdiff = (rxtid->baw_tail - rxtid->baw_head) &
200 (ATH_TID_MAX_BUFS - 1);
201
202 /*
203 * If the ADDBA exchange has not been completed by the source,
204 * process via legacy path (i.e. no reordering buffer is needed)
205 */
206 if (!rxtid->addba_exchangecomplete) {
207 spin_unlock(&rxtid->tidlock);
208 return ath_rx_subframe(an, skb, rx_status);
209 }
210
211 /* extract sequence number from recvd frame */
212
213 rxseq = le16_to_cpu(hdr->seq_ctrl) >> IEEE80211_SEQ_SEQ_SHIFT;
214
215 if (rxtid->seq_reset) {
216 rxtid->seq_reset = 0;
217 rxtid->seq_next = rxseq;
218 }
219
220 index = ATH_BA_INDEX(rxtid->seq_next, rxseq);
221
222 /* drop frame if old sequence (index is too large) */
223
224 if (index > (IEEE80211_SEQ_MAX - (rxtid->baw_size << 2))) {
225 /* discard frame, ieee layer may not treat frame as a dup */
226 spin_unlock(&rxtid->tidlock);
227 dev_kfree_skb(skb);
228 return IEEE80211_FTYPE_DATA;
229 }
230
231 /* sequence number is beyond block-ack window */
232
233 if (index >= rxtid->baw_size) {
234
235 /* complete receive processing for all pending frames */
236
237 while (index >= rxtid->baw_size) {
238
239 rxbuf = rxtid->rxbuf + rxtid->baw_head;
240
241 if (rxbuf->rx_wbuf != NULL) {
242 ath_rx_subframe(an, rxbuf->rx_wbuf,
243 &rxbuf->rx_status);
244 rxbuf->rx_wbuf = NULL;
245 }
246
247 INCR(rxtid->baw_head, ATH_TID_MAX_BUFS);
248 INCR(rxtid->seq_next, IEEE80211_SEQ_MAX);
249
250 index--;
251 }
252 }
253
254 /* add buffer to the recv ba window */
255
256 cindex = (rxtid->baw_head + index) & (ATH_TID_MAX_BUFS - 1);
257 rxbuf = rxtid->rxbuf + cindex;
258
259 if (rxbuf->rx_wbuf != NULL) {
260 spin_unlock(&rxtid->tidlock);
261 /* duplicate frame */
262 dev_kfree_skb(skb);
263 return IEEE80211_FTYPE_DATA;
264 }
265
266 rxbuf->rx_wbuf = skb;
267 rxbuf->rx_time = get_timestamp();
268 rxbuf->rx_status = *rx_status;
269
270 /* advance tail if sequence received is newer
271 * than any received so far */
272
273 if (index >= rxdiff) {
274 rxtid->baw_tail = cindex;
275 INCR(rxtid->baw_tail, ATH_TID_MAX_BUFS);
276 }
277
278 /* indicate all in-order received frames */
279
280 while (rxtid->baw_head != rxtid->baw_tail) {
281 rxbuf = rxtid->rxbuf + rxtid->baw_head;
282 if (!rxbuf->rx_wbuf)
283 break;
284
285 ath_rx_subframe(an, rxbuf->rx_wbuf, &rxbuf->rx_status);
286 rxbuf->rx_wbuf = NULL;
287
288 INCR(rxtid->baw_head, ATH_TID_MAX_BUFS);
289 INCR(rxtid->seq_next, IEEE80211_SEQ_MAX);
290 }
291
292 /*
293 * start a timer to flush all received frames if there are pending
294 * receive frames
295 */
296 if (rxtid->baw_head != rxtid->baw_tail)
297 mod_timer(&rxtid->timer, ATH_RX_TIMEOUT);
298 else
299 del_timer_sync(&rxtid->timer);
300
301 spin_unlock(&rxtid->tidlock);
302 return IEEE80211_FTYPE_DATA;
303}
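To make the circular reorder-buffer indexing above concrete, a small sketch; ATH_TID_MAX_BUFS is assumed to be a power of two (as the masking in the function requires), and the numbers are made up for illustration:

#include <stdio.h>

#define TID_MAX_BUFS 64	/* assumed power-of-two slot count */

int main(void)
{
	unsigned int baw_head = 60;	/* slot of the oldest pending frame */
	unsigned int index = 7;		/* sequence offset of the new frame */
	unsigned int cindex = (baw_head + index) & (TID_MAX_BUFS - 1);

	/* The slot index wraps around the end of the array: 60 + 7 -> 3. */
	printf("cindex = %u\n", cindex);
	return 0;
}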
304
305/* Timer to flush all received sub-frames */
306
307static void ath_rx_timer(unsigned long data)
308{
309 struct ath_arx_tid *rxtid = (struct ath_arx_tid *)data;
310 struct ath_node *an = rxtid->an;
311 struct ath_rxbuf *rxbuf;
312 int nosched;
313
314 spin_lock_bh(&rxtid->tidlock);
315 while (rxtid->baw_head != rxtid->baw_tail) {
316 rxbuf = rxtid->rxbuf + rxtid->baw_head;
317 if (!rxbuf->rx_wbuf) {
318 INCR(rxtid->baw_head, ATH_TID_MAX_BUFS);
319 INCR(rxtid->seq_next, IEEE80211_SEQ_MAX);
320 continue;
321 }
322
323 /*
324 * Stop if the next one is a very recent frame.
325 *
326 * Call get_timestamp in every iteration to protect against the
327 * case in which a new frame is received while we are executing
328 * this function. Using a timestamp obtained before entering
329 * the loop could lead to a very large time interval
330 * (a negative value typecast to unsigned), breaking the
331 * function's logic.
332 */
333 if ((get_timestamp() - rxbuf->rx_time) <
334 (ATH_RX_TIMEOUT * HZ / 1000))
335 break;
336
337 ath_rx_subframe(an, rxbuf->rx_wbuf,
338 &rxbuf->rx_status);
339 rxbuf->rx_wbuf = NULL;
340
341 INCR(rxtid->baw_head, ATH_TID_MAX_BUFS);
342 INCR(rxtid->seq_next, IEEE80211_SEQ_MAX);
343 }
344
345 /*
346 * start a timer to flush all received frames if there are pending
347 * receive frames
348 */
349 if (rxtid->baw_head != rxtid->baw_tail)
350 nosched = 0;
351 else
352 nosched = 1; /* no need to re-arm the timer again */
353
354 spin_unlock_bh(&rxtid->tidlock);
355}
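The comment inside the loop above is about unsigned wraparound; a tiny sketch of the failure mode it guards against, with invented values:

#include <stdio.h>

int main(void)
{
	unsigned long now = 1000;	/* timestamp cached before the loop */
	unsigned long rx_time = 1005;	/* frame queued after 'now' was taken */

	/*
	 * A negative difference typecast to unsigned becomes enormous, so the
	 * frame would wrongly look far older than the timeout. Re-reading the
	 * timestamp inside the loop avoids this.
	 */
	printf("%lu\n", now - rx_time);
	return 0;
}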
356
357/* Free all pending sub-frames in the re-ordering buffer */
358
359static void ath_rx_flush_tid(struct ath_softc *sc,
360 struct ath_arx_tid *rxtid, int drop)
361{
362 struct ath_rxbuf *rxbuf;
363
364 spin_lock_bh(&rxtid->tidlock);
365 while (rxtid->baw_head != rxtid->baw_tail) {
366 rxbuf = rxtid->rxbuf + rxtid->baw_head;
367 if (!rxbuf->rx_wbuf) {
368 INCR(rxtid->baw_head, ATH_TID_MAX_BUFS);
369 INCR(rxtid->seq_next, IEEE80211_SEQ_MAX);
370 continue;
371 }
372
373 if (drop)
374 dev_kfree_skb(rxbuf->rx_wbuf);
375 else
376 ath_rx_subframe(rxtid->an,
377 rxbuf->rx_wbuf,
378 &rxbuf->rx_status);
379
380 rxbuf->rx_wbuf = NULL;
381
382 INCR(rxtid->baw_head, ATH_TID_MAX_BUFS);
383 INCR(rxtid->seq_next, IEEE80211_SEQ_MAX);
384 }
385 spin_unlock_bh(&rxtid->tidlock);
386}
387
388static struct sk_buff *ath_rxbuf_alloc(struct ath_softc *sc,
389 u32 len)
390{
391 struct sk_buff *skb;
392 u32 off;
393
394 /*
395 * Cache-line-align. This is important (for the
396 * 5210 at least) as not doing so causes bogus data
397 * in rx'd frames.
398 */
399
400 skb = dev_alloc_skb(len + sc->sc_cachelsz - 1);
401 if (skb != NULL) {
402 off = ((unsigned long) skb->data) % sc->sc_cachelsz;
403 if (off != 0)
404 skb_reserve(skb, sc->sc_cachelsz - off);
405 } else {
406 DPRINTF(sc, ATH_DBG_FATAL,
407 "%s: skbuff alloc of size %u failed\n",
408 __func__, len);
409 return NULL;
410 }
411
412 return skb;
413}
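A short sketch of the cache-line alignment arithmetic used above; the cache-line size and address are made-up example values:

#include <stdio.h>

int main(void)
{
	unsigned long cachelsz = 32;		/* assumed cache line size */
	unsigned long data = 0x100d;		/* start of the skb data area */
	unsigned long off = data % cachelsz;	/* 13 bytes into a line */

	/* Reserving (cachelsz - off) bytes pushes the payload to a line start. */
	if (off != 0)
		data += cachelsz - off;
	printf("0x%lx\n", data);		/* 0x1020, 32-byte aligned */
	return 0;
}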
414
415static void ath_rx_requeue(struct ath_softc *sc, struct sk_buff *skb)
416{
417 struct ath_buf *bf = ATH_RX_CONTEXT(skb)->ctx_rxbuf;
418
419 ASSERT(bf != NULL);
420
421 spin_lock_bh(&sc->sc_rxbuflock);
422 if (bf->bf_status & ATH_BUFSTATUS_STALE) {
423 /*
424	 * This buffer is still held for hw access.
425	 * Mark it as free to be re-queued later.
426 */
427 bf->bf_status |= ATH_BUFSTATUS_FREE;
428 } else {
429 /* XXX: we probably never enter here, remove after
430 * verification */
431 list_add_tail(&bf->list, &sc->sc_rxbuf);
432 ath_rx_buf_link(sc, bf);
433 }
434 spin_unlock_bh(&sc->sc_rxbuflock);
435}
436
437/*
438 * The skb indicated to upper stack won't be returned to us.
439 * So we have to allocate a new one and queue it by ourselves.
440 */
441static int ath_rx_indicate(struct ath_softc *sc,
442 struct sk_buff *skb,
443 struct ath_recv_status *status,
444 u16 keyix)
445{
446 struct ath_buf *bf = ATH_RX_CONTEXT(skb)->ctx_rxbuf;
447 struct sk_buff *nskb;
448 int type;
449
450 /* indicate frame to the stack, which will free the old skb. */
451 type = ath__rx_indicate(sc, skb, status, keyix);
452
453	/* allocate a new skb and queue it for H/W processing */
454 nskb = ath_rxbuf_alloc(sc, sc->sc_rxbufsize);
455 if (nskb != NULL) {
456 bf->bf_mpdu = nskb;
457 bf->bf_buf_addr = ath_skb_map_single(sc,
458 nskb,
459 PCI_DMA_FROMDEVICE,
460 /* XXX: Remove get_dma_mem_context() */
461 get_dma_mem_context(bf, bf_dmacontext));
462 ATH_RX_CONTEXT(nskb)->ctx_rxbuf = bf;
463
464 /* queue the new wbuf to H/W */
465 ath_rx_requeue(sc, nskb);
466 }
467
468 return type;
469}
470
471static void ath_opmode_init(struct ath_softc *sc)
472{
473 struct ath_hal *ah = sc->sc_ah;
474 u32 rfilt, mfilt[2];
475
476 /* configure rx filter */
477 rfilt = ath_calcrxfilter(sc);
478 ath9k_hw_setrxfilter(ah, rfilt);
479
480 /* configure bssid mask */
481 if (ah->ah_caps.hw_caps & ATH9K_HW_CAP_BSSIDMASK)
482 ath9k_hw_setbssidmask(ah, sc->sc_bssidmask);
483
484 /* configure operational mode */
485 ath9k_hw_setopmode(ah);
486
487 /* Handle any link-level address change. */
488 ath9k_hw_setmac(ah, sc->sc_myaddr);
489
490 /* calculate and install multicast filter */
491 mfilt[0] = mfilt[1] = ~0;
492
493 ath9k_hw_setmcastfilter(ah, mfilt[0], mfilt[1]);
494	DPRINTF(sc, ATH_DBG_CONFIG,
495 "%s: RX filter 0x%x, MC filter %08x:%08x\n",
496 __func__, rfilt, mfilt[0], mfilt[1]);
497}
498
499int ath_rx_init(struct ath_softc *sc, int nbufs)
500{
501 struct sk_buff *skb;
502 struct ath_buf *bf;
503 int error = 0;
504
505 do {
506 spin_lock_init(&sc->sc_rxflushlock);
507 sc->sc_rxflush = 0;
508 spin_lock_init(&sc->sc_rxbuflock);
509
510 /*
511 * Cisco's VPN software requires that drivers be able to
512 * receive encapsulated frames that are larger than the MTU.
513	 * Since we can't be sure how large a frame we'll get, set up
514	 * to handle the largest one possible.
515 */
516 sc->sc_rxbufsize = roundup(IEEE80211_MAX_MPDU_LEN,
517 min(sc->sc_cachelsz,
518 (u16)64));
519
520 DPRINTF(sc, ATH_DBG_CONFIG, "%s: cachelsz %u rxbufsize %u\n",
521 __func__, sc->sc_cachelsz, sc->sc_rxbufsize);
522
523 /* Initialize rx descriptors */
524
525 error = ath_descdma_setup(sc, &sc->sc_rxdma, &sc->sc_rxbuf,
526 "rx", nbufs, 1);
527 if (error != 0) {
528 DPRINTF(sc, ATH_DBG_FATAL,
529 "%s: failed to allocate rx descriptors: %d\n",
530 __func__, error);
531 break;
532 }
533
534 /* Pre-allocate a wbuf for each rx buffer */
535
536 list_for_each_entry(bf, &sc->sc_rxbuf, list) {
537 skb = ath_rxbuf_alloc(sc, sc->sc_rxbufsize);
538 if (skb == NULL) {
539 error = -ENOMEM;
540 break;
541 }
542
543 bf->bf_mpdu = skb;
544 bf->bf_buf_addr =
545 ath_skb_map_single(sc, skb, PCI_DMA_FROMDEVICE,
546 get_dma_mem_context(bf, bf_dmacontext));
547 ATH_RX_CONTEXT(skb)->ctx_rxbuf = bf;
548 }
549 sc->sc_rxlink = NULL;
550
551 } while (0);
552
553 if (error)
554 ath_rx_cleanup(sc);
555
556 return error;
557}
558
559/* Reclaim all rx queue resources */
560
561void ath_rx_cleanup(struct ath_softc *sc)
562{
563 struct sk_buff *skb;
564 struct ath_buf *bf;
565
566 list_for_each_entry(bf, &sc->sc_rxbuf, list) {
567 skb = bf->bf_mpdu;
568 if (skb)
569 dev_kfree_skb(skb);
570 }
571
572 /* cleanup rx descriptors */
573
574 if (sc->sc_rxdma.dd_desc_len != 0)
575 ath_descdma_cleanup(sc, &sc->sc_rxdma, &sc->sc_rxbuf);
576}
577
578/*
579 * Calculate the receive filter according to the
580 * operating mode and state:
581 *
582 * o always accept unicast, broadcast, and multicast traffic
583 * o maintain current state of phy error reception (the hal
584 * may enable phy error frames for noise immunity work)
585 * o probe request frames are accepted only when operating in
586 * hostap, adhoc, or monitor modes
587 * o enable promiscuous mode according to the interface state
588 * o accept beacons:
589 * - when operating in adhoc mode so the 802.11 layer creates
590 * node table entries for peers,
591 * - when operating in station mode for collecting rssi data when
592 * the station is otherwise quiet, or
593 * - when operating as a repeater so we see repeater-sta beacons
594 * - when scanning
595 */
596
597u32 ath_calcrxfilter(struct ath_softc *sc)
598{
599#define RX_FILTER_PRESERVE (ATH9K_RX_FILTER_PHYERR | ATH9K_RX_FILTER_PHYRADAR)
600 u32 rfilt;
601
602 rfilt = (ath9k_hw_getrxfilter(sc->sc_ah) & RX_FILTER_PRESERVE)
603 | ATH9K_RX_FILTER_UCAST | ATH9K_RX_FILTER_BCAST
604 | ATH9K_RX_FILTER_MCAST;
605
606 /* If not a STA, enable processing of Probe Requests */
607 if (sc->sc_opmode != ATH9K_M_STA)
608 rfilt |= ATH9K_RX_FILTER_PROBEREQ;
609
610	/* Can't set HOSTAP into promiscuous mode */
611 if (sc->sc_opmode == ATH9K_M_MONITOR) {
612 rfilt |= ATH9K_RX_FILTER_PROM;
613	/* ??? To prevent the hardware from sending ACKs */
614 rfilt &= ~ATH9K_RX_FILTER_UCAST;
615 }
616
617 if (sc->sc_opmode == ATH9K_M_STA || sc->sc_opmode == ATH9K_M_IBSS ||
618 sc->sc_scanning)
619 rfilt |= ATH9K_RX_FILTER_BEACON;
620
621 /* If in HOSTAP mode, want to enable reception of PSPOLL frames
622 & beacon frames */
623 if (sc->sc_opmode == ATH9K_M_HOSTAP)
624 rfilt |= (ATH9K_RX_FILTER_BEACON | ATH9K_RX_FILTER_PSPOLL);
625 return rfilt;
626#undef RX_FILTER_PRESERVE
627}
628
629/* Enable the receive h/w following a reset. */
630
631int ath_startrecv(struct ath_softc *sc)
632{
633 struct ath_hal *ah = sc->sc_ah;
634 struct ath_buf *bf, *tbf;
635
636 spin_lock_bh(&sc->sc_rxbuflock);
637 if (list_empty(&sc->sc_rxbuf))
638 goto start_recv;
639
640 sc->sc_rxlink = NULL;
641 list_for_each_entry_safe(bf, tbf, &sc->sc_rxbuf, list) {
642 if (bf->bf_status & ATH_BUFSTATUS_STALE) {
643 /* restarting h/w, no need for holding descriptors */
644 bf->bf_status &= ~ATH_BUFSTATUS_STALE;
645 /*
646 * Upper layer may not be done with the frame yet so
647 * we can't just re-queue it to hardware. Remove it
648 * from h/w queue. It'll be re-queued when upper layer
649 * returns the frame and ath_rx_requeue_mpdu is called.
650 */
651 if (!(bf->bf_status & ATH_BUFSTATUS_FREE)) {
652 list_del(&bf->list);
653 continue;
654 }
655 }
656 /* chain descriptors */
657 ath_rx_buf_link(sc, bf);
658 }
659
660 /* We could have deleted elements so the list may be empty now */
661 if (list_empty(&sc->sc_rxbuf))
662 goto start_recv;
663
664 bf = list_first_entry(&sc->sc_rxbuf, struct ath_buf, list);
665 ath9k_hw_putrxbuf(ah, bf->bf_daddr);
666 ath9k_hw_rxena(ah); /* enable recv descriptors */
667
668start_recv:
669 spin_unlock_bh(&sc->sc_rxbuflock);
670 ath_opmode_init(sc); /* set filters, etc. */
671 ath9k_hw_startpcureceive(ah); /* re-enable PCU/DMA engine */
672 return 0;
673}
674
675/* Disable the receive h/w in preparation for a reset. */
676
677bool ath_stoprecv(struct ath_softc *sc)
678{
679 struct ath_hal *ah = sc->sc_ah;
680 u64 tsf;
681 bool stopped;
682
683 ath9k_hw_stoppcurecv(ah); /* disable PCU */
684 ath9k_hw_setrxfilter(ah, 0); /* clear recv filter */
685 stopped = ath9k_hw_stopdmarecv(ah); /* disable DMA engine */
686 mdelay(3); /* 3ms is long enough for 1 frame */
687 tsf = ath9k_hw_gettsf64(ah);
688 sc->sc_rxlink = NULL; /* just in case */
689 return stopped;
690}
691
692/* Flush receive queue */
693
694void ath_flushrecv(struct ath_softc *sc)
695{
696 /*
697 * ath_rx_tasklet may be used to handle rx interrupt and flush receive
698 * queue at the same time. Use a lock to serialize the access of rx
699 * queue.
700 * ath_rx_tasklet cannot hold the spinlock while indicating packets.
701 * Instead, do not claim the spinlock but check for a flush in
702 * progress (see references to sc_rxflush)
703 */
704 spin_lock_bh(&sc->sc_rxflushlock);
705 sc->sc_rxflush = 1;
706
707 ath_rx_tasklet(sc, 1);
708
709 sc->sc_rxflush = 0;
710 spin_unlock_bh(&sc->sc_rxflushlock);
711}
712
713/* Process an individual frame */
714
715int ath_rx_input(struct ath_softc *sc,
716 struct ath_node *an,
717 int is_ampdu,
718 struct sk_buff *skb,
719 struct ath_recv_status *rx_status,
720 enum ATH_RX_TYPE *status)
721{
722 if (is_ampdu && sc->sc_rxaggr) {
723 *status = ATH_RX_CONSUMED;
724 return ath_ampdu_input(sc, an, skb, rx_status);
725 } else {
726 *status = ATH_RX_NON_CONSUMED;
727 return -1;
728 }
729}
730
731/* Process receive queue, as well as LED, etc. */
732
733int ath_rx_tasklet(struct ath_softc *sc, int flush)
734{
735#define PA2DESC(_sc, _pa) \
736 ((struct ath_desc *)((caddr_t)(_sc)->sc_rxdma.dd_desc + \
737 ((_pa) - (_sc)->sc_rxdma.dd_desc_paddr)))
738
739 struct ath_buf *bf, *bf_held = NULL;
740 struct ath_desc *ds;
741 struct ieee80211_hdr *hdr;
742 struct sk_buff *skb = NULL;
743 struct ath_recv_status rx_status;
744 struct ath_hal *ah = sc->sc_ah;
745 int type, rx_processed = 0;
746 u32 phyerr;
747 u8 chainreset = 0;
748 int retval;
749 __le16 fc;
750
751 do {
752 /* If handling rx interrupt and flush is in progress => exit */
753 if (sc->sc_rxflush && (flush == 0))
754 break;
755
756 spin_lock_bh(&sc->sc_rxbuflock);
757 if (list_empty(&sc->sc_rxbuf)) {
758 sc->sc_rxlink = NULL;
759 spin_unlock_bh(&sc->sc_rxbuflock);
760 break;
761 }
762
763 bf = list_first_entry(&sc->sc_rxbuf, struct ath_buf, list);
764
765 /*
766 * There is a race condition that BH gets scheduled after sw
767	 * writes RxE and before hw re-loads the last descriptor to get
768 * the newly chained one. Software must keep the last DONE
769 * descriptor as a holding descriptor - software does so by
770 * marking it with the STALE flag.
771 */
772 if (bf->bf_status & ATH_BUFSTATUS_STALE) {
773 bf_held = bf;
774 if (list_is_last(&bf_held->list, &sc->sc_rxbuf)) {
775 /*
776 * The holding descriptor is the last
777 * descriptor in queue. It's safe to
778 * remove the last holding descriptor
779 * in BH context.
780 */
781 list_del(&bf_held->list);
782 bf_held->bf_status &= ~ATH_BUFSTATUS_STALE;
783 sc->sc_rxlink = NULL;
784
785 if (bf_held->bf_status & ATH_BUFSTATUS_FREE) {
786 list_add_tail(&bf_held->list,
787 &sc->sc_rxbuf);
788 ath_rx_buf_link(sc, bf_held);
789 }
790 spin_unlock_bh(&sc->sc_rxbuflock);
791 break;
792 }
793 bf = list_entry(bf->list.next, struct ath_buf, list);
794 }
795
796 ds = bf->bf_desc;
797 ++rx_processed;
798
799 /*
800 * Must provide the virtual address of the current
801 * descriptor, the physical address, and the virtual
802 * address of the next descriptor in the h/w chain.
803 * This allows the HAL to look ahead to see if the
804 * hardware is done with a descriptor by checking the
805 * done bit in the following descriptor and the address
806 * of the current descriptor the DMA engine is working
807 * on. All this is necessary because of our use of
808 * a self-linked list to avoid rx overruns.
809 */
810 retval = ath9k_hw_rxprocdesc(ah,
811 ds,
812 bf->bf_daddr,
813 PA2DESC(sc, ds->ds_link),
814 0);
815 if (retval == -EINPROGRESS) {
816 struct ath_buf *tbf;
817 struct ath_desc *tds;
818
819 if (list_is_last(&bf->list, &sc->sc_rxbuf)) {
820 spin_unlock_bh(&sc->sc_rxbuflock);
821 break;
822 }
823
824 tbf = list_entry(bf->list.next, struct ath_buf, list);
825
826 /*
827 * On some hardware the descriptor status words could
828 * get corrupted, including the done bit. Because of
829 * this, check if the next descriptor's done bit is
830 * set or not.
831 *
832 * If the next descriptor's done bit is set, the current
833 * descriptor has been corrupted. Force s/w to discard
834 * this descriptor and continue...
835 */
836
837 tds = tbf->bf_desc;
838 retval = ath9k_hw_rxprocdesc(ah,
839 tds, tbf->bf_daddr,
840 PA2DESC(sc, tds->ds_link), 0);
841 if (retval == -EINPROGRESS) {
842 spin_unlock_bh(&sc->sc_rxbuflock);
843 break;
844 }
845 }
846
847 /* XXX: we do not support frames spanning
848 * multiple descriptors */
849 bf->bf_status |= ATH_BUFSTATUS_DONE;
850
851 skb = bf->bf_mpdu;
852 if (skb == NULL) { /* XXX ??? can this happen */
853 spin_unlock_bh(&sc->sc_rxbuflock);
854 continue;
855 }
856 /*
857 * Now we know it's a completed frame, we can indicate the
858 * frame. Remove the previous holding descriptor and leave
859 * this one in the queue as the new holding descriptor.
860 */
861 if (bf_held) {
862 list_del(&bf_held->list);
863 bf_held->bf_status &= ~ATH_BUFSTATUS_STALE;
864 if (bf_held->bf_status & ATH_BUFSTATUS_FREE) {
865 list_add_tail(&bf_held->list, &sc->sc_rxbuf);
866 /* try to requeue this descriptor */
867 ath_rx_buf_link(sc, bf_held);
868 }
869 }
870
871 bf->bf_status |= ATH_BUFSTATUS_STALE;
872 bf_held = bf;
873 /*
874	 * Release the lock here in case ieee80211_input() returns
875 * the frame immediately by calling ath_rx_mpdu_requeue().
876 */
877 spin_unlock_bh(&sc->sc_rxbuflock);
878
879 if (flush) {
880 /*
881 * If we're asked to flush receive queue, directly
882 * chain it back at the queue without processing it.
883 */
884 goto rx_next;
885 }
886
887 hdr = (struct ieee80211_hdr *)skb->data;
888 fc = hdr->frame_control;
889 memzero(&rx_status, sizeof(struct ath_recv_status));
890
891 if (ds->ds_rxstat.rs_more) {
892 /*
893 * Frame spans multiple descriptors; this
894 * cannot happen yet as we don't support
895 * jumbograms. If not in monitor mode,
896 * discard the frame.
897 */
898#ifndef ERROR_FRAMES
899 /*
900 * Enable this if you want to see
901 * error frames in Monitor mode.
902 */
903 if (sc->sc_opmode != ATH9K_M_MONITOR)
904 goto rx_next;
905#endif
906 /* fall thru for monitor mode handling... */
907 } else if (ds->ds_rxstat.rs_status != 0) {
908 if (ds->ds_rxstat.rs_status & ATH9K_RXERR_CRC)
909 rx_status.flags |= ATH_RX_FCS_ERROR;
910 if (ds->ds_rxstat.rs_status & ATH9K_RXERR_PHY) {
911 phyerr = ds->ds_rxstat.rs_phyerr & 0x1f;
912 goto rx_next;
913 }
914
915 if (ds->ds_rxstat.rs_status & ATH9K_RXERR_DECRYPT) {
916 /*
917 * Decrypt error. We only mark packet status
918	 * here and always push the frame up to let
919	 * mac80211 handle the actual error case, be
920	 * it no decryption key or real decryption
921	 * error. This lets us keep statistics there.
922 */
923 rx_status.flags |= ATH_RX_DECRYPT_ERROR;
924 } else if (ds->ds_rxstat.rs_status & ATH9K_RXERR_MIC) {
925 /*
926 * Demic error. We only mark frame status here
927	 * and always push the frame up to let
928	 * mac80211 handle the actual error case. This
929	 * lets us keep statistics there. Hardware may
930 * post a false-positive MIC error.
931 */
932 if (ieee80211_is_ctl(fc))
933 /*
934 * Sometimes, we get invalid
935 * MIC failures on valid control frames.
936 * Remove these mic errors.
937 */
938 ds->ds_rxstat.rs_status &=
939 ~ATH9K_RXERR_MIC;
940 else
941 rx_status.flags |= ATH_RX_MIC_ERROR;
942 }
943 /*
944 * Reject error frames with the exception of
945 * decryption and MIC failures. For monitor mode,
946 * we also ignore the CRC error.
947 */
948 if (sc->sc_opmode == ATH9K_M_MONITOR) {
949 if (ds->ds_rxstat.rs_status &
950 ~(ATH9K_RXERR_DECRYPT | ATH9K_RXERR_MIC |
951 ATH9K_RXERR_CRC))
952 goto rx_next;
953 } else {
954 if (ds->ds_rxstat.rs_status &
955 ~(ATH9K_RXERR_DECRYPT | ATH9K_RXERR_MIC)) {
956 goto rx_next;
957 }
958 }
959 }
960 /*
961 * The status portion of the descriptor could get corrupted.
962 */
963 if (sc->sc_rxbufsize < ds->ds_rxstat.rs_datalen)
964 goto rx_next;
965 /*
966 * Sync and unmap the frame. At this point we're
967 * committed to passing the sk_buff somewhere so
968 * clear buf_skb; this means a new sk_buff must be
969	 * allocated when the rx descriptor is set up again
970 * to receive another frame.
971 */
972 skb_put(skb, ds->ds_rxstat.rs_datalen);
973 skb->protocol = cpu_to_be16(ETH_P_CONTROL);
974 rx_status.tsf = ath_extend_tsf(sc, ds->ds_rxstat.rs_tstamp);
975 rx_status.rateieee =
976 sc->sc_hwmap[ds->ds_rxstat.rs_rate].ieeerate;
977 rx_status.rateKbps =
978 sc->sc_hwmap[ds->ds_rxstat.rs_rate].rateKbps;
979 rx_status.ratecode = ds->ds_rxstat.rs_rate;
980
981 /* HT rate */
982 if (rx_status.ratecode & 0x80) {
983 /* TODO - add table to avoid division */
984 if (ds->ds_rxstat.rs_flags & ATH9K_RX_2040) {
985 rx_status.flags |= ATH_RX_40MHZ;
986 rx_status.rateKbps =
987 (rx_status.rateKbps * 27) / 13;
988 }
989 if (ds->ds_rxstat.rs_flags & ATH9K_RX_GI)
990 rx_status.rateKbps =
991 (rx_status.rateKbps * 10) / 9;
992 else
993 rx_status.flags |= ATH_RX_SHORT_GI;
994 }
995
996 /* sc->sc_noise_floor is only available when the station
997 attaches to an AP, so we use a default value
998 if we are not yet attached. */
999
1000 /* XXX we should use either sc->sc_noise_floor or
1001 * ath_hal_getChanNoise(ah, &sc->sc_curchan)
1002 * to calculate the noise floor.
1003 * However, the value returned by ath_hal_getChanNoise
1004 * seems to be incorrect (-31dBm on the last test),
1005 * so we will use a hard-coded value until we
1006 * figure out what is going on.
1007 */
1008 rx_status.abs_rssi =
1009 ds->ds_rxstat.rs_rssi + ATH_DEFAULT_NOISE_FLOOR;
1010
1011 pci_dma_sync_single_for_cpu(sc->pdev,
1012 bf->bf_buf_addr,
1013 skb_tailroom(skb),
1014 PCI_DMA_FROMDEVICE);
1015 pci_unmap_single(sc->pdev,
1016 bf->bf_buf_addr,
1017 sc->sc_rxbufsize,
1018 PCI_DMA_FROMDEVICE);
1019
1020 /* XXX: Ah! make me more readable, use a helper */
1021 if (ah->ah_caps.hw_caps & ATH9K_HW_CAP_HT) {
1022 if (ds->ds_rxstat.rs_moreaggr == 0) {
1023 rx_status.rssictl[0] =
1024 ds->ds_rxstat.rs_rssi_ctl0;
1025 rx_status.rssictl[1] =
1026 ds->ds_rxstat.rs_rssi_ctl1;
1027 rx_status.rssictl[2] =
1028 ds->ds_rxstat.rs_rssi_ctl2;
1029 rx_status.rssi = ds->ds_rxstat.rs_rssi;
1030 if (ds->ds_rxstat.rs_flags & ATH9K_RX_2040) {
1031 rx_status.rssiextn[0] =
1032 ds->ds_rxstat.rs_rssi_ext0;
1033 rx_status.rssiextn[1] =
1034 ds->ds_rxstat.rs_rssi_ext1;
1035 rx_status.rssiextn[2] =
1036 ds->ds_rxstat.rs_rssi_ext2;
1037 rx_status.flags |=
1038 ATH_RX_RSSI_EXTN_VALID;
1039 }
1040 rx_status.flags |= ATH_RX_RSSI_VALID |
1041 ATH_RX_CHAIN_RSSI_VALID;
1042 }
1043 } else {
1044 /*
1045 * Need to insert the "combined" rssi into the
1046 * status structure for upper layer processing
1047 */
1048 rx_status.rssi = ds->ds_rxstat.rs_rssi;
1049 rx_status.flags |= ATH_RX_RSSI_VALID;
1050 }
1051
1052 /* Pass frames up to the stack. */
1053
1054 type = ath_rx_indicate(sc, skb,
1055 &rx_status, ds->ds_rxstat.rs_keyix);
1056
1057 /*
1058 * change the default rx antenna if rx diversity chooses the
1059 * other antenna 3 times in a row.
1060 */
1061 if (sc->sc_defant != ds->ds_rxstat.rs_antenna) {
1062 if (++sc->sc_rxotherant >= 3)
1063 ath_setdefantenna(sc,
1064 ds->ds_rxstat.rs_antenna);
1065 } else {
1066 sc->sc_rxotherant = 0;
1067 }
1068
1069#ifdef CONFIG_SLOW_ANT_DIV
1070 if ((rx_status.flags & ATH_RX_RSSI_VALID) &&
1071 ieee80211_is_beacon(fc)) {
1072 ath_slow_ant_div(&sc->sc_antdiv, hdr, &ds->ds_rxstat);
1073 }
1074#endif
1075 /*
1076 * For frames successfully indicated, the buffer will be
1077 * returned to us by upper layers by calling
1078	 * ath_rx_mpdu_requeue, either synchronously or asynchronously.
1079 * So we don't want to do it here in this loop.
1080 */
1081 continue;
1082
1083rx_next:
1084 bf->bf_status |= ATH_BUFSTATUS_FREE;
1085 } while (TRUE);
1086
1087 if (chainreset) {
1088 DPRINTF(sc, ATH_DBG_CONFIG,
1089 "%s: Reset rx chain mask. "
1090 "Do internal reset\n", __func__);
1091 ASSERT(flush == 0);
1092 ath_internal_reset(sc);
1093 }
1094
1095 return 0;
1096#undef PA2DESC
1097}
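The PA2DESC macro above relies on the rx descriptors living in one contiguous DMA block, so a bus address taken from ds_link can be turned back into a CPU pointer by re-applying its offset to the block's virtual base. A self-contained sketch of that idea, with invented addresses:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	char ring[256];				/* stand-in for the descriptor block */
	uintptr_t vbase = (uintptr_t)ring;	/* dd_desc: virtual base of the block */
	uintptr_t pbase = 0x40000000;		/* dd_desc_paddr: pretend bus address */
	uintptr_t link_pa = pbase + 64;		/* ds_link as read from a descriptor */

	/* The same offset from the physical base locates the next descriptor. */
	void *next_desc = (void *)(vbase + (link_pa - pbase));
	printf("%p\n", next_desc);		/* ring + 64 */
	return 0;
}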
1098
1099/* Process ADDBA request in per-TID data structure */
1100
1101int ath_rx_aggr_start(struct ath_softc *sc,
1102 const u8 *addr,
1103 u16 tid,
1104 u16 *ssn)
1105{
1106 struct ath_arx_tid *rxtid;
1107 struct ath_node *an;
1108 struct ieee80211_hw *hw = sc->hw;
1109 struct ieee80211_supported_band *sband;
1110 u16 buffersize = 0;
1111
1112 spin_lock_bh(&sc->node_lock);
1113 an = ath_node_find(sc, (u8 *) addr);
1114 spin_unlock_bh(&sc->node_lock);
1115
1116 if (!an) {
1117 DPRINTF(sc, ATH_DBG_AGGR,
1118 "%s: Node not found to initialize RX aggregation\n",
1119 __func__);
1120 return -1;
1121 }
1122
1123 sband = hw->wiphy->bands[hw->conf.channel->band];
1124 buffersize = IEEE80211_MIN_AMPDU_BUF <<
1125 sband->ht_info.ampdu_factor; /* FIXME */
1126
1127 rxtid = &an->an_aggr.rx.tid[tid];
1128
1129 spin_lock_bh(&rxtid->tidlock);
1130 if (sc->sc_rxaggr) {
1131 /* Allow aggregation reception
1132 * Adjust rx BA window size. Peer might indicate a
1133 * zero buffer size for a _dont_care_ condition.
1134 */
1135 if (buffersize)
1136 rxtid->baw_size = min(buffersize, rxtid->baw_size);
1137
1138 /* set rx sequence number */
1139 rxtid->seq_next = *ssn;
1140
1141 /* Allocate the receive buffers for this TID */
1142 DPRINTF(sc, ATH_DBG_AGGR,
1143	    "%s: Allocating rxbuffer for TID %d\n", __func__, tid);
1144
1145 if (rxtid->rxbuf == NULL) {
1146 /*
1147	 * If the rxbuf is not NULL at this point, we *probably*
1148 * already allocated the buffer on a previous ADDBA,
1149 * and this is a subsequent ADDBA that got through.
1150 * Don't allocate, but use the value in the pointer,
1151 * we zero it out when we de-allocate.
1152 */
1153 rxtid->rxbuf = kmalloc(ATH_TID_MAX_BUFS *
1154 sizeof(struct ath_rxbuf), GFP_ATOMIC);
1155 }
1156 if (rxtid->rxbuf == NULL) {
1157 DPRINTF(sc, ATH_DBG_AGGR,
1158 "%s: Unable to allocate RX buffer, "
1159 "refusing ADDBA\n", __func__);
1160 } else {
1161 /* Ensure the memory is zeroed out (all internal
1162 * pointers are null) */
1163 memzero(rxtid->rxbuf, ATH_TID_MAX_BUFS *
1164 sizeof(struct ath_rxbuf));
1165 DPRINTF(sc, ATH_DBG_AGGR,
1166 "%s: Allocated @%p\n", __func__, rxtid->rxbuf);
1167
1168 /* Allow aggregation reception */
1169 rxtid->addba_exchangecomplete = 1;
1170 }
1171 }
1172 spin_unlock_bh(&rxtid->tidlock);
1173
1174 return 0;
1175}
1176
1177/* Process DELBA */
1178
1179int ath_rx_aggr_stop(struct ath_softc *sc,
1180 const u8 *addr,
1181 u16 tid)
1182{
1183 struct ath_node *an;
1184
1185 spin_lock_bh(&sc->node_lock);
1186 an = ath_node_find(sc, (u8 *) addr);
1187 spin_unlock_bh(&sc->node_lock);
1188
1189 if (!an) {
1190 DPRINTF(sc, ATH_DBG_AGGR,
1191 "%s: RX aggr stop for non-existent node\n", __func__);
1192 return -1;
1193 }
1194
1195 ath_rx_aggr_teardown(sc, an, tid);
1196 return 0;
1197}
1198
1199/* Rx aggregation tear down */
1200
1201void ath_rx_aggr_teardown(struct ath_softc *sc,
1202 struct ath_node *an, u8 tid)
1203{
1204 struct ath_arx_tid *rxtid = &an->an_aggr.rx.tid[tid];
1205
1206 if (!rxtid->addba_exchangecomplete)
1207 return;
1208
1209 del_timer_sync(&rxtid->timer);
1210 ath_rx_flush_tid(sc, rxtid, 0);
1211 rxtid->addba_exchangecomplete = 0;
1212
1213 /* De-allocate the receive buffer array allocated when addba started */
1214
1215 if (rxtid->rxbuf) {
1216 DPRINTF(sc, ATH_DBG_AGGR,
1217 "%s: Deallocating TID %d rxbuff @%p\n",
1218 __func__, tid, rxtid->rxbuf);
1219 kfree(rxtid->rxbuf);
1220
1221	/* Set pointer to null to avoid reuse */
1222 rxtid->rxbuf = NULL;
1223 }
1224}
1225
1226/* Initialize per-node receive state */
1227
1228void ath_rx_node_init(struct ath_softc *sc, struct ath_node *an)
1229{
1230 if (sc->sc_rxaggr) {
1231 struct ath_arx_tid *rxtid;
1232 int tidno;
1233
1234 /* Init per tid rx state */
1235 for (tidno = 0, rxtid = &an->an_aggr.rx.tid[tidno];
1236 tidno < WME_NUM_TID;
1237 tidno++, rxtid++) {
1238 rxtid->an = an;
1239 rxtid->seq_reset = 1;
1240 rxtid->seq_next = 0;
1241 rxtid->baw_size = WME_MAX_BA;
1242 rxtid->baw_head = rxtid->baw_tail = 0;
1243
1244 /*
1245 * Ensure the buffer pointer is null at this point
1246 * (needs to be allocated when addba is received)
1247 */
1248
1249 rxtid->rxbuf = NULL;
1250 setup_timer(&rxtid->timer, ath_rx_timer,
1251 (unsigned long)rxtid);
1252 spin_lock_init(&rxtid->tidlock);
1253
1254 /* ADDBA state */
1255 rxtid->addba_exchangecomplete = 0;
1256 }
1257 }
1258}
1259
1260void ath_rx_node_cleanup(struct ath_softc *sc, struct ath_node *an)
1261{
1262 if (sc->sc_rxaggr) {
1263 struct ath_arx_tid *rxtid;
1264 int tidno, i;
1265
1266		/* Clean up per tid rx state */
1267 for (tidno = 0, rxtid = &an->an_aggr.rx.tid[tidno];
1268 tidno < WME_NUM_TID;
1269 tidno++, rxtid++) {
1270
1271 if (!rxtid->addba_exchangecomplete)
1272 continue;
1273
1274 /* must cancel timer first */
1275 del_timer_sync(&rxtid->timer);
1276
1277 /* drop any pending sub-frames */
1278 ath_rx_flush_tid(sc, rxtid, 1);
1279
1280 for (i = 0; i < ATH_TID_MAX_BUFS; i++)
1281 ASSERT(rxtid->rxbuf[i].rx_wbuf == NULL);
1282
1283 rxtid->addba_exchangecomplete = 0;
1284 }
1285 }
1286
1287}
1288
1289/* Cleanup per-node receive state */
1290
1291void ath_rx_node_free(struct ath_softc *sc, struct ath_node *an)
1292{
1293 ath_rx_node_cleanup(sc, an);
1294}
1295
1296dma_addr_t ath_skb_map_single(struct ath_softc *sc,
1297 struct sk_buff *skb,
1298 int direction,
1299 dma_addr_t *pa)
1300{
1301 /*
1302 * NB: do NOT use skb->len, which is 0 on initialization.
1303 * Use skb's entire data area instead.
1304 */
1305 *pa = pci_map_single(sc->pdev, skb->data,
1306 skb_end_pointer(skb) - skb->head, direction);
1307 return *pa;
1308}
1309
1310void ath_skb_unmap_single(struct ath_softc *sc,
1311 struct sk_buff *skb,
1312 int direction,
1313 dma_addr_t *pa)
1314{
1315 /* Unmap skb's entire data area */
1316 pci_unmap_single(sc->pdev, *pa,
1317 skb_end_pointer(skb) - skb->head, direction);
1318}
diff --git a/drivers/net/wireless/ath9k/reg.h b/drivers/net/wireless/ath9k/reg.h
new file mode 100644
index 000000000000..42b0890a4685
--- /dev/null
+++ b/drivers/net/wireless/ath9k/reg.h
@@ -0,0 +1,1385 @@
1/*
2 * Copyright (c) 2008 Atheros Communications Inc.
3 *
4 * Permission to use, copy, modify, and/or distribute this software for any
5 * purpose with or without fee is hereby granted, provided that the above
6 * copyright notice and this permission notice appear in all copies.
7 *
8 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
9 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
10 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
11 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
12 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
13 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
14 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
15 */
16
17#ifndef REG_H
18#define REG_H
19
20#define AR_CR 0x0008
21#define AR_CR_RXE 0x00000004
22#define AR_CR_RXD 0x00000020
23#define AR_CR_SWI 0x00000040
24
25#define AR_RXDP 0x000C
26
27#define AR_CFG 0x0014
28#define AR_CFG_SWTD 0x00000001
29#define AR_CFG_SWTB 0x00000002
30#define AR_CFG_SWRD 0x00000004
31#define AR_CFG_SWRB 0x00000008
32#define AR_CFG_SWRG 0x00000010
33#define AR_CFG_AP_ADHOC_INDICATION 0x00000020
34#define AR_CFG_PHOK 0x00000100
35#define AR_CFG_CLK_GATE_DIS 0x00000400
36#define AR_CFG_EEBS 0x00000200
37#define AR_CFG_PCI_MASTER_REQ_Q_THRESH 0x00060000
38#define AR_CFG_PCI_MASTER_REQ_Q_THRESH_S 17
39
40#define AR_MIRT 0x0020
41#define AR_MIRT_VAL 0x0000ffff
42#define AR_MIRT_VAL_S 16
43
44#define AR_IER 0x0024
45#define AR_IER_ENABLE 0x00000001
46#define AR_IER_DISABLE 0x00000000
47
48#define AR_TIMT 0x0028
49#define AR_TIMT_LAST 0x0000ffff
50#define AR_TIMT_LAST_S 0
51#define AR_TIMT_FIRST 0xffff0000
52#define AR_TIMT_FIRST_S 16
53
54#define AR_RIMT 0x002C
55#define AR_RIMT_LAST 0x0000ffff
56#define AR_RIMT_LAST_S 0
57#define AR_RIMT_FIRST 0xffff0000
58#define AR_RIMT_FIRST_S 16
59
60#define AR_DMASIZE_4B 0x00000000
61#define AR_DMASIZE_8B 0x00000001
62#define AR_DMASIZE_16B 0x00000002
63#define AR_DMASIZE_32B 0x00000003
64#define AR_DMASIZE_64B 0x00000004
65#define AR_DMASIZE_128B 0x00000005
66#define AR_DMASIZE_256B 0x00000006
67#define AR_DMASIZE_512B 0x00000007
68
69#define AR_TXCFG 0x0030
70#define AR_TXCFG_DMASZ_MASK 0x00000003
71#define AR_TXCFG_DMASZ_4B 0
72#define AR_TXCFG_DMASZ_8B 1
73#define AR_TXCFG_DMASZ_16B 2
74#define AR_TXCFG_DMASZ_32B 3
75#define AR_TXCFG_DMASZ_64B 4
76#define AR_TXCFG_DMASZ_128B 5
77#define AR_TXCFG_DMASZ_256B 6
78#define AR_TXCFG_DMASZ_512B 7
79#define AR_FTRIG 0x000003F0
80#define AR_FTRIG_S 4
81#define AR_FTRIG_IMMED 0x00000000
82#define AR_FTRIG_64B 0x00000010
83#define AR_FTRIG_128B 0x00000020
84#define AR_FTRIG_192B 0x00000030
85#define AR_FTRIG_256B 0x00000040
86#define AR_FTRIG_512B 0x00000080
87#define AR_TXCFG_ADHOC_BEACON_ATIM_TX_POLICY 0x00000800
88
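As a reading aid for the mask/shift pairs throughout this header (an _S define gives the bit position of the corresponding field's least-significant bit), a hedged sketch of extracting and re-inserting such a field. The register value here is an invented constant, not something read from hardware:

#include <stdio.h>

#define AR_FTRIG	0x000003F0	/* field mask, as defined above */
#define AR_FTRIG_S	4		/* shift of the field's LSB */

int main(void)
{
	unsigned int txcfg = 0x00000120;	/* invented AR_TXCFG value */
	unsigned int ftrig = (txcfg & AR_FTRIG) >> AR_FTRIG_S;	/* extract: 18 */

	/* Clear the field and insert a new value. */
	txcfg = (txcfg & ~AR_FTRIG) | ((8u << AR_FTRIG_S) & AR_FTRIG);
	printf("ftrig=%u txcfg=0x%08x\n", ftrig, txcfg);	/* ftrig=18 txcfg=0x00000080 */
	return 0;
}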
89#define AR_RXCFG 0x0034
90#define AR_RXCFG_CHIRP 0x00000008
91#define AR_RXCFG_ZLFDMA 0x00000010
92#define AR_RXCFG_DMASZ_MASK 0x00000007
93#define AR_RXCFG_DMASZ_4B 0
94#define AR_RXCFG_DMASZ_8B 1
95#define AR_RXCFG_DMASZ_16B 2
96#define AR_RXCFG_DMASZ_32B 3
97#define AR_RXCFG_DMASZ_64B 4
98#define AR_RXCFG_DMASZ_128B 5
99#define AR_RXCFG_DMASZ_256B 6
100#define AR_RXCFG_DMASZ_512B 7
101
102#define AR_MIBC 0x0040
103#define AR_MIBC_COW 0x00000001
104#define AR_MIBC_FMC 0x00000002
105#define AR_MIBC_CMC 0x00000004
106#define AR_MIBC_MCS 0x00000008
107
108#define AR_TOPS 0x0044
109#define AR_TOPS_MASK 0x0000FFFF
110
111#define AR_RXNPTO 0x0048
112#define AR_RXNPTO_MASK 0x000003FF
113
114#define AR_TXNPTO 0x004C
115#define AR_TXNPTO_MASK 0x000003FF
116#define AR_TXNPTO_QCU_MASK 0x000FFC00
117
118#define AR_RPGTO 0x0050
119#define AR_RPGTO_MASK 0x000003FF
120
121#define AR_RPCNT 0x0054
122#define AR_RPCNT_MASK 0x0000001F
123
124#define AR_MACMISC 0x0058
125#define AR_MACMISC_PCI_EXT_FORCE 0x00000010
126#define AR_MACMISC_DMA_OBS 0x000001E0
127#define AR_MACMISC_DMA_OBS_S 5
128#define AR_MACMISC_DMA_OBS_LINE_0 0
129#define AR_MACMISC_DMA_OBS_LINE_1 1
130#define AR_MACMISC_DMA_OBS_LINE_2 2
131#define AR_MACMISC_DMA_OBS_LINE_3 3
132#define AR_MACMISC_DMA_OBS_LINE_4 4
133#define AR_MACMISC_DMA_OBS_LINE_5 5
134#define AR_MACMISC_DMA_OBS_LINE_6 6
135#define AR_MACMISC_DMA_OBS_LINE_7 7
136#define AR_MACMISC_DMA_OBS_LINE_8 8
137#define AR_MACMISC_MISC_OBS 0x00000E00
138#define AR_MACMISC_MISC_OBS_S 9
139#define AR_MACMISC_MISC_OBS_BUS_LSB 0x00007000
140#define AR_MACMISC_MISC_OBS_BUS_LSB_S 12
141#define AR_MACMISC_MISC_OBS_BUS_MSB 0x00038000
142#define AR_MACMISC_MISC_OBS_BUS_MSB_S 15
143#define AR_MACMISC_MISC_OBS_BUS_1 1
144
145#define AR_GTXTO 0x0064
146#define AR_GTXTO_TIMEOUT_COUNTER 0x0000FFFF
147#define AR_GTXTO_TIMEOUT_LIMIT 0xFFFF0000
148#define AR_GTXTO_TIMEOUT_LIMIT_S 16
149
150#define AR_GTTM 0x0068
151#define AR_GTTM_USEC 0x00000001
152#define AR_GTTM_IGNORE_IDLE 0x00000002
153#define AR_GTTM_RESET_IDLE 0x00000004
154#define AR_GTTM_CST_USEC 0x00000008
155
156#define AR_CST 0x006C
157#define AR_CST_TIMEOUT_COUNTER 0x0000FFFF
158#define AR_CST_TIMEOUT_LIMIT 0xFFFF0000
159#define AR_CST_TIMEOUT_LIMIT_S 16
160
161#define AR_SREV_VERSION_9100 0x014
162
163#define AR_SREV_5416_V20_OR_LATER(_ah) \
164 (AR_SREV_9100((_ah)) || AR_SREV_5416_20_OR_LATER(_ah))
165#define AR_SREV_5416_V22_OR_LATER(_ah) \
166 (AR_SREV_9100((_ah)) || AR_SREV_5416_22_OR_LATER(_ah))
167
168#define AR_ISR 0x0080
169#define AR_ISR_RXOK 0x00000001
170#define AR_ISR_RXDESC 0x00000002
171#define AR_ISR_RXERR 0x00000004
172#define AR_ISR_RXNOPKT 0x00000008
173#define AR_ISR_RXEOL 0x00000010
174#define AR_ISR_RXORN 0x00000020
175#define AR_ISR_TXOK 0x00000040
176#define AR_ISR_TXDESC 0x00000080
177#define AR_ISR_TXERR 0x00000100
178#define AR_ISR_TXNOPKT 0x00000200
179#define AR_ISR_TXEOL 0x00000400
180#define AR_ISR_TXURN 0x00000800
181#define AR_ISR_MIB 0x00001000
182#define AR_ISR_SWI 0x00002000
183#define AR_ISR_RXPHY 0x00004000
184#define AR_ISR_RXKCM 0x00008000
185#define AR_ISR_SWBA 0x00010000
186#define AR_ISR_BRSSI 0x00020000
187#define AR_ISR_BMISS 0x00040000
188#define AR_ISR_BNR 0x00100000
189#define AR_ISR_RXCHIRP 0x00200000
190#define AR_ISR_BCNMISC 0x00800000
191#define AR_ISR_TIM 0x00800000
192#define AR_ISR_QCBROVF 0x02000000
193#define AR_ISR_QCBRURN 0x04000000
194#define AR_ISR_QTRIG 0x08000000
195#define AR_ISR_GENTMR 0x10000000
196
197#define AR_ISR_TXMINTR 0x00080000
198#define AR_ISR_RXMINTR 0x01000000
199#define AR_ISR_TXINTM 0x40000000
200#define AR_ISR_RXINTM 0x80000000
201
202#define AR_ISR_S0 0x0084
203#define AR_ISR_S0_QCU_TXOK 0x000003FF
204#define AR_ISR_S0_QCU_TXOK_S 0
205#define AR_ISR_S0_QCU_TXDESC 0x03FF0000
206#define AR_ISR_S0_QCU_TXDESC_S 16
207
208#define AR_ISR_S1 0x0088
209#define AR_ISR_S1_QCU_TXERR 0x000003FF
210#define AR_ISR_S1_QCU_TXERR_S 0
211#define AR_ISR_S1_QCU_TXEOL 0x03FF0000
212#define AR_ISR_S1_QCU_TXEOL_S 16
213
214#define AR_ISR_S2 0x008c
215#define AR_ISR_S2_QCU_TXURN 0x000003FF
216#define AR_ISR_S2_CST 0x00400000
217#define AR_ISR_S2_GTT 0x00800000
218#define AR_ISR_S2_TIM 0x01000000
219#define AR_ISR_S2_CABEND 0x02000000
220#define AR_ISR_S2_DTIMSYNC 0x04000000
221#define AR_ISR_S2_BCNTO 0x08000000
222#define AR_ISR_S2_CABTO 0x10000000
223#define AR_ISR_S2_DTIM 0x20000000
224#define AR_ISR_S2_TSFOOR 0x40000000
225#define AR_ISR_S2_TBTT_TIME 0x80000000
226
227#define AR_ISR_S3 0x0090
228#define AR_ISR_S3_QCU_QCBROVF 0x000003FF
229#define AR_ISR_S3_QCU_QCBRURN 0x03FF0000
230
231#define AR_ISR_S4 0x0094
232#define AR_ISR_S4_QCU_QTRIG 0x000003FF
233#define AR_ISR_S4_RESV0 0xFFFFFC00
234
235#define AR_ISR_S5 0x0098
236#define AR_ISR_S5_TIMER_TRIG 0x000000FF
237#define AR_ISR_S5_TIMER_THRESH 0x0007FE00
238#define AR_ISR_S5_TIM_TIMER 0x00000010
239#define AR_ISR_S5_DTIM_TIMER 0x00000020
240#define AR_ISR_S5_S 0x00d8
241#define AR_IMR_S5 0x00b8
242#define AR_IMR_S5_TIM_TIMER 0x00000010
243#define AR_IMR_S5_DTIM_TIMER 0x00000020
244
245
246#define AR_IMR 0x00a0
247#define AR_IMR_RXOK 0x00000001
248#define AR_IMR_RXDESC 0x00000002
249#define AR_IMR_RXERR 0x00000004
250#define AR_IMR_RXNOPKT 0x00000008
251#define AR_IMR_RXEOL 0x00000010
252#define AR_IMR_RXORN 0x00000020
253#define AR_IMR_TXOK 0x00000040
254#define AR_IMR_TXDESC 0x00000080
255#define AR_IMR_TXERR 0x00000100
256#define AR_IMR_TXNOPKT 0x00000200
257#define AR_IMR_TXEOL 0x00000400
258#define AR_IMR_TXURN 0x00000800
259#define AR_IMR_MIB 0x00001000
260#define AR_IMR_SWI 0x00002000
261#define AR_IMR_RXPHY 0x00004000
262#define AR_IMR_RXKCM 0x00008000
263#define AR_IMR_SWBA 0x00010000
264#define AR_IMR_BRSSI 0x00020000
265#define AR_IMR_BMISS 0x00040000
266#define AR_IMR_BNR 0x00100000
267#define AR_IMR_RXCHIRP 0x00200000
268#define AR_IMR_BCNMISC 0x00800000
269#define AR_IMR_TIM 0x00800000
270#define AR_IMR_QCBROVF 0x02000000
271#define AR_IMR_QCBRURN 0x04000000
272#define AR_IMR_QTRIG 0x08000000
273#define AR_IMR_GENTMR 0x10000000
274
275#define AR_IMR_TXMINTR 0x00080000
276#define AR_IMR_RXMINTR 0x01000000
277#define AR_IMR_TXINTM 0x40000000
278#define AR_IMR_RXINTM 0x80000000
279
280#define AR_IMR_S0 0x00a4
281#define AR_IMR_S0_QCU_TXOK 0x000003FF
282#define AR_IMR_S0_QCU_TXOK_S 0
283#define AR_IMR_S0_QCU_TXDESC 0x03FF0000
284#define AR_IMR_S0_QCU_TXDESC_S 16
285
286#define AR_IMR_S1 0x00a8
287#define AR_IMR_S1_QCU_TXERR 0x000003FF
288#define AR_IMR_S1_QCU_TXERR_S 0
289#define AR_IMR_S1_QCU_TXEOL 0x03FF0000
290#define AR_IMR_S1_QCU_TXEOL_S 16
291
292#define AR_IMR_S2 0x00ac
293#define AR_IMR_S2_QCU_TXURN 0x000003FF
294#define AR_IMR_S2_QCU_TXURN_S 0
295#define AR_IMR_S2_CST 0x00400000
296#define AR_IMR_S2_GTT 0x00800000
297#define AR_IMR_S2_TIM 0x01000000
298#define AR_IMR_S2_CABEND 0x02000000
299#define AR_IMR_S2_DTIMSYNC 0x04000000
300#define AR_IMR_S2_BCNTO 0x08000000
301#define AR_IMR_S2_CABTO 0x10000000
302#define AR_IMR_S2_DTIM 0x20000000
303#define AR_IMR_S2_TSFOOR 0x40000000
304
305#define AR_IMR_S3 0x00b0
306#define AR_IMR_S3_QCU_QCBROVF 0x000003FF
307#define AR_IMR_S3_QCU_QCBRURN 0x03FF0000
308#define AR_IMR_S3_QCU_QCBRURN_S 16
309
310#define AR_IMR_S4 0x00b4
311#define AR_IMR_S4_QCU_QTRIG 0x000003FF
312#define AR_IMR_S4_RESV0 0xFFFFFC00
313
314#define AR_IMR_S5 0x00b8
315#define AR_IMR_S5_TIMER_TRIG 0x000000FF
316#define AR_IMR_S5_TIMER_THRESH 0x0000FF00
317
318
319#define AR_ISR_RAC 0x00c0
320#define AR_ISR_S0_S 0x00c4
321#define AR_ISR_S0_QCU_TXOK 0x000003FF
322#define AR_ISR_S0_QCU_TXOK_S 0
323#define AR_ISR_S0_QCU_TXDESC 0x03FF0000
324#define AR_ISR_S0_QCU_TXDESC_S 16
325
326#define AR_ISR_S1_S 0x00c8
327#define AR_ISR_S1_QCU_TXERR 0x000003FF
328#define AR_ISR_S1_QCU_TXERR_S 0
329#define AR_ISR_S1_QCU_TXEOL 0x03FF0000
330#define AR_ISR_S1_QCU_TXEOL_S 16
331
332#define AR_ISR_S2_S 0x00cc
333#define AR_ISR_S3_S 0x00d0
334#define AR_ISR_S4_S 0x00d4
335#define AR_ISR_S5_S 0x00d8
336#define AR_DMADBG_0 0x00e0
337#define AR_DMADBG_1 0x00e4
338#define AR_DMADBG_2 0x00e8
339#define AR_DMADBG_3 0x00ec
340#define AR_DMADBG_4 0x00f0
341#define AR_DMADBG_5 0x00f4
342#define AR_DMADBG_6 0x00f8
343#define AR_DMADBG_7 0x00fc
344
345#define AR_NUM_QCU 10
346#define AR_QCU_0 0x0001
347#define AR_QCU_1 0x0002
348#define AR_QCU_2 0x0004
349#define AR_QCU_3 0x0008
350#define AR_QCU_4 0x0010
351#define AR_QCU_5 0x0020
352#define AR_QCU_6 0x0040
353#define AR_QCU_7 0x0080
354#define AR_QCU_8 0x0100
355#define AR_QCU_9 0x0200
356
357#define AR_Q0_TXDP 0x0800
358#define AR_Q1_TXDP 0x0804
359#define AR_Q2_TXDP 0x0808
360#define AR_Q3_TXDP 0x080c
361#define AR_Q4_TXDP 0x0810
362#define AR_Q5_TXDP 0x0814
363#define AR_Q6_TXDP 0x0818
364#define AR_Q7_TXDP 0x081c
365#define AR_Q8_TXDP 0x0820
366#define AR_Q9_TXDP 0x0824
367#define AR_QTXDP(_i) (AR_Q0_TXDP + ((_i)<<2))
368
369#define AR_Q_TXE 0x0840
370#define AR_Q_TXE_M 0x000003FF
371
372#define AR_Q_TXD 0x0880
373#define AR_Q_TXD_M 0x000003FF
374
375#define AR_Q0_CBRCFG 0x08c0
376#define AR_Q1_CBRCFG 0x08c4
377#define AR_Q2_CBRCFG 0x08c8
378#define AR_Q3_CBRCFG 0x08cc
379#define AR_Q4_CBRCFG 0x08d0
380#define AR_Q5_CBRCFG 0x08d4
381#define AR_Q6_CBRCFG 0x08d8
382#define AR_Q7_CBRCFG 0x08dc
383#define AR_Q8_CBRCFG 0x08e0
384#define AR_Q9_CBRCFG 0x08e4
385#define AR_QCBRCFG(_i) (AR_Q0_CBRCFG + ((_i)<<2))
386#define AR_Q_CBRCFG_INTERVAL 0x00FFFFFF
387#define AR_Q_CBRCFG_INTERVAL_S 0
388#define AR_Q_CBRCFG_OVF_THRESH 0xFF000000
389#define AR_Q_CBRCFG_OVF_THRESH_S 24
390
391#define AR_Q0_RDYTIMECFG 0x0900
392#define AR_Q1_RDYTIMECFG 0x0904
393#define AR_Q2_RDYTIMECFG 0x0908
394#define AR_Q3_RDYTIMECFG 0x090c
395#define AR_Q4_RDYTIMECFG 0x0910
396#define AR_Q5_RDYTIMECFG 0x0914
397#define AR_Q6_RDYTIMECFG 0x0918
398#define AR_Q7_RDYTIMECFG 0x091c
399#define AR_Q8_RDYTIMECFG 0x0920
400#define AR_Q9_RDYTIMECFG 0x0924
401#define AR_QRDYTIMECFG(_i) (AR_Q0_RDYTIMECFG + ((_i)<<2))
402#define AR_Q_RDYTIMECFG_DURATION 0x00FFFFFF
403#define AR_Q_RDYTIMECFG_DURATION_S 0
404#define AR_Q_RDYTIMECFG_EN 0x01000000
405
406#define AR_Q_ONESHOTARM_SC 0x0940
407#define AR_Q_ONESHOTARM_SC_M 0x000003FF
408#define AR_Q_ONESHOTARM_SC_RESV0 0xFFFFFC00
409
410#define AR_Q_ONESHOTARM_CC 0x0980
411#define AR_Q_ONESHOTARM_CC_M 0x000003FF
412#define AR_Q_ONESHOTARM_CC_RESV0 0xFFFFFC00
413
414#define AR_Q0_MISC 0x09c0
415#define AR_Q1_MISC 0x09c4
416#define AR_Q2_MISC 0x09c8
417#define AR_Q3_MISC 0x09cc
418#define AR_Q4_MISC 0x09d0
419#define AR_Q5_MISC 0x09d4
420#define AR_Q6_MISC 0x09d8
421#define AR_Q7_MISC 0x09dc
422#define AR_Q8_MISC 0x09e0
423#define AR_Q9_MISC 0x09e4
424#define AR_QMISC(_i) (AR_Q0_MISC + ((_i)<<2))
425#define AR_Q_MISC_FSP 0x0000000F
426#define AR_Q_MISC_FSP_ASAP 0
427#define AR_Q_MISC_FSP_CBR 1
428#define AR_Q_MISC_FSP_DBA_GATED 2
429#define AR_Q_MISC_FSP_TIM_GATED 3
430#define AR_Q_MISC_FSP_BEACON_SENT_GATED 4
431#define AR_Q_MISC_FSP_BEACON_RCVD_GATED 5
432#define AR_Q_MISC_ONE_SHOT_EN 0x00000010
433#define AR_Q_MISC_CBR_INCR_DIS1 0x00000020
434#define AR_Q_MISC_CBR_INCR_DIS0 0x00000040
435#define AR_Q_MISC_BEACON_USE 0x00000080
436#define AR_Q_MISC_CBR_EXP_CNTR_LIMIT_EN 0x00000100
437#define AR_Q_MISC_RDYTIME_EXP_POLICY 0x00000200
438#define AR_Q_MISC_RESET_CBR_EXP_CTR 0x00000400
439#define AR_Q_MISC_DCU_EARLY_TERM_REQ 0x00000800
440#define AR_Q_MISC_RESV0 0xFFFFF000
441
442#define AR_Q0_STS 0x0a00
443#define AR_Q1_STS 0x0a04
444#define AR_Q2_STS 0x0a08
445#define AR_Q3_STS 0x0a0c
446#define AR_Q4_STS 0x0a10
447#define AR_Q5_STS 0x0a14
448#define AR_Q6_STS 0x0a18
449#define AR_Q7_STS 0x0a1c
450#define AR_Q8_STS 0x0a20
451#define AR_Q9_STS 0x0a24
452#define AR_QSTS(_i) (AR_Q0_STS + ((_i)<<2))
453#define AR_Q_STS_PEND_FR_CNT 0x00000003
454#define AR_Q_STS_RESV0 0x000000FC
455#define AR_Q_STS_CBR_EXP_CNT 0x0000FF00
456#define AR_Q_STS_RESV1 0xFFFF0000
457
458#define AR_Q_RDYTIMESHDN 0x0a40
459#define AR_Q_RDYTIMESHDN_M 0x000003FF
460
461
462#define AR_NUM_DCU 10
463#define AR_DCU_0 0x0001
464#define AR_DCU_1 0x0002
465#define AR_DCU_2 0x0004
466#define AR_DCU_3 0x0008
467#define AR_DCU_4 0x0010
468#define AR_DCU_5 0x0020
469#define AR_DCU_6 0x0040
470#define AR_DCU_7 0x0080
471#define AR_DCU_8 0x0100
472#define AR_DCU_9 0x0200
473
474#define AR_D0_QCUMASK 0x1000
475#define AR_D1_QCUMASK 0x1004
476#define AR_D2_QCUMASK 0x1008
477#define AR_D3_QCUMASK 0x100c
478#define AR_D4_QCUMASK 0x1010
479#define AR_D5_QCUMASK 0x1014
480#define AR_D6_QCUMASK 0x1018
481#define AR_D7_QCUMASK 0x101c
482#define AR_D8_QCUMASK 0x1020
483#define AR_D9_QCUMASK 0x1024
484#define AR_DQCUMASK(_i) (AR_D0_QCUMASK + ((_i)<<2))
485#define AR_D_QCUMASK 0x000003FF
486#define AR_D_QCUMASK_RESV0 0xFFFFFC00
487
488#define AR_D_TXBLK_CMD 0x1038
489#define AR_D_TXBLK_DATA(i) (AR_D_TXBLK_CMD+(i))
490
491#define AR_D0_LCL_IFS 0x1040
492#define AR_D1_LCL_IFS 0x1044
493#define AR_D2_LCL_IFS 0x1048
494#define AR_D3_LCL_IFS 0x104c
495#define AR_D4_LCL_IFS 0x1050
496#define AR_D5_LCL_IFS 0x1054
497#define AR_D6_LCL_IFS 0x1058
498#define AR_D7_LCL_IFS 0x105c
499#define AR_D8_LCL_IFS 0x1060
500#define AR_D9_LCL_IFS 0x1064
501#define AR_DLCL_IFS(_i) (AR_D0_LCL_IFS + ((_i)<<2))
502#define AR_D_LCL_IFS_CWMIN 0x000003FF
503#define AR_D_LCL_IFS_CWMIN_S 0
504#define AR_D_LCL_IFS_CWMAX 0x000FFC00
505#define AR_D_LCL_IFS_CWMAX_S 10
506#define AR_D_LCL_IFS_AIFS 0x0FF00000
507#define AR_D_LCL_IFS_AIFS_S 20
508
509#define AR_D_LCL_IFS_RESV0 0xF0000000
510
511#define AR_D0_RETRY_LIMIT 0x1080
512#define AR_D1_RETRY_LIMIT 0x1084
513#define AR_D2_RETRY_LIMIT 0x1088
514#define AR_D3_RETRY_LIMIT 0x108c
515#define AR_D4_RETRY_LIMIT 0x1090
516#define AR_D5_RETRY_LIMIT 0x1094
517#define AR_D6_RETRY_LIMIT 0x1098
518#define AR_D7_RETRY_LIMIT 0x109c
519#define AR_D8_RETRY_LIMIT 0x10a0
520#define AR_D9_RETRY_LIMIT 0x10a4
521#define AR_DRETRY_LIMIT(_i) (AR_D0_RETRY_LIMIT + ((_i)<<2))
522#define AR_D_RETRY_LIMIT_FR_SH 0x0000000F
523#define AR_D_RETRY_LIMIT_FR_SH_S 0
524#define AR_D_RETRY_LIMIT_STA_SH 0x00003F00
525#define AR_D_RETRY_LIMIT_STA_SH_S 8
526#define AR_D_RETRY_LIMIT_STA_LG 0x000FC000
527#define AR_D_RETRY_LIMIT_STA_LG_S 14
528#define AR_D_RETRY_LIMIT_RESV0 0xFFF00000
529
530#define AR_D0_CHNTIME 0x10c0
531#define AR_D1_CHNTIME 0x10c4
532#define AR_D2_CHNTIME 0x10c8
533#define AR_D3_CHNTIME 0x10cc
534#define AR_D4_CHNTIME 0x10d0
535#define AR_D5_CHNTIME 0x10d4
536#define AR_D6_CHNTIME 0x10d8
537#define AR_D7_CHNTIME 0x10dc
538#define AR_D8_CHNTIME 0x10e0
539#define AR_D9_CHNTIME 0x10e4
540#define AR_DCHNTIME(_i) (AR_D0_CHNTIME + ((_i)<<2))
541#define AR_D_CHNTIME_DUR 0x000FFFFF
542#define AR_D_CHNTIME_DUR_S 0
543#define AR_D_CHNTIME_EN 0x00100000
544#define AR_D_CHNTIME_RESV0 0xFFE00000
545
546#define AR_D0_MISC 0x1100
547#define AR_D1_MISC 0x1104
548#define AR_D2_MISC 0x1108
549#define AR_D3_MISC 0x110c
550#define AR_D4_MISC 0x1110
551#define AR_D5_MISC 0x1114
552#define AR_D6_MISC 0x1118
553#define AR_D7_MISC 0x111c
554#define AR_D8_MISC 0x1120
555#define AR_D9_MISC 0x1124
556#define AR_DMISC(_i) (AR_D0_MISC + ((_i)<<2))
557#define AR_D_MISC_BKOFF_THRESH 0x0000003F
558#define AR_D_MISC_RETRY_CNT_RESET_EN 0x00000040
559#define AR_D_MISC_CW_RESET_EN 0x00000080
560#define AR_D_MISC_FRAG_WAIT_EN 0x00000100
561#define AR_D_MISC_FRAG_BKOFF_EN 0x00000200
562#define AR_D_MISC_CW_BKOFF_EN 0x00001000
563#define AR_D_MISC_VIR_COL_HANDLING 0x0000C000
564#define AR_D_MISC_VIR_COL_HANDLING_S 14
565#define AR_D_MISC_VIR_COL_HANDLING_DEFAULT 0
566#define AR_D_MISC_VIR_COL_HANDLING_IGNORE 1
567#define AR_D_MISC_BEACON_USE 0x00010000
568#define AR_D_MISC_ARB_LOCKOUT_CNTRL 0x00060000
569#define AR_D_MISC_ARB_LOCKOUT_CNTRL_S 17
570#define AR_D_MISC_ARB_LOCKOUT_CNTRL_NONE 0
571#define AR_D_MISC_ARB_LOCKOUT_CNTRL_INTRA_FR 1
572#define AR_D_MISC_ARB_LOCKOUT_CNTRL_GLOBAL 2
573#define AR_D_MISC_ARB_LOCKOUT_IGNORE 0x00080000
574#define AR_D_MISC_SEQ_NUM_INCR_DIS 0x00100000
575#define AR_D_MISC_POST_FR_BKOFF_DIS 0x00200000
576#define AR_D_MISC_VIT_COL_CW_BKOFF_EN 0x00400000
577#define AR_D_MISC_BLOWN_IFS_RETRY_EN 0x00800000
578#define AR_D_MISC_RESV0 0xFF000000
579
580#define AR_D_SEQNUM 0x1140
581
582#define AR_D_GBL_IFS_SIFS 0x1030
583#define AR_D_GBL_IFS_SIFS_M 0x0000FFFF
584#define AR_D_GBL_IFS_SIFS_RESV0 0xFFFFFFFF
585
586#define AR_D_TXBLK_BASE 0x1038
587#define AR_D_TXBLK_WRITE_BITMASK 0x0000FFFF
588#define AR_D_TXBLK_WRITE_BITMASK_S 0
589#define AR_D_TXBLK_WRITE_SLICE 0x000F0000
590#define AR_D_TXBLK_WRITE_SLICE_S 16
591#define AR_D_TXBLK_WRITE_DCU 0x00F00000
592#define AR_D_TXBLK_WRITE_DCU_S 20
593#define AR_D_TXBLK_WRITE_COMMAND 0x0F000000
594#define AR_D_TXBLK_WRITE_COMMAND_S 24
595
596#define AR_D_GBL_IFS_SLOT 0x1070
597#define AR_D_GBL_IFS_SLOT_M 0x0000FFFF
598#define AR_D_GBL_IFS_SLOT_RESV0 0xFFFF0000
599
600#define AR_D_GBL_IFS_EIFS 0x10b0
601#define AR_D_GBL_IFS_EIFS_M 0x0000FFFF
602#define AR_D_GBL_IFS_EIFS_RESV0 0xFFFF0000
603
604#define AR_D_GBL_IFS_MISC 0x10f0
605#define AR_D_GBL_IFS_MISC_LFSR_SLICE_SEL 0x00000007
606#define AR_D_GBL_IFS_MISC_TURBO_MODE 0x00000008
607#define AR_D_GBL_IFS_MISC_USEC_DURATION 0x000FFC00
608#define AR_D_GBL_IFS_MISC_DCU_ARBITER_DLY 0x00300000
609#define AR_D_GBL_IFS_MISC_RANDOM_LFSR_SLICE_DIS 0x01000000
610#define AR_D_GBL_IFS_MISC_SLOT_XMIT_WIND_LEN 0x06000000
611#define AR_D_GBL_IFS_MISC_FORCE_XMIT_SLOT_BOUND 0x08000000
612#define AR_D_GBL_IFS_MISC_IGNORE_BACKOFF 0x10000000
613
614#define AR_D_FPCTL 0x1230
615#define AR_D_FPCTL_DCU 0x0000000F
616#define AR_D_FPCTL_DCU_S 0
617#define AR_D_FPCTL_PREFETCH_EN 0x00000010
618#define AR_D_FPCTL_BURST_PREFETCH 0x00007FE0
619#define AR_D_FPCTL_BURST_PREFETCH_S 5
620
621#define AR_D_TXPSE 0x1270
622#define AR_D_TXPSE_CTRL 0x000003FF
623#define AR_D_TXPSE_RESV0 0x0000FC00
624#define AR_D_TXPSE_STATUS 0x00010000
625#define AR_D_TXPSE_RESV1 0xFFFE0000
626
627#define AR_D_TXSLOTMASK 0x12f0
628#define AR_D_TXSLOTMASK_NUM 0x0000000F
629
630#define AR_CFG_LED 0x1f04
631#define AR_CFG_SCLK_RATE_IND 0x00000003
632#define AR_CFG_SCLK_RATE_IND_S 0
633#define AR_CFG_SCLK_32MHZ 0x00000000
634#define AR_CFG_SCLK_4MHZ 0x00000001
635#define AR_CFG_SCLK_1MHZ 0x00000002
636#define AR_CFG_SCLK_32KHZ 0x00000003
637#define AR_CFG_LED_BLINK_SLOW 0x00000008
638#define AR_CFG_LED_BLINK_THRESH_SEL 0x00000070
639#define AR_CFG_LED_MODE_SEL 0x00000380
640#define AR_CFG_LED_MODE_SEL_S 7
641#define AR_CFG_LED_POWER 0x00000280
642#define AR_CFG_LED_POWER_S 7
643#define AR_CFG_LED_NETWORK 0x00000300
644#define AR_CFG_LED_NETWORK_S 7
645#define AR_CFG_LED_MODE_PROP 0x0
646#define AR_CFG_LED_MODE_RPROP 0x1
647#define AR_CFG_LED_MODE_SPLIT 0x2
648#define AR_CFG_LED_MODE_RAND 0x3
649#define AR_CFG_LED_MODE_POWER_OFF 0x4
650#define AR_CFG_LED_MODE_POWER_ON 0x5
651#define AR_CFG_LED_MODE_NETWORK_OFF 0x4
652#define AR_CFG_LED_MODE_NETWORK_ON 0x6
653#define AR_CFG_LED_ASSOC_CTL 0x00000c00
654#define AR_CFG_LED_ASSOC_CTL_S 10
655#define AR_CFG_LED_ASSOC_NONE 0x0
656#define AR_CFG_LED_ASSOC_ACTIVE 0x1
657#define AR_CFG_LED_ASSOC_PENDING 0x2
658
659#define AR_CFG_LED_BLINK_SLOW 0x00000008
660#define AR_CFG_LED_BLINK_SLOW_S 3
661
662#define AR_CFG_LED_BLINK_THRESH_SEL 0x00000070
663#define AR_CFG_LED_BLINK_THRESH_SEL_S 4
664
665#define AR_MAC_SLEEP 0x1f00
666#define AR_MAC_SLEEP_MAC_AWAKE 0x00000000
667#define AR_MAC_SLEEP_MAC_ASLEEP 0x00000001
668
669#define AR_RC 0x4000
670#define AR_RC_AHB 0x00000001
671#define AR_RC_APB 0x00000002
672#define AR_RC_HOSTIF 0x00000100
673
674#define AR_WA 0x4004
675
676#define AR_PM_STATE 0x4008
677#define AR_PM_STATE_PME_D3COLD_VAUX 0x00100000
678
679#define AR_HOST_TIMEOUT 0x4018
680#define AR_HOST_TIMEOUT_APB_CNTR 0x0000FFFF
681#define AR_HOST_TIMEOUT_APB_CNTR_S 0
682#define AR_HOST_TIMEOUT_LCL_CNTR 0xFFFF0000
683#define AR_HOST_TIMEOUT_LCL_CNTR_S 16
684
685#define AR_EEPROM 0x401c
686#define AR_EEPROM_ABSENT 0x00000100
687#define AR_EEPROM_CORRUPT 0x00000200
688#define AR_EEPROM_PROT_MASK 0x03FFFC00
689#define AR_EEPROM_PROT_MASK_S 10
690
691#define EEPROM_PROTECT_RP_0_31 0x0001
692#define EEPROM_PROTECT_WP_0_31 0x0002
693#define EEPROM_PROTECT_RP_32_63 0x0004
694#define EEPROM_PROTECT_WP_32_63 0x0008
695#define EEPROM_PROTECT_RP_64_127 0x0010
696#define EEPROM_PROTECT_WP_64_127 0x0020
697#define EEPROM_PROTECT_RP_128_191 0x0040
698#define EEPROM_PROTECT_WP_128_191 0x0080
699#define EEPROM_PROTECT_RP_192_255 0x0100
700#define EEPROM_PROTECT_WP_192_255 0x0200
701#define EEPROM_PROTECT_RP_256_511 0x0400
702#define EEPROM_PROTECT_WP_256_511 0x0800
703#define EEPROM_PROTECT_RP_512_1023 0x1000
704#define EEPROM_PROTECT_WP_512_1023 0x2000
705#define EEPROM_PROTECT_RP_1024_2047 0x4000
706#define EEPROM_PROTECT_WP_1024_2047 0x8000
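/*
 * The EEPROM_PROTECT_* flags above are read-protect (RP) / write-protect
 * (WP) bits for the named ranges of EEPROM words; the active protection
 * setting is reported through AR_EEPROM_PROT_MASK in the AR_EEPROM
 * register defined earlier.
 */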
707
708#define AR_SREV \
709 ((AR_SREV_9100(ah)) ? 0x0600 : 0x4020)
710
711#define AR_SREV_ID \
712 ((AR_SREV_9100(ah)) ? 0x00000FFF : 0x000000FF)
713#define AR_SREV_VERSION 0x000000F0
714#define AR_SREV_VERSION_S 4
715#define AR_SREV_REVISION 0x00000007
716
717#define AR_SREV_ID2 0xFFFFFFFF
718#define AR_SREV_VERSION2 0xFFFC0000
719#define AR_SREV_VERSION2_S 18
720#define AR_SREV_TYPE2 0x0003F000
721#define AR_SREV_TYPE2_S 12
722#define AR_SREV_TYPE2_CHAIN 0x00001000
723#define AR_SREV_TYPE2_HOST_MODE 0x00002000
724#define AR_SREV_REVISION2 0x00000F00
725#define AR_SREV_REVISION2_S 8
726
727#define AR_SREV_VERSION_5416_PCI 0xD
728#define AR_SREV_VERSION_5416_PCIE 0xC
729#define AR_SREV_REVISION_5416_10 0
730#define AR_SREV_REVISION_5416_20 1
731#define AR_SREV_REVISION_5416_22 2
732#define AR_SREV_VERSION_9160 0x40
733#define AR_SREV_REVISION_9160_10 0
734#define AR_SREV_REVISION_9160_11 1
735#define AR_SREV_VERSION_9280 0x80
736#define AR_SREV_REVISION_9280_10 0
737#define AR_SREV_REVISION_9280_20 1
738#define AR_SREV_REVISION_9280_21 2
739#define AR_SREV_VERSION_9285 0xC0
740#define AR_SREV_REVISION_9285_10 0
741
742#define AR_SREV_9100_OR_LATER(_ah) \
743 (((_ah)->ah_macVersion >= AR_SREV_VERSION_5416_PCIE))
744#define AR_SREV_5416_20_OR_LATER(_ah) \
745 (((_ah)->ah_macVersion >= AR_SREV_VERSION_9160) || \
746 ((_ah)->ah_macRev >= AR_SREV_REVISION_5416_20))
747#define AR_SREV_5416_22_OR_LATER(_ah) \
748 (((_ah)->ah_macVersion >= AR_SREV_VERSION_9160) || \
749 ((_ah)->ah_macRev >= AR_SREV_REVISION_5416_22))
750#define AR_SREV_9160(_ah) \
751 (((_ah)->ah_macVersion == AR_SREV_VERSION_9160))
752#define AR_SREV_9160_10_OR_LATER(_ah) \
753 (((_ah)->ah_macVersion >= AR_SREV_VERSION_9160))
754#define AR_SREV_9160_11(_ah) \
755 (AR_SREV_9160(_ah) && ((_ah)->ah_macRev == AR_SREV_REVISION_9160_11))
756#define AR_SREV_9280(_ah) \
757 (((_ah)->ah_macVersion == AR_SREV_VERSION_9280))
758#define AR_SREV_9280_10_OR_LATER(_ah) \
759 (((_ah)->ah_macVersion >= AR_SREV_VERSION_9280))
760#define AR_SREV_9280_20(_ah) \
761 (((_ah)->ah_macVersion == AR_SREV_VERSION_9280) && \
762 ((_ah)->ah_macRev >= AR_SREV_REVISION_9280_20))
763#define AR_SREV_9280_20_OR_LATER(_ah) \
764 (((_ah)->ah_macVersion > AR_SREV_VERSION_9280) || \
765 (((_ah)->ah_macVersion == AR_SREV_VERSION_9280) && \
766 ((_ah)->ah_macRev >= AR_SREV_REVISION_9280_20)))
767
768#define AR_SREV_9285(_ah) (((_ah)->ah_macVersion == AR_SREV_VERSION_9285))
769#define AR_SREV_9285_10_OR_LATER(_ah) \
770 (((_ah)->ah_macVersion >= AR_SREV_VERSION_9285))
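/*
 * The AR_SREV_* predicates above are how the HAL distinguishes MAC
 * generations at run time from ah_macVersion/ah_macRev.  A typical caller
 * only needs the boolean result, e.g. (sketch):
 *
 *	if (AR_SREV_9280_10_OR_LATER(ah))
 *		take the AR9280-specific code path;
 */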
771
772#define AR_RADIO_SREV_MAJOR 0xf0
773#define AR_RAD5133_SREV_MAJOR 0xc0
774#define AR_RAD2133_SREV_MAJOR 0xd0
775#define AR_RAD5122_SREV_MAJOR 0xe0
776#define AR_RAD2122_SREV_MAJOR 0xf0
777
778#define AR_AHB_MODE 0x4024
779#define AR_AHB_EXACT_WR_EN 0x00000000
780#define AR_AHB_BUF_WR_EN 0x00000001
781#define AR_AHB_EXACT_RD_EN 0x00000000
782#define AR_AHB_CACHELINE_RD_EN 0x00000002
783#define AR_AHB_PREFETCH_RD_EN 0x00000004
784#define AR_AHB_PAGE_SIZE_1K 0x00000000
785#define AR_AHB_PAGE_SIZE_2K 0x00000008
786#define AR_AHB_PAGE_SIZE_4K 0x00000010
787
788#define AR_INTR_RTC_IRQ 0x00000001
789#define AR_INTR_MAC_IRQ 0x00000002
790#define AR_INTR_EEP_PROT_ACCESS 0x00000004
791#define AR_INTR_MAC_AWAKE 0x00020000
792#define AR_INTR_MAC_ASLEEP 0x00040000
793#define AR_INTR_SPURIOUS 0xFFFFFFFF
794
795
796#define AR_INTR_SYNC_CAUSE_CLR 0x4028
797
798#define AR_INTR_SYNC_CAUSE 0x4028
799
800#define AR_INTR_SYNC_ENABLE 0x402c
801#define AR_INTR_SYNC_ENABLE_GPIO 0xFFFC0000
802#define AR_INTR_SYNC_ENABLE_GPIO_S 18
803
804enum {
805 AR_INTR_SYNC_RTC_IRQ = 0x00000001,
806 AR_INTR_SYNC_MAC_IRQ = 0x00000002,
807 AR_INTR_SYNC_EEPROM_ILLEGAL_ACCESS = 0x00000004,
808 AR_INTR_SYNC_APB_TIMEOUT = 0x00000008,
809 AR_INTR_SYNC_PCI_MODE_CONFLICT = 0x00000010,
810 AR_INTR_SYNC_HOST1_FATAL = 0x00000020,
811 AR_INTR_SYNC_HOST1_PERR = 0x00000040,
812 AR_INTR_SYNC_TRCV_FIFO_PERR = 0x00000080,
813 AR_INTR_SYNC_RADM_CPL_EP = 0x00000100,
814 AR_INTR_SYNC_RADM_CPL_DLLP_ABORT = 0x00000200,
815 AR_INTR_SYNC_RADM_CPL_TLP_ABORT = 0x00000400,
816 AR_INTR_SYNC_RADM_CPL_ECRC_ERR = 0x00000800,
817 AR_INTR_SYNC_RADM_CPL_TIMEOUT = 0x00001000,
818 AR_INTR_SYNC_LOCAL_TIMEOUT = 0x00002000,
819 AR_INTR_SYNC_PM_ACCESS = 0x00004000,
820 AR_INTR_SYNC_MAC_AWAKE = 0x00008000,
821 AR_INTR_SYNC_MAC_ASLEEP = 0x00010000,
822 AR_INTR_SYNC_MAC_SLEEP_ACCESS = 0x00020000,
823 AR_INTR_SYNC_ALL = 0x0003FFFF,
824
825
826 AR_INTR_SYNC_DEFAULT = (AR_INTR_SYNC_HOST1_FATAL |
827 AR_INTR_SYNC_HOST1_PERR |
828 AR_INTR_SYNC_RADM_CPL_EP |
829 AR_INTR_SYNC_RADM_CPL_DLLP_ABORT |
830 AR_INTR_SYNC_RADM_CPL_TLP_ABORT |
831 AR_INTR_SYNC_RADM_CPL_ECRC_ERR |
832 AR_INTR_SYNC_RADM_CPL_TIMEOUT |
833 AR_INTR_SYNC_LOCAL_TIMEOUT |
834 AR_INTR_SYNC_MAC_SLEEP_ACCESS),
835
836 AR_INTR_SYNC_SPURIOUS = 0xFFFFFFFF,
837
838};
839
840#define AR_INTR_ASYNC_MASK 0x4030
841#define AR_INTR_ASYNC_MASK_GPIO 0xFFFC0000
842#define AR_INTR_ASYNC_MASK_GPIO_S 18
843
844#define AR_INTR_SYNC_MASK 0x4034
845#define AR_INTR_SYNC_MASK_GPIO 0xFFFC0000
846#define AR_INTR_SYNC_MASK_GPIO_S 18
847
848#define AR_INTR_ASYNC_CAUSE_CLR 0x4038
849#define AR_INTR_ASYNC_CAUSE 0x4038
850
851#define AR_INTR_ASYNC_ENABLE 0x403c
852#define AR_INTR_ASYNC_ENABLE_GPIO 0xFFFC0000
853#define AR_INTR_ASYNC_ENABLE_GPIO_S 18
854
855#define AR_PCIE_SERDES 0x4040
856#define AR_PCIE_SERDES2 0x4044
857#define AR_PCIE_PM_CTRL 0x4014
858#define AR_PCIE_PM_CTRL_ENA 0x00080000
859
860#define AR_NUM_GPIO 14
861#define AR928X_NUM_GPIO 10
862
863#define AR_GPIO_IN_OUT 0x4048
864#define AR_GPIO_IN_VAL 0x0FFFC000
865#define AR_GPIO_IN_VAL_S 14
866#define AR928X_GPIO_IN_VAL 0x000FFC00
867#define AR928X_GPIO_IN_VAL_S 10
868
869#define AR_GPIO_OE_OUT 0x404c
870#define AR_GPIO_OE_OUT_DRV 0x3
871#define AR_GPIO_OE_OUT_DRV_NO 0x0
872#define AR_GPIO_OE_OUT_DRV_LOW 0x1
873#define AR_GPIO_OE_OUT_DRV_HI 0x2
874#define AR_GPIO_OE_OUT_DRV_ALL 0x3
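/*
 * AR_GPIO_OE_OUT appears to hold a two-bit output-drive field per GPIO
 * line: DRV_NO leaves the pin undriven, DRV_LOW/DRV_HI drive only the
 * corresponding level, and DRV_ALL is the normal fully-driven output mode.
 */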
875
876#define AR_GPIO_INTR_POL 0x4050
877#define AR_GPIO_INTR_POL_VAL 0x00001FFF
878#define AR_GPIO_INTR_POL_VAL_S 0
879
880#define AR_GPIO_INPUT_EN_VAL 0x4054
881#define AR_GPIO_INPUT_EN_VAL_RFSILENT_DEF 0x00000080
882#define AR_GPIO_INPUT_EN_VAL_RFSILENT_DEF_S 7
883#define AR_GPIO_INPUT_EN_VAL_RFSILENT_BB 0x00008000
884#define AR_GPIO_INPUT_EN_VAL_RFSILENT_BB_S 15
885#define AR_GPIO_RTC_RESET_OVERRIDE_ENABLE 0x00010000
886#define AR_GPIO_JTAG_DISABLE 0x00020000
887
888#define AR_GPIO_INPUT_MUX1 0x4058
889
890#define AR_GPIO_INPUT_MUX2 0x405c
891#define AR_GPIO_INPUT_MUX2_CLK25 0x0000000f
892#define AR_GPIO_INPUT_MUX2_CLK25_S 0
893#define AR_GPIO_INPUT_MUX2_RFSILENT 0x000000f0
894#define AR_GPIO_INPUT_MUX2_RFSILENT_S 4
895#define AR_GPIO_INPUT_MUX2_RTC_RESET 0x00000f00
896#define AR_GPIO_INPUT_MUX2_RTC_RESET_S 8
897
898#define AR_GPIO_OUTPUT_MUX1 0x4060
899#define AR_GPIO_OUTPUT_MUX2 0x4064
900#define AR_GPIO_OUTPUT_MUX3 0x4068
901
902#define AR_GPIO_OUTPUT_MUX_AS_OUTPUT 0
903#define AR_GPIO_OUTPUT_MUX_AS_PCIE_ATTENTION_LED 1
904#define AR_GPIO_OUTPUT_MUX_AS_PCIE_POWER_LED 2
905#define AR_GPIO_OUTPUT_MUX_AS_MAC_NETWORK_LED 5
906#define AR_GPIO_OUTPUT_MUX_AS_MAC_POWER_LED 6
907
908#define AR_INPUT_STATE 0x406c
909
910#define AR_EEPROM_STATUS_DATA 0x407c
911#define AR_EEPROM_STATUS_DATA_VAL 0x0000ffff
912#define AR_EEPROM_STATUS_DATA_VAL_S 0
913#define AR_EEPROM_STATUS_DATA_BUSY 0x00010000
914#define AR_EEPROM_STATUS_DATA_BUSY_ACCESS 0x00020000
915#define AR_EEPROM_STATUS_DATA_PROT_ACCESS 0x00040000
916#define AR_EEPROM_STATUS_DATA_ABSENT_ACCESS 0x00080000
917
918#define AR_OBS 0x4080
919
920#define AR_PCIE_MSI 0x4094
921#define AR_PCIE_MSI_ENABLE 0x00000001
922
923
924#define AR_RTC_9160_PLL_DIV 0x000003ff
925#define AR_RTC_9160_PLL_DIV_S 0
926#define AR_RTC_9160_PLL_REFDIV 0x00003C00
927#define AR_RTC_9160_PLL_REFDIV_S 10
928#define AR_RTC_9160_PLL_CLKSEL 0x0000C000
929#define AR_RTC_9160_PLL_CLKSEL_S 14
930
931#define AR_RTC_BASE 0x00020000
932#define AR_RTC_RC \
933 (AR_SREV_9100(ah)) ? (AR_RTC_BASE + 0x0000) : 0x7000
934#define AR_RTC_RC_M 0x00000003
935#define AR_RTC_RC_MAC_WARM 0x00000001
936#define AR_RTC_RC_MAC_COLD 0x00000002
937#define AR_RTC_RC_COLD_RESET 0x00000004
938#define AR_RTC_RC_WARM_RESET 0x00000008
939
940#define AR_RTC_PLL_CONTROL \
941 (AR_SREV_9100(ah)) ? (AR_RTC_BASE + 0x0014) : 0x7014
942
943#define AR_RTC_PLL_DIV 0x0000001f
944#define AR_RTC_PLL_DIV_S 0
945#define AR_RTC_PLL_DIV2 0x00000020
946#define AR_RTC_PLL_REFDIV_5 0x000000c0
947#define AR_RTC_PLL_CLKSEL 0x00000300
948#define AR_RTC_PLL_CLKSEL_S 8
949
950
951
952#define AR_RTC_RESET \
953 ((AR_SREV_9100(ah)) ? (AR_RTC_BASE + 0x0040) : 0x7040)
954#define AR_RTC_RESET_EN (0x00000001)
955
956#define AR_RTC_STATUS \
957 ((AR_SREV_9100(ah)) ? (AR_RTC_BASE + 0x0044) : 0x7044)
958
959#define AR_RTC_STATUS_M \
960 ((AR_SREV_9100(ah)) ? 0x0000003f : 0x0000000f)
961
962#define AR_RTC_PM_STATUS_M 0x0000000f
963
964#define AR_RTC_STATUS_SHUTDOWN 0x00000001
965#define AR_RTC_STATUS_ON 0x00000002
966#define AR_RTC_STATUS_SLEEP 0x00000004
967#define AR_RTC_STATUS_WAKEUP 0x00000008
968
969#define AR_RTC_SLEEP_CLK \
970 ((AR_SREV_9100(ah)) ? (AR_RTC_BASE + 0x0048) : 0x7048)
971#define AR_RTC_FORCE_DERIVED_CLK 0x2
972
973#define AR_RTC_FORCE_WAKE \
974 ((AR_SREV_9100(ah)) ? (AR_RTC_BASE + 0x004c) : 0x704c)
975#define AR_RTC_FORCE_WAKE_EN 0x00000001
976#define AR_RTC_FORCE_WAKE_ON_INT 0x00000002
977
978
979#define AR_RTC_INTR_CAUSE \
980 ((AR_SREV_9100(ah)) ? (AR_RTC_BASE + 0x0050) : 0x7050)
981
982#define AR_RTC_INTR_ENABLE \
983 ((AR_SREV_9100(ah)) ? (AR_RTC_BASE + 0x0054) : 0x7054)
984
985#define AR_RTC_INTR_MASK \
986 ((AR_SREV_9100(ah)) ? (AR_RTC_BASE + 0x0058) : 0x7058)
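/*
 * On the AR9100 SoC the RTC block is mapped at AR_RTC_BASE, while on the
 * PCI/PCIe parts the same registers live at 0x70xx; the macros above pick
 * the correct offset and therefore expect a local named 'ah' in scope.
 * A minimal wake-up sketch (assuming the driver's usual REG_WRITE()
 * accessor):
 *
 *	REG_WRITE(ah, AR_RTC_FORCE_WAKE,
 *		  AR_RTC_FORCE_WAKE_EN | AR_RTC_FORCE_WAKE_ON_INT);
 */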
987
988#define AR_SEQ_MASK 0x8060
989
990#define AR_AN_RF2G1_CH0 0x7810
991#define AR_AN_RF2G1_CH0_OB 0x03800000
992#define AR_AN_RF2G1_CH0_OB_S 23
993#define AR_AN_RF2G1_CH0_DB 0x1C000000
994#define AR_AN_RF2G1_CH0_DB_S 26
995
996#define AR_AN_RF5G1_CH0 0x7818
997#define AR_AN_RF5G1_CH0_OB5 0x00070000
998#define AR_AN_RF5G1_CH0_OB5_S 16
999#define AR_AN_RF5G1_CH0_DB5 0x00380000
1000#define AR_AN_RF5G1_CH0_DB5_S 19
1001
1002#define AR_AN_RF2G1_CH1 0x7834
1003#define AR_AN_RF2G1_CH1_OB 0x03800000
1004#define AR_AN_RF2G1_CH1_OB_S 23
1005#define AR_AN_RF2G1_CH1_DB 0x1C000000
1006#define AR_AN_RF2G1_CH1_DB_S 26
1007
1008#define AR_AN_RF5G1_CH1 0x783C
1009#define AR_AN_RF5G1_CH1_OB5 0x00070000
1010#define AR_AN_RF5G1_CH1_OB5_S 16
1011#define AR_AN_RF5G1_CH1_DB5 0x00380000
1012#define AR_AN_RF5G1_CH1_DB5_S 19
1013
1014#define AR_AN_TOP2 0x7894
1015#define AR_AN_TOP2_XPABIAS_LVL 0xC0000000
1016#define AR_AN_TOP2_XPABIAS_LVL_S 30
1017#define AR_AN_TOP2_LOCALBIAS 0x00200000
1018#define AR_AN_TOP2_LOCALBIAS_S 21
1019#define AR_AN_TOP2_PWDCLKIND 0x00400000
1020#define AR_AN_TOP2_PWDCLKIND_S 22
1021
1022#define AR_AN_SYNTH9 0x7868
1023#define AR_AN_SYNTH9_REFDIVA 0xf8000000
1024#define AR_AN_SYNTH9_REFDIVA_S 27
1025
1026#define AR_STA_ID0 0x8000
1027#define AR_STA_ID1 0x8004
1028#define AR_STA_ID1_SADH_MASK 0x0000FFFF
1029#define AR_STA_ID1_STA_AP 0x00010000
1030#define AR_STA_ID1_ADHOC 0x00020000
1031#define AR_STA_ID1_PWR_SAV 0x00040000
1032#define AR_STA_ID1_KSRCHDIS 0x00080000
1033#define AR_STA_ID1_PCF 0x00100000
1034#define AR_STA_ID1_USE_DEFANT 0x00200000
1035#define AR_STA_ID1_DEFANT_UPDATE 0x00400000
1036#define AR_STA_ID1_RTS_USE_DEF 0x00800000
1037#define AR_STA_ID1_ACKCTS_6MB 0x01000000
1038#define AR_STA_ID1_BASE_RATE_11B 0x02000000
1039#define AR_STA_ID1_SECTOR_SELF_GEN 0x04000000
1040#define AR_STA_ID1_CRPT_MIC_ENABLE 0x08000000
1041#define AR_STA_ID1_KSRCH_MODE 0x10000000
1042#define AR_STA_ID1_PRESERVE_SEQNUM 0x20000000
1043#define AR_STA_ID1_CBCIV_ENDIAN 0x40000000
1044#define AR_STA_ID1_MCAST_KSRCH 0x80000000
1045
1046#define AR_BSS_ID0 0x8008
1047#define AR_BSS_ID1 0x800C
1048#define AR_BSS_ID1_U16 0x0000FFFF
1049#define AR_BSS_ID1_AID 0x07FF0000
1050#define AR_BSS_ID1_AID_S 16
1051
1052#define AR_BCN_RSSI_AVE 0x8010
1053#define AR_BCN_RSSI_AVE_MASK 0x00000FFF
1054
1055#define AR_TIME_OUT 0x8014
1056#define AR_TIME_OUT_ACK 0x00003FFF
1057#define AR_TIME_OUT_ACK_S 0
1058#define AR_TIME_OUT_CTS 0x3FFF0000
1059#define AR_TIME_OUT_CTS_S 16
1060
1061#define AR_RSSI_THR 0x8018
1062#define AR_RSSI_THR_MASK 0x000000FF
1063#define AR_RSSI_THR_BM_THR 0x0000FF00
1064#define AR_RSSI_THR_BM_THR_S 8
1065#define AR_RSSI_BCN_WEIGHT 0x1F000000
1066#define AR_RSSI_BCN_WEIGHT_S 24
1067#define AR_RSSI_BCN_RSSI_RST 0x20000000
1068
1069#define AR_USEC 0x801c
1070#define AR_USEC_USEC 0x0000007F
1071#define AR_USEC_TX_LAT 0x007FC000
1072#define AR_USEC_TX_LAT_S 14
1073#define AR_USEC_RX_LAT 0x1F800000
1074#define AR_USEC_RX_LAT_S 23
1075
1076#define AR_RESET_TSF 0x8020
1077#define AR_RESET_TSF_ONCE 0x01000000
1078
1079#define AR_MAX_CFP_DUR 0x8038
1080#define AR_CFP_VAL 0x0000FFFF
1081
1082#define AR_RX_FILTER 0x803C
1083#define AR_RX_FILTER_ALL 0x00000000
1084#define AR_RX_UCAST 0x00000001
1085#define AR_RX_MCAST 0x00000002
1086#define AR_RX_BCAST 0x00000004
1087#define AR_RX_CONTROL 0x00000008
1088#define AR_RX_BEACON 0x00000010
1089#define AR_RX_PROM 0x00000020
1090#define AR_RX_PROBE_REQ 0x00000080
1091#define AR_RX_MY_BEACON 0x00000200
1092#define AR_RX_COMPR_BAR 0x00000400
1093#define AR_RX_COMPR_BA 0x00000800
1094#define AR_RX_UNCOM_BA_BAR 0x00001000
1095
1096#define AR_MCAST_FIL0 0x8040
1097#define AR_MCAST_FIL1 0x8044
1098
1099#define AR_DIAG_SW 0x8048
1100#define AR_DIAG_CACHE_ACK 0x00000001
1101#define AR_DIAG_ACK_DIS 0x00000002
1102#define AR_DIAG_CTS_DIS 0x00000004
1103#define AR_DIAG_ENCRYPT_DIS 0x00000008
1104#define AR_DIAG_DECRYPT_DIS 0x00000010
1105#define AR_DIAG_RX_DIS 0x00000020
1106#define AR_DIAG_LOOP_BACK 0x00000040
1107#define AR_DIAG_CORR_FCS 0x00000080
1108#define AR_DIAG_CHAN_INFO 0x00000100
1109#define AR_DIAG_SCRAM_SEED 0x0001FE00
1110#define AR_DIAG_SCRAM_SEED_S 8
1111#define AR_DIAG_FRAME_NV0 0x00020000
1112#define AR_DIAG_OBS_PT_SEL1 0x000C0000
1113#define AR_DIAG_OBS_PT_SEL1_S 18
1114#define AR_DIAG_FORCE_RX_CLEAR 0x00100000
1115#define AR_DIAG_IGNORE_VIRT_CS 0x00200000
1116#define AR_DIAG_FORCE_CH_IDLE_HIGH 0x00400000
1117#define AR_DIAG_EIFS_CTRL_ENA 0x00800000
1118#define AR_DIAG_DUAL_CHAIN_INFO 0x01000000
1119#define AR_DIAG_RX_ABORT 0x02000000
1120#define AR_DIAG_SATURATE_CYCLE_CNT 0x04000000
1121#define AR_DIAG_OBS_PT_SEL2 0x08000000
1122#define AR_DIAG_RX_CLEAR_CTL_LOW 0x10000000
1123#define AR_DIAG_RX_CLEAR_EXT_LOW 0x20000000
1124
1125#define AR_TSF_L32 0x804c
1126#define AR_TSF_U32 0x8050
1127
1128#define AR_TST_ADDAC 0x8054
1129#define AR_DEF_ANTENNA 0x8058
1130
1131#define AR_AES_MUTE_MASK0 0x805c
1132#define AR_AES_MUTE_MASK0_FC 0x0000FFFF
1133#define AR_AES_MUTE_MASK0_QOS 0xFFFF0000
1134#define AR_AES_MUTE_MASK0_QOS_S 16
1135
1136#define AR_AES_MUTE_MASK1 0x8060
1137#define AR_AES_MUTE_MASK1_SEQ 0x0000FFFF
1138
1139#define AR_GATED_CLKS 0x8064
1140#define AR_GATED_CLKS_TX 0x00000002
1141#define AR_GATED_CLKS_RX 0x00000004
1142#define AR_GATED_CLKS_REG 0x00000008
1143
1144#define AR_OBS_BUS_CTRL 0x8068
1145#define AR_OBS_BUS_SEL_1 0x00040000
1146#define AR_OBS_BUS_SEL_2 0x00080000
1147#define AR_OBS_BUS_SEL_3 0x000C0000
1148#define AR_OBS_BUS_SEL_4 0x08040000
1149#define AR_OBS_BUS_SEL_5 0x08080000
1150
1151#define AR_OBS_BUS_1 0x806c
1152#define AR_OBS_BUS_1_PCU 0x00000001
1153#define AR_OBS_BUS_1_RX_END 0x00000002
1154#define AR_OBS_BUS_1_RX_WEP 0x00000004
1155#define AR_OBS_BUS_1_RX_BEACON 0x00000008
1156#define AR_OBS_BUS_1_RX_FILTER 0x00000010
1157#define AR_OBS_BUS_1_TX_HCF 0x00000020
1158#define AR_OBS_BUS_1_QUIET_TIME 0x00000040
1159#define AR_OBS_BUS_1_CHAN_IDLE 0x00000080
1160#define AR_OBS_BUS_1_TX_HOLD 0x00000100
1161#define AR_OBS_BUS_1_TX_FRAME 0x00000200
1162#define AR_OBS_BUS_1_RX_FRAME 0x00000400
1163#define AR_OBS_BUS_1_RX_CLEAR 0x00000800
1164#define AR_OBS_BUS_1_WEP_STATE 0x0003F000
1165#define AR_OBS_BUS_1_WEP_STATE_S 12
1166#define AR_OBS_BUS_1_RX_STATE 0x01F00000
1167#define AR_OBS_BUS_1_RX_STATE_S 20
1168#define AR_OBS_BUS_1_TX_STATE 0x7E000000
1169#define AR_OBS_BUS_1_TX_STATE_S 25
1170
1171#define AR_LAST_TSTP 0x8080
1172#define AR_NAV 0x8084
1173#define AR_RTS_OK 0x8088
1174#define AR_RTS_FAIL 0x808c
1175#define AR_ACK_FAIL 0x8090
1176#define AR_FCS_FAIL 0x8094
1177#define AR_BEACON_CNT 0x8098
1178
1179#define AR_SLEEP1 0x80d4
1180#define AR_SLEEP1_ASSUME_DTIM 0x00080000
1181#define AR_SLEEP1_CAB_TIMEOUT 0xFFE00000
1182#define AR_SLEEP1_CAB_TIMEOUT_S 21
1183
1184#define AR_SLEEP2 0x80d8
1185#define AR_SLEEP2_BEACON_TIMEOUT 0xFFE00000
1186#define AR_SLEEP2_BEACON_TIMEOUT_S 21
1187
1188#define AR_BSSMSKL 0x80e0
1189#define AR_BSSMSKU 0x80e4
1190
1191#define AR_TPC 0x80e8
1192#define AR_TPC_ACK 0x0000003f
1193#define AR_TPC_ACK_S 0x00
1194#define AR_TPC_CTS 0x00003f00
1195#define AR_TPC_CTS_S 0x08
1196#define AR_TPC_CHIRP 0x003f0000
1197#define AR_TPC_CHIRP_S 0x16
1198
1199#define AR_TFCNT 0x80ec
1200#define AR_RFCNT 0x80f0
1201#define AR_RCCNT 0x80f4
1202#define AR_CCCNT 0x80f8
1203
1204#define AR_QUIET1 0x80fc
1205#define AR_QUIET1_NEXT_QUIET_S 0
1206#define AR_QUIET1_NEXT_QUIET_M 0x0000ffff
1207#define AR_QUIET1_QUIET_ENABLE 0x00010000
1208#define AR_QUIET1_QUIET_ACK_CTS_ENABLE 0x00020000
1209#define AR_QUIET2 0x8100
1210#define AR_QUIET2_QUIET_PERIOD_S 0
1211#define AR_QUIET2_QUIET_PERIOD_M 0x0000ffff
1212#define AR_QUIET2_QUIET_DUR_S 16
1213#define AR_QUIET2_QUIET_DUR 0xffff0000
1214
1215#define AR_TSF_PARM 0x8104
1216#define AR_TSF_INCREMENT_M 0x000000ff
1217#define AR_TSF_INCREMENT_S 0x00
1218
1219#define AR_QOS_NO_ACK 0x8108
1220#define AR_QOS_NO_ACK_TWO_BIT 0x0000000f
1221#define AR_QOS_NO_ACK_TWO_BIT_S 0
1222#define AR_QOS_NO_ACK_BIT_OFF 0x00000070
1223#define AR_QOS_NO_ACK_BIT_OFF_S 4
1224#define AR_QOS_NO_ACK_BYTE_OFF 0x00000180
1225#define AR_QOS_NO_ACK_BYTE_OFF_S 7
1226
1227#define AR_PHY_ERR 0x810c
1228
1229#define AR_PHY_ERR_DCHIRP 0x00000008
1230#define AR_PHY_ERR_RADAR 0x00000020
1231#define AR_PHY_ERR_OFDM_TIMING 0x00020000
1232#define AR_PHY_ERR_CCK_TIMING 0x02000000
1233
1234#define AR_RXFIFO_CFG 0x8114
1235
1236
1237#define AR_MIC_QOS_CONTROL 0x8118
1238#define AR_MIC_QOS_SELECT 0x811c
1239
1240#define AR_PCU_MISC 0x8120
1241#define AR_PCU_FORCE_BSSID_MATCH 0x00000001
1242#define AR_PCU_MIC_NEW_LOC_ENA 0x00000004
1243#define AR_PCU_TX_ADD_TSF 0x00000008
1244#define AR_PCU_CCK_SIFS_MODE 0x00000010
1245#define AR_PCU_RX_ANT_UPDT 0x00000800
1246#define AR_PCU_TXOP_TBTT_LIMIT_ENA 0x00001000
1247#define AR_PCU_MISS_BCN_IN_SLEEP 0x00004000
1248#define AR_PCU_BUG_12306_FIX_ENA 0x00020000
1249#define AR_PCU_FORCE_QUIET_COLL 0x00040000
1250#define AR_PCU_TBTT_PROTECT 0x00200000
1251#define AR_PCU_CLEAR_VMF 0x01000000
1252#define AR_PCU_CLEAR_BA_VALID 0x04000000
1253
1254
1255#define AR_FILT_OFDM 0x8124
1256#define AR_FILT_OFDM_COUNT 0x00FFFFFF
1257
1258#define AR_FILT_CCK 0x8128
1259#define AR_FILT_CCK_COUNT 0x00FFFFFF
1260
1261#define AR_PHY_ERR_1 0x812c
1262#define AR_PHY_ERR_1_COUNT 0x00FFFFFF
1263#define AR_PHY_ERR_MASK_1 0x8130
1264
1265#define AR_PHY_ERR_2 0x8134
1266#define AR_PHY_ERR_2_COUNT 0x00FFFFFF
1267#define AR_PHY_ERR_MASK_2 0x8138
1268
1269#define AR_PHY_COUNTMAX (3 << 22)
1270#define AR_MIBCNT_INTRMASK (3 << 22)
1271
1272#define AR_TSF_THRESHOLD 0x813c
1273#define AR_TSF_THRESHOLD_VAL 0x0000FFFF
1274
1275#define AR_PHY_ERR_EIFS_MASK 0x8144
1276
1277#define AR_PHY_ERR_3 0x8168
1278#define AR_PHY_ERR_3_COUNT 0x00FFFFFF
1279#define AR_PHY_ERR_MASK_3 0x816c
1280
1281#define AR_TXSIFS 0x81d0
1282#define AR_TXSIFS_TIME 0x000000FF
1283#define AR_TXSIFS_TX_LATENCY 0x00000F00
1284#define AR_TXSIFS_TX_LATENCY_S 8
1285#define AR_TXSIFS_ACK_SHIFT 0x00007000
1286#define AR_TXSIFS_ACK_SHIFT_S 12
1287
1288#define AR_TXOP_X 0x81ec
1289#define AR_TXOP_X_VAL 0x000000FF
1290
1291
1292#define AR_TXOP_0_3 0x81f0
1293#define AR_TXOP_4_7 0x81f4
1294#define AR_TXOP_8_11 0x81f8
1295#define AR_TXOP_12_15 0x81fc
1296
1297
1298#define AR_NEXT_TBTT_TIMER 0x8200
1299#define AR_NEXT_DMA_BEACON_ALERT 0x8204
1300#define AR_NEXT_SWBA 0x8208
1301#define AR_NEXT_CFP 0x8208
1302#define AR_NEXT_HCF 0x820C
1303#define AR_NEXT_TIM 0x8210
1304#define AR_NEXT_DTIM 0x8214
1305#define AR_NEXT_QUIET_TIMER 0x8218
1306#define AR_NEXT_NDP_TIMER 0x821C
1307
1308#define AR_BEACON_PERIOD 0x8220
1309#define AR_DMA_BEACON_PERIOD 0x8224
1310#define AR_SWBA_PERIOD 0x8228
1311#define AR_HCF_PERIOD 0x822C
1312#define AR_TIM_PERIOD 0x8230
1313#define AR_DTIM_PERIOD 0x8234
1314#define AR_QUIET_PERIOD 0x8238
1315#define AR_NDP_PERIOD 0x823C
1316
1317#define AR_TIMER_MODE 0x8240
1318#define AR_TBTT_TIMER_EN 0x00000001
1319#define AR_DBA_TIMER_EN 0x00000002
1320#define AR_SWBA_TIMER_EN 0x00000004
1321#define AR_HCF_TIMER_EN 0x00000008
1322#define AR_TIM_TIMER_EN 0x00000010
1323#define AR_DTIM_TIMER_EN 0x00000020
1324#define AR_QUIET_TIMER_EN 0x00000040
1325#define AR_NDP_TIMER_EN 0x00000080
1326#define AR_TIMER_OVERFLOW_INDEX 0x00000700
1327#define AR_TIMER_OVERFLOW_INDEX_S 8
1328#define AR_TIMER_THRESH 0xFFFFF000
1329#define AR_TIMER_THRESH_S 12
1330
1331#define AR_SLP32_MODE 0x8244
1332#define AR_SLP32_HALF_CLK_LATENCY 0x000FFFFF
1333#define AR_SLP32_ENA 0x00100000
1334#define AR_SLP32_TSF_WRITE_STATUS 0x00200000
1335
1336#define AR_SLP32_WAKE 0x8248
1337#define AR_SLP32_WAKE_XTL_TIME 0x0000FFFF
1338
1339#define AR_SLP32_INC 0x824c
1340#define AR_SLP32_TST_INC 0x000FFFFF
1341
1342#define AR_SLP_CNT 0x8250
1343#define AR_SLP_CYCLE_CNT 0x8254
1344
1345#define AR_SLP_MIB_CTRL 0x8258
1346#define AR_SLP_MIB_CLEAR 0x00000001
1347#define AR_SLP_MIB_PENDING 0x00000002
1348
1349#define AR_2040_MODE 0x8318
1350#define AR_2040_JOINED_RX_CLEAR 0x00000001
1351
1352
1353#define AR_EXTRCCNT 0x8328
1354
1355#define AR_SELFGEN_MASK 0x832c
1356
1357#define AR_PCU_TXBUF_CTRL 0x8340
1358#define AR_PCU_TXBUF_CTRL_SIZE_MASK 0x7FF
1359#define AR_PCU_TXBUF_CTRL_USABLE_SIZE 0x700
1360#define AR_9285_PCU_TXBUF_CTRL_USABLE_SIZE 0x380
1361
1362#define AR_KEYTABLE_0 0x8800
1363#define AR_KEYTABLE(_n) (AR_KEYTABLE_0 + ((_n)*32))
1364#define AR_KEY_CACHE_SIZE 128
1365#define AR_RSVD_KEYTABLE_ENTRIES 4
1366#define AR_KEY_TYPE 0x00000007
1367#define AR_KEYTABLE_TYPE_40 0x00000000
1368#define AR_KEYTABLE_TYPE_104 0x00000001
1369#define AR_KEYTABLE_TYPE_128 0x00000003
1370#define AR_KEYTABLE_TYPE_TKIP 0x00000004
1371#define AR_KEYTABLE_TYPE_AES 0x00000005
1372#define AR_KEYTABLE_TYPE_CCM 0x00000006
1373#define AR_KEYTABLE_TYPE_CLR 0x00000007
1374#define AR_KEYTABLE_ANT 0x00000008
1375#define AR_KEYTABLE_VALID 0x00008000
1376#define AR_KEYTABLE_KEY0(_n) (AR_KEYTABLE(_n) + 0)
1377#define AR_KEYTABLE_KEY1(_n) (AR_KEYTABLE(_n) + 4)
1378#define AR_KEYTABLE_KEY2(_n) (AR_KEYTABLE(_n) + 8)
1379#define AR_KEYTABLE_KEY3(_n) (AR_KEYTABLE(_n) + 12)
1380#define AR_KEYTABLE_KEY4(_n) (AR_KEYTABLE(_n) + 16)
1381#define AR_KEYTABLE_TYPE(_n) (AR_KEYTABLE(_n) + 20)
1382#define AR_KEYTABLE_MAC0(_n) (AR_KEYTABLE(_n) + 24)
1383#define AR_KEYTABLE_MAC1(_n) (AR_KEYTABLE(_n) + 28)
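/*
 * Each of the AR_KEY_CACHE_SIZE entries occupies 32 bytes, so
 * AR_KEYTABLE(_n) yields the base of entry _n and the KEY0..MAC1 macros
 * address its individual words.  A minimal sketch of filling one entry
 * (hypothetical 'entry' and 'key0' values, usual REG_WRITE() accessor):
 *
 *	REG_WRITE(ah, AR_KEYTABLE_KEY0(entry), key0);
 *	REG_WRITE(ah, AR_KEYTABLE_TYPE(entry), AR_KEYTABLE_TYPE_CCM);
 */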
1384
1385#endif
diff --git a/drivers/net/wireless/ath9k/regd.c b/drivers/net/wireless/ath9k/regd.c
new file mode 100644
index 000000000000..62e28887ccd3
--- /dev/null
+++ b/drivers/net/wireless/ath9k/regd.c
@@ -0,0 +1,1026 @@
1/*
2 * Copyright (c) 2008 Atheros Communications Inc.
3 *
4 * Permission to use, copy, modify, and/or distribute this software for any
5 * purpose with or without fee is hereby granted, provided that the above
6 * copyright notice and this permission notice appear in all copies.
7 *
8 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
9 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
10 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
11 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
12 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
13 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
14 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
15 */
16
17#include <linux/kernel.h>
18#include <linux/slab.h>
19#include "core.h"
20#include "hw.h"
21#include "regd.h"
22#include "regd_common.h"
23
24static int ath9k_regd_chansort(const void *a, const void *b)
25{
26 const struct ath9k_channel *ca = a;
27 const struct ath9k_channel *cb = b;
28
29 return (ca->channel == cb->channel) ?
30 (ca->channelFlags & CHAN_FLAGS) -
31 (cb->channelFlags & CHAN_FLAGS) : ca->channel - cb->channel;
32}
33
34static void
35ath9k_regd_sort(void *a, u32 n, u32 size, ath_hal_cmp_t *cmp)
36{
37 u8 *aa = a;
38 u8 *ai, *t;
39
40 for (ai = aa + size; --n >= 1; ai += size)
41 for (t = ai; t > aa; t -= size) {
42 u8 *u = t - size;
43 if (cmp(u, t) <= 0)
44 break;
45 swap(u, t, size);
46 }
47}
48
49static u16 ath9k_regd_get_eepromRD(struct ath_hal *ah)
50{
51 return ah->ah_currentRD & ~WORLDWIDE_ROAMING_FLAG;
52}
53
54static bool ath9k_regd_is_chan_bm_zero(u64 *bitmask)
55{
56 int i;
57
58 for (i = 0; i < BMLEN; i++) {
59 if (bitmask[i] != 0)
60 return false;
61 }
62 return true;
63}
64
65static bool ath9k_regd_is_eeprom_valid(struct ath_hal *ah)
66{
67 u16 rd = ath9k_regd_get_eepromRD(ah);
68 int i;
69
70 if (rd & COUNTRY_ERD_FLAG) {
71 u16 cc = rd & ~COUNTRY_ERD_FLAG;
72 for (i = 0; i < ARRAY_SIZE(allCountries); i++)
73 if (allCountries[i].countryCode == cc)
74 return true;
75 } else {
76 for (i = 0; i < ARRAY_SIZE(regDomainPairs); i++)
77 if (regDomainPairs[i].regDmnEnum == rd)
78 return true;
79 }
80 DPRINTF(ah->ah_sc, ATH_DBG_REGULATORY,
81 "%s: invalid regulatory domain/country code 0x%x\n",
82 __func__, rd);
83 return false;
84}
85
86static bool ath9k_regd_is_fcc_midband_supported(struct ath_hal *ah)
87{
88 u32 regcap;
89
90 regcap = ah->ah_caps.reg_cap;
91
92 if (regcap & AR_EEPROM_EEREGCAP_EN_FCC_MIDBAND)
93 return true;
94 else
95 return false;
96}
97
98static bool ath9k_regd_is_ccode_valid(struct ath_hal *ah,
99 u16 cc)
100{
101 u16 rd;
102 int i;
103
104 if (cc == CTRY_DEFAULT)
105 return true;
106 if (cc == CTRY_DEBUG)
107 return true;
108
109 rd = ath9k_regd_get_eepromRD(ah);
110 DPRINTF(ah->ah_sc, ATH_DBG_REGULATORY, "%s: EEPROM regdomain 0x%x\n",
111 __func__, rd);
112
113 if (rd & COUNTRY_ERD_FLAG) {
114 DPRINTF(ah->ah_sc, ATH_DBG_REGULATORY,
115 "%s: EEPROM setting is country code %u\n",
116 __func__, rd & ~COUNTRY_ERD_FLAG);
117 return cc == (rd & ~COUNTRY_ERD_FLAG);
118 }
119
120 for (i = 0; i < ARRAY_SIZE(allCountries); i++) {
121 if (cc == allCountries[i].countryCode) {
122#ifdef AH_SUPPORT_11D
123 if ((rd & WORLD_SKU_MASK) == WORLD_SKU_PREFIX)
124 return true;
125#endif
126 if (allCountries[i].regDmnEnum == rd ||
127 rd == DEBUG_REG_DMN || rd == NO_ENUMRD)
128 return true;
129 }
130 }
131 return false;
132}
133
134static void
135ath9k_regd_get_wmodes_nreg(struct ath_hal *ah,
136 struct country_code_to_enum_rd *country,
137 struct regDomain *rd5GHz,
138 unsigned long *modes_allowed)
139{
140 bitmap_copy(modes_allowed, ah->ah_caps.wireless_modes, ATH9K_MODE_MAX);
141
142 if (test_bit(ATH9K_MODE_11G, ah->ah_caps.wireless_modes) &&
143 (!country->allow11g))
144 clear_bit(ATH9K_MODE_11G, modes_allowed);
145
146 if (test_bit(ATH9K_MODE_11A, ah->ah_caps.wireless_modes) &&
147 (ath9k_regd_is_chan_bm_zero(rd5GHz->chan11a)))
148 clear_bit(ATH9K_MODE_11A, modes_allowed);
149
150 if (test_bit(ATH9K_MODE_11NG_HT20, ah->ah_caps.wireless_modes)
151 && (!country->allow11ng20))
152 clear_bit(ATH9K_MODE_11NG_HT20, modes_allowed);
153
154 if (test_bit(ATH9K_MODE_11NA_HT20, ah->ah_caps.wireless_modes)
155 && (!country->allow11na20))
156 clear_bit(ATH9K_MODE_11NA_HT20, modes_allowed);
157
158 if (test_bit(ATH9K_MODE_11NG_HT40PLUS, ah->ah_caps.wireless_modes) &&
159 (!country->allow11ng40))
160 clear_bit(ATH9K_MODE_11NG_HT40PLUS, modes_allowed);
161
162 if (test_bit(ATH9K_MODE_11NG_HT40MINUS, ah->ah_caps.wireless_modes) &&
163 (!country->allow11ng40))
164 clear_bit(ATH9K_MODE_11NG_HT40MINUS, modes_allowed);
165
166 if (test_bit(ATH9K_MODE_11NA_HT40PLUS, ah->ah_caps.wireless_modes) &&
167 (!country->allow11na40))
168 clear_bit(ATH9K_MODE_11NA_HT40PLUS, modes_allowed);
169
170 if (test_bit(ATH9K_MODE_11NA_HT40MINUS, ah->ah_caps.wireless_modes) &&
171 (!country->allow11na40))
172 clear_bit(ATH9K_MODE_11NA_HT40MINUS, modes_allowed);
173}
174
175bool ath9k_regd_is_public_safety_sku(struct ath_hal *ah)
176{
177 u16 rd;
178
179 rd = ath9k_regd_get_eepromRD(ah);
180
181 switch (rd) {
182 case FCC4_FCCA:
183 case (CTRY_UNITED_STATES_FCC49 | COUNTRY_ERD_FLAG):
184 return true;
185 case DEBUG_REG_DMN:
186 case NO_ENUMRD:
187 if (ah->ah_countryCode == CTRY_UNITED_STATES_FCC49)
188 return true;
189 break;
190 }
191 return false;
192}
193
194static struct country_code_to_enum_rd*
195ath9k_regd_find_country(u16 countryCode)
196{
197 int i;
198
199 for (i = 0; i < ARRAY_SIZE(allCountries); i++) {
200 if (allCountries[i].countryCode == countryCode)
201 return &allCountries[i];
202 }
203 return NULL;
204}
205
206static u16 ath9k_regd_get_default_country(struct ath_hal *ah)
207{
208 u16 rd;
209 int i;
210
211 rd = ath9k_regd_get_eepromRD(ah);
212 if (rd & COUNTRY_ERD_FLAG) {
213 struct country_code_to_enum_rd *country = NULL;
214 u16 cc = rd & ~COUNTRY_ERD_FLAG;
215
216 country = ath9k_regd_find_country(cc);
217 if (country != NULL)
218 return cc;
219 }
220
221 for (i = 0; i < ARRAY_SIZE(regDomainPairs); i++)
222 if (regDomainPairs[i].regDmnEnum == rd) {
223 if (regDomainPairs[i].singleCC != 0)
224 return regDomainPairs[i].singleCC;
225 else
226 i = ARRAY_SIZE(regDomainPairs);
227 }
228 return CTRY_DEFAULT;
229}
230
231static bool ath9k_regd_is_valid_reg_domain(int regDmn,
232 struct regDomain *rd)
233{
234 int i;
235
236 for (i = 0; i < ARRAY_SIZE(regDomains); i++) {
237 if (regDomains[i].regDmnEnum == regDmn) {
238 if (rd != NULL) {
239 memcpy(rd, &regDomains[i],
240 sizeof(struct regDomain));
241 }
242 return true;
243 }
244 }
245 return false;
246}
247
248static bool ath9k_regd_is_valid_reg_domainPair(int regDmnPair)
249{
250 int i;
251
252 if (regDmnPair == NO_ENUMRD)
253 return false;
254 for (i = 0; i < ARRAY_SIZE(regDomainPairs); i++) {
255 if (regDomainPairs[i].regDmnEnum == regDmnPair)
256 return true;
257 }
258 return false;
259}
260
261static bool
262ath9k_regd_get_wmode_regdomain(struct ath_hal *ah, int regDmn,
263 u16 channelFlag, struct regDomain *rd)
264{
265 int i, found;
266 u64 flags = NO_REQ;
267 struct reg_dmn_pair_mapping *regPair = NULL;
268 int regOrg;
269
270 regOrg = regDmn;
271 if (regDmn == CTRY_DEFAULT) {
272 u16 rdnum;
273 rdnum = ath9k_regd_get_eepromRD(ah);
274
275 if (!(rdnum & COUNTRY_ERD_FLAG)) {
276 if (ath9k_regd_is_valid_reg_domain(rdnum, NULL) ||
277 ath9k_regd_is_valid_reg_domainPair(rdnum)) {
278 regDmn = rdnum;
279 }
280 }
281 }
282
283 if ((regDmn & MULTI_DOMAIN_MASK) == 0) {
284 for (i = 0, found = 0;
285 (i < ARRAY_SIZE(regDomainPairs)) && (!found); i++) {
286 if (regDomainPairs[i].regDmnEnum == regDmn) {
287 regPair = &regDomainPairs[i];
288 found = 1;
289 }
290 }
291 if (!found) {
292 DPRINTF(ah->ah_sc, ATH_DBG_REGULATORY,
293 "%s: Failed to find reg domain pair %u\n",
294 __func__, regDmn);
295 return false;
296 }
297 if (!(channelFlag & CHANNEL_2GHZ)) {
298 regDmn = regPair->regDmn5GHz;
299 flags = regPair->flags5GHz;
300 }
301 if (channelFlag & CHANNEL_2GHZ) {
302 regDmn = regPair->regDmn2GHz;
303 flags = regPair->flags2GHz;
304 }
305 }
306
307 found = ath9k_regd_is_valid_reg_domain(regDmn, rd);
308 if (!found) {
309 DPRINTF(ah->ah_sc, ATH_DBG_REGULATORY,
310 "%s: Failed to find unitary reg domain %u\n",
311 __func__, regDmn);
312 return false;
313 } else {
314 rd->pscan &= regPair->pscanMask;
315 if (((regOrg & MULTI_DOMAIN_MASK) == 0) &&
316 (flags != NO_REQ)) {
317 rd->flags = flags;
318 }
319
320 rd->flags &= (channelFlag & CHANNEL_2GHZ) ?
321 REG_DOMAIN_2GHZ_MASK : REG_DOMAIN_5GHZ_MASK;
322 return true;
323 }
324}
325
326static bool ath9k_regd_is_bit_set(int bit, u64 *bitmask)
327{
328 int byteOffset, bitnum;
329 u64 val;
330
331 byteOffset = bit / 64;
332 bitnum = bit - byteOffset * 64;
333 val = ((u64) 1) << bitnum;
334 if (bitmask[byteOffset] & val)
335 return true;
336 else
337 return false;
338}
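/*
 * The channel bitmaps are BMLEN (two) u64 words wide, so for example bit 70
 * is tested as word 70 / 64 == 1, bit 70 - 64 == 6:
 *
 *	ath9k_regd_is_bit_set(70, rd->chan11a)
 *
 * returns true iff bit 6 of the second word is set.
 */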
339
340static void
341ath9k_regd_add_reg_classid(u8 *regclassids, u32 maxregids,
342 u32 *nregids, u8 regclassid)
343{
344 int i;
345
346 if (regclassid == 0)
347 return;
348
349 for (i = 0; i < maxregids; i++) {
350 if (regclassids[i] == regclassid)
351 return;
352 if (regclassids[i] == 0)
353 break;
354 }
355
356 if (i == maxregids)
357 return;
358 else {
359 regclassids[i] = regclassid;
360 *nregids += 1;
361 }
362
363 return;
364}
365
366static bool
367ath9k_regd_get_eeprom_reg_ext_bits(struct ath_hal *ah,
368 enum reg_ext_bitmap bit)
369{
370 return (ah->ah_currentRDExt & (1 << bit)) ? true : false;
371}
372
373#ifdef ATH_NF_PER_CHAN
374
375static void ath9k_regd_init_rf_buffer(struct ath9k_channel *ichans,
376 int nchans)
377{
378 int i, j, next;
379
380 for (next = 0; next < nchans; next++) {
381 for (i = 0; i < NUM_NF_READINGS; i++) {
382 ichans[next].nfCalHist[i].currIndex = 0;
383 ichans[next].nfCalHist[i].privNF =
384 AR_PHY_CCA_MAX_GOOD_VALUE;
385 ichans[next].nfCalHist[i].invalidNFcount =
386 AR_PHY_CCA_FILTERWINDOW_LENGTH;
387 for (j = 0; j < ATH9K_NF_CAL_HIST_MAX; j++) {
388 ichans[next].nfCalHist[i].nfCalBuffer[j] =
389 AR_PHY_CCA_MAX_GOOD_VALUE;
390 }
391 }
392 }
393}
394#endif
395
396static int ath9k_regd_is_chan_present(struct ath_hal *ah,
397 u16 c)
398{
399 int i;
400
401 for (i = 0; i < 150; i++) {
402 if (!ah->ah_channels[i].channel)
403 return -1;
404 else if (ah->ah_channels[i].channel == c)
405 return i;
406 }
407
408 return -1;
409}
410
411static bool
412ath9k_regd_add_channel(struct ath_hal *ah,
413 u16 c,
414 u16 c_lo,
415 u16 c_hi,
416 u16 maxChan,
417 u8 ctl,
418 int pos,
419 struct regDomain rd5GHz,
420 struct RegDmnFreqBand *fband,
421 struct regDomain *rd,
422 const struct cmode *cm,
423 struct ath9k_channel *ichans,
424 bool enableExtendedChannels)
425{
426 struct ath9k_channel *chan;
427 int ret;
428 u32 channelFlags = 0;
429 u8 privFlags = 0;
430
431 if (!(c_lo <= c && c <= c_hi)) {
432 DPRINTF(ah->ah_sc, ATH_DBG_REGULATORY,
433 "%s: c %u out of range [%u..%u]\n",
434 __func__, c, c_lo, c_hi);
435 return false;
436 }
437 if ((fband->channelBW == CHANNEL_HALF_BW) &&
438 !(ah->ah_caps.hw_caps & ATH9K_HW_CAP_CHAN_HALFRATE)) {
439 DPRINTF(ah->ah_sc, ATH_DBG_REGULATORY,
440 "%s: Skipping %u half rate channel\n",
441 __func__, c);
442 return false;
443 }
444
445 if ((fband->channelBW == CHANNEL_QUARTER_BW) &&
446 !(ah->ah_caps.hw_caps & ATH9K_HW_CAP_CHAN_QUARTERRATE)) {
447 DPRINTF(ah->ah_sc, ATH_DBG_REGULATORY,
448 "%s: Skipping %u quarter rate channel\n",
449 __func__, c);
450 return false;
451 }
452
453 if (((c + fband->channelSep) / 2) > (maxChan + HALF_MAXCHANBW)) {
454 DPRINTF(ah->ah_sc, ATH_DBG_REGULATORY,
455 "%s: c %u > maxChan %u\n",
456 __func__, c, maxChan);
457 return false;
458 }
459
460 if ((fband->usePassScan & IS_ECM_CHAN) && !enableExtendedChannels) {
461 DPRINTF(ah->ah_sc, ATH_DBG_REGULATORY,
462 "Skipping ecm channel\n");
463 return false;
464 }
465
466 if ((rd->flags & NO_HOSTAP) && (ah->ah_opmode == ATH9K_M_HOSTAP)) {
467 DPRINTF(ah->ah_sc, ATH_DBG_REGULATORY,
468 "Skipping HOSTAP channel\n");
469 return false;
470 }
471
472 if (IS_HT40_MODE(cm->mode) &&
473 !(ath9k_regd_get_eeprom_reg_ext_bits(ah, REG_EXT_FCC_DFS_HT40)) &&
474 (fband->useDfs) &&
475 (rd->conformanceTestLimit != MKK)) {
476 DPRINTF(ah->ah_sc, ATH_DBG_REGULATORY,
477 "Skipping HT40 channel (en_fcc_dfs_ht40 = 0)\n");
478 return false;
479 }
480
481 if (IS_HT40_MODE(cm->mode) &&
482 !(ath9k_regd_get_eeprom_reg_ext_bits(ah,
483 REG_EXT_JAPAN_NONDFS_HT40)) &&
484 !(fband->useDfs) && (rd->conformanceTestLimit == MKK)) {
485 DPRINTF(ah->ah_sc, ATH_DBG_REGULATORY,
486 "Skipping HT40 channel (en_jap_ht40 = 0)\n");
487 return false;
488 }
489
490 if (IS_HT40_MODE(cm->mode) &&
491 !(ath9k_regd_get_eeprom_reg_ext_bits(ah, REG_EXT_JAPAN_DFS_HT40)) &&
492 (fband->useDfs) &&
493 (rd->conformanceTestLimit == MKK)) {
494 DPRINTF(ah->ah_sc, ATH_DBG_REGULATORY,
495 "Skipping HT40 channel (en_jap_dfs_ht40 = 0)\n");
496 return false;
497 }
498
499 /* Calculate channel flags */
500
501 channelFlags = cm->flags;
502
503 switch (fband->channelBW) {
504 case CHANNEL_HALF_BW:
505 channelFlags |= CHANNEL_HALF;
506 break;
507 case CHANNEL_QUARTER_BW:
508 channelFlags |= CHANNEL_QUARTER;
509 break;
510 }
511
512 if (fband->usePassScan & rd->pscan)
513 channelFlags |= CHANNEL_PASSIVE;
514 else
515 channelFlags &= ~CHANNEL_PASSIVE;
516 if (fband->useDfs & rd->dfsMask)
517 privFlags = CHANNEL_DFS;
518 else
519 privFlags = 0;
520 if (rd->flags & LIMIT_FRAME_4MS)
521 privFlags |= CHANNEL_4MS_LIMIT;
522 if (privFlags & CHANNEL_DFS)
523 privFlags |= CHANNEL_DISALLOW_ADHOC;
524 if (rd->flags & ADHOC_PER_11D)
525 privFlags |= CHANNEL_PER_11D_ADHOC;
526
527 if (channelFlags & CHANNEL_PASSIVE) {
528 if ((c < 2412) || (c > 2462)) {
529 if (rd5GHz.regDmnEnum == MKK1 ||
530 rd5GHz.regDmnEnum == MKK2) {
531 u32 regcap = ah->ah_caps.reg_cap;
532 if (!(regcap &
533 (AR_EEPROM_EEREGCAP_EN_KK_U1_EVEN |
534 AR_EEPROM_EEREGCAP_EN_KK_U2 |
535 AR_EEPROM_EEREGCAP_EN_KK_MIDBAND)) &&
536 isUNII1OddChan(c)) {
537 channelFlags &= ~CHANNEL_PASSIVE;
538 } else {
539 privFlags |= CHANNEL_DISALLOW_ADHOC;
540 }
541 } else {
542 privFlags |= CHANNEL_DISALLOW_ADHOC;
543 }
544 }
545 }
546
547 if ((cm->mode == ATH9K_MODE_11A) ||
548 (cm->mode == ATH9K_MODE_11NA_HT20) ||
549 (cm->mode == ATH9K_MODE_11NA_HT40PLUS) ||
550 (cm->mode == ATH9K_MODE_11NA_HT40MINUS)) {
551 if (rd->flags & (ADHOC_NO_11A | DISALLOW_ADHOC_11A))
552 privFlags |= CHANNEL_DISALLOW_ADHOC;
553 }
554
555 /* Fill in channel details */
556
557 ret = ath9k_regd_is_chan_present(ah, c);
558 if (ret == -1) {
559 chan = &ah->ah_channels[pos];
560 chan->channel = c;
561 chan->maxRegTxPower = fband->powerDfs;
562 chan->antennaMax = fband->antennaMax;
563 chan->regDmnFlags = rd->flags;
564 chan->maxTxPower = AR5416_MAX_RATE_POWER;
565 chan->minTxPower = AR5416_MAX_RATE_POWER;
566 chan->channelFlags = channelFlags;
567 chan->privFlags = privFlags;
568 } else {
569 chan = &ah->ah_channels[ret];
570 chan->channelFlags |= channelFlags;
571 chan->privFlags |= privFlags;
572 }
573
574 /* Set CTLs */
575
576 if ((cm->flags & CHANNEL_ALL) == CHANNEL_A)
577 chan->conformanceTestLimit[0] = ctl;
578 else if ((cm->flags & CHANNEL_ALL) == CHANNEL_B)
579 chan->conformanceTestLimit[1] = ctl;
580 else if ((cm->flags & CHANNEL_ALL) == CHANNEL_G)
581 chan->conformanceTestLimit[2] = ctl;
582
583 return (ret == -1) ? true : false;
584}
585
586static bool ath9k_regd_japan_check(struct ath_hal *ah,
587 int b,
588 struct regDomain *rd5GHz)
589{
590 bool skipband = false;
591 int i;
592 u32 regcap;
593
594 for (i = 0; i < ARRAY_SIZE(j_bandcheck); i++) {
595 if (j_bandcheck[i].freqbandbit == b) {
596 regcap = ah->ah_caps.reg_cap;
597 if ((j_bandcheck[i].eepromflagtocheck & regcap) == 0) {
598 skipband = true;
599 } else if ((regcap & AR_EEPROM_EEREGCAP_EN_KK_U2) ||
600 (regcap & AR_EEPROM_EEREGCAP_EN_KK_MIDBAND)) {
601 rd5GHz->dfsMask |= DFS_MKK4;
602 rd5GHz->pscan |= PSCAN_MKK3;
603 }
604 break;
605 }
606 }
607
608 DPRINTF(ah->ah_sc, ATH_DBG_REGULATORY,
609 "%s: Skipping %d freq band\n",
610 __func__, j_bandcheck[i].freqbandbit);
611
612 return skipband;
613}
614
615bool
616ath9k_regd_init_channels(struct ath_hal *ah,
617 u32 maxchans,
618 u32 *nchans, u8 *regclassids,
619 u32 maxregids, u32 *nregids, u16 cc,
620 bool enableOutdoor,
621 bool enableExtendedChannels)
622{
623 u16 maxChan = 7000;
624 struct country_code_to_enum_rd *country = NULL;
625 struct regDomain rd5GHz, rd2GHz;
626 const struct cmode *cm;
627 struct ath9k_channel *ichans = &ah->ah_channels[0];
628 int next = 0, b;
629 u8 ctl;
630 int regdmn;
631 u16 chanSep;
632 unsigned long *modes_avail;
633 DECLARE_BITMAP(modes_allowed, ATH9K_MODE_MAX);
634
635 DPRINTF(ah->ah_sc, ATH_DBG_REGULATORY, "%s: cc %u %s %s\n",
636 __func__, cc,
637 enableOutdoor ? "Enable outdoor" : "",
638 enableExtendedChannels ? "Enable ecm" : "");
639
640 if (!ath9k_regd_is_ccode_valid(ah, cc)) {
641 DPRINTF(ah->ah_sc, ATH_DBG_REGULATORY,
642 "%s: invalid country code %d\n", __func__, cc);
643 return false;
644 }
645
646 if (!ath9k_regd_is_eeprom_valid(ah)) {
647 DPRINTF(ah->ah_sc, ATH_DBG_REGULATORY,
648 "%s: invalid EEPROM contents\n", __func__);
649 return false;
650 }
651
652 ah->ah_countryCode = ath9k_regd_get_default_country(ah);
653
654 if (ah->ah_countryCode == CTRY_DEFAULT) {
655 ah->ah_countryCode = cc & COUNTRY_CODE_MASK;
656 if ((ah->ah_countryCode == CTRY_DEFAULT) &&
657 (ath9k_regd_get_eepromRD(ah) == CTRY_DEFAULT)) {
658 ah->ah_countryCode = CTRY_UNITED_STATES;
659 }
660 }
661
662#ifdef AH_SUPPORT_11D
663 if (ah->ah_countryCode == CTRY_DEFAULT) {
664 regdmn = ath9k_regd_get_eepromRD(ah);
665 country = NULL;
666 } else {
667#endif
668 country = ath9k_regd_find_country(ah->ah_countryCode);
669 if (country == NULL) {
670 DPRINTF(ah->ah_sc, ATH_DBG_REGULATORY,
671 "Country is NULL!!!!, cc= %d\n",
672 ah->ah_countryCode);
673 return false;
674 } else {
675 regdmn = country->regDmnEnum;
676#ifdef AH_SUPPORT_11D
677 if (((ath9k_regd_get_eepromRD(ah) &
678 WORLD_SKU_MASK) == WORLD_SKU_PREFIX) &&
679 (cc == CTRY_UNITED_STATES)) {
680 if (!isWwrSKU_NoMidband(ah)
681 && ath9k_regd_is_fcc_midband_supported(ah))
682 regdmn = FCC3_FCCA;
683 else
684 regdmn = FCC1_FCCA;
685 }
686#endif
687 }
688#ifdef AH_SUPPORT_11D
689 }
690#endif
691 if (!ath9k_regd_get_wmode_regdomain(ah,
692 regdmn,
693 ~CHANNEL_2GHZ,
694 &rd5GHz)) {
695 DPRINTF(ah->ah_sc, ATH_DBG_REGULATORY,
696 "%s: couldn't find unitary "
697 "5GHz reg domain for country %u\n",
698 __func__, ah->ah_countryCode);
699 return false;
700 }
701 if (!ath9k_regd_get_wmode_regdomain(ah,
702 regdmn,
703 CHANNEL_2GHZ,
704 &rd2GHz)) {
705 DPRINTF(ah->ah_sc, ATH_DBG_REGULATORY,
706 "%s: couldn't find unitary 2GHz "
707 "reg domain for country %u\n",
708 __func__, ah->ah_countryCode);
709 return false;
710 }
711
712 if (!isWwrSKU(ah) && ((rd5GHz.regDmnEnum == FCC1) ||
713 (rd5GHz.regDmnEnum == FCC2))) {
714 if (ath9k_regd_is_fcc_midband_supported(ah)) {
715 if (!ath9k_regd_get_wmode_regdomain(ah,
716 FCC3_FCCA,
717 ~CHANNEL_2GHZ,
718 &rd5GHz)) {
719 DPRINTF(ah->ah_sc, ATH_DBG_REGULATORY,
720 "%s: couldn't find unitary 5GHz "
721 "reg domain for country %u\n",
722 __func__, ah->ah_countryCode);
723 return false;
724 }
725 }
726 }
727
728 if (country == NULL) {
729 modes_avail = ah->ah_caps.wireless_modes;
730 } else {
731 ath9k_regd_get_wmodes_nreg(ah, country, &rd5GHz, modes_allowed);
732 modes_avail = modes_allowed;
733
734 if (!enableOutdoor)
735 maxChan = country->outdoorChanStart;
736 }
737
738 next = 0;
739
740 if (maxchans > ARRAY_SIZE(ah->ah_channels))
741 maxchans = ARRAY_SIZE(ah->ah_channels);
742
743 for (cm = modes; cm < &modes[ARRAY_SIZE(modes)]; cm++) {
744 u16 c, c_hi, c_lo;
745 u64 *channelBM = NULL;
746 struct regDomain *rd = NULL;
747 struct RegDmnFreqBand *fband = NULL, *freqs;
748 int8_t low_adj = 0, hi_adj = 0;
749
750 if (!test_bit(cm->mode, modes_avail)) {
751 DPRINTF(ah->ah_sc, ATH_DBG_REGULATORY,
752 "%s: !avail mode %d flags 0x%x\n",
753 __func__, cm->mode, cm->flags);
754 continue;
755 }
756 if (!ath9k_get_channel_edges(ah, cm->flags, &c_lo, &c_hi)) {
757 DPRINTF(ah->ah_sc, ATH_DBG_REGULATORY,
758 "%s: channels 0x%x not supported "
759 "by hardware\n",
760 __func__, cm->flags);
761 continue;
762 }
763
764 switch (cm->mode) {
765 case ATH9K_MODE_11A:
766 case ATH9K_MODE_11NA_HT20:
767 case ATH9K_MODE_11NA_HT40PLUS:
768 case ATH9K_MODE_11NA_HT40MINUS:
769 rd = &rd5GHz;
770 channelBM = rd->chan11a;
771 freqs = &regDmn5GhzFreq[0];
772 ctl = rd->conformanceTestLimit;
773 break;
774 case ATH9K_MODE_11B:
775 rd = &rd2GHz;
776 channelBM = rd->chan11b;
777 freqs = &regDmn2GhzFreq[0];
778 ctl = rd->conformanceTestLimit | CTL_11B;
779 break;
780 case ATH9K_MODE_11G:
781 case ATH9K_MODE_11NG_HT20:
782 case ATH9K_MODE_11NG_HT40PLUS:
783 case ATH9K_MODE_11NG_HT40MINUS:
784 rd = &rd2GHz;
785 channelBM = rd->chan11g;
786 freqs = &regDmn2Ghz11gFreq[0];
787 ctl = rd->conformanceTestLimit | CTL_11G;
788 break;
789 default:
790 DPRINTF(ah->ah_sc, ATH_DBG_REGULATORY,
791 "%s: Unknown HAL mode 0x%x\n", __func__,
792 cm->mode);
793 continue;
794 }
795
796 if (ath9k_regd_is_chan_bm_zero(channelBM))
797 continue;
798
799 if ((cm->mode == ATH9K_MODE_11NA_HT40PLUS) ||
800 (cm->mode == ATH9K_MODE_11NG_HT40PLUS)) {
801 hi_adj = -20;
802 }
803
804 if ((cm->mode == ATH9K_MODE_11NA_HT40MINUS) ||
805 (cm->mode == ATH9K_MODE_11NG_HT40MINUS)) {
806 low_adj = 20;
807 }
808
809 /* XXX: Add a helper here instead */
810 for (b = 0; b < 64 * BMLEN; b++) {
811 if (ath9k_regd_is_bit_set(b, channelBM)) {
812 fband = &freqs[b];
813 if (rd5GHz.regDmnEnum == MKK1
814 || rd5GHz.regDmnEnum == MKK2) {
815 if (ath9k_regd_japan_check(ah,
816 b,
817 &rd5GHz))
818 continue;
819 }
820
821 ath9k_regd_add_reg_classid(regclassids,
822 maxregids,
823 nregids,
824 fband->
825 regClassId);
826
827 if (IS_HT40_MODE(cm->mode) && (rd == &rd5GHz)) {
828 chanSep = 40;
829 if (fband->lowChannel == 5280)
830 low_adj += 20;
831
832 if (fband->lowChannel == 5170)
833 continue;
834 } else
835 chanSep = fband->channelSep;
836
837 for (c = fband->lowChannel + low_adj;
838 ((c <= (fband->highChannel + hi_adj)) &&
839 (c >= (fband->lowChannel + low_adj)));
840 c += chanSep) {
841 if (next >= maxchans) {
842 DPRINTF(ah->ah_sc,
843 ATH_DBG_REGULATORY,
844 "%s: too many channels "
845 "for channel table\n",
846 __func__);
847 goto done;
848 }
849 if (ath9k_regd_add_channel(ah,
850 c, c_lo, c_hi,
851 maxChan, ctl,
852 next,
853 rd5GHz,
854 fband, rd, cm,
855 ichans,
856 enableExtendedChannels))
857 next++;
858 }
859 if (IS_HT40_MODE(cm->mode) &&
860 (fband->lowChannel == 5280)) {
861 low_adj -= 20;
862 }
863 }
864 }
865 }
866done:
867 if (next != 0) {
868 int i;
869
870 if (next > ARRAY_SIZE(ah->ah_channels)) {
871 DPRINTF(ah->ah_sc, ATH_DBG_REGULATORY,
872 "%s: too many channels %u; truncating to %u\n",
873 __func__, next,
874 (int) ARRAY_SIZE(ah->ah_channels));
875 next = ARRAY_SIZE(ah->ah_channels);
876 }
877#ifdef ATH_NF_PER_CHAN
878 ath9k_regd_init_rf_buffer(ichans, next);
879#endif
880 ath9k_regd_sort(ichans, next,
881 sizeof(struct ath9k_channel),
882 ath9k_regd_chansort);
883
884 ah->ah_nchan = next;
885
886 DPRINTF(ah->ah_sc, ATH_DBG_REGULATORY, "Channel list:\n");
887 for (i = 0; i < next; i++) {
888 DPRINTF(ah->ah_sc, ATH_DBG_REGULATORY,
889 "chan: %d flags: 0x%x\n",
890 ah->ah_channels[i].channel,
891 ah->ah_channels[i].channelFlags);
892 }
893 }
894 *nchans = next;
895
896 ah->ah_countryCode = ah->ah_countryCode;
897
898 ah->ah_currentRDInUse = regdmn;
899 ah->ah_currentRD5G = rd5GHz.regDmnEnum;
900 ah->ah_currentRD2G = rd2GHz.regDmnEnum;
901 if (country == NULL) {
902 ah->ah_iso[0] = 0;
903 ah->ah_iso[1] = 0;
904 } else {
905 ah->ah_iso[0] = country->isoName[0];
906 ah->ah_iso[1] = country->isoName[1];
907 }
908
909 return next != 0;
910}
911
912struct ath9k_channel*
913ath9k_regd_check_channel(struct ath_hal *ah,
914 const struct ath9k_channel *c)
915{
916 struct ath9k_channel *base, *cc;
917
918 int flags = c->channelFlags & CHAN_FLAGS;
919 int n, lim;
920
921 DPRINTF(ah->ah_sc, ATH_DBG_REGULATORY,
922 "%s: channel %u/0x%x (0x%x) requested\n", __func__,
923 c->channel, c->channelFlags, flags);
924
925 cc = ah->ah_curchan;
926 if (cc != NULL && cc->channel == c->channel &&
927 (cc->channelFlags & CHAN_FLAGS) == flags) {
928 if ((cc->privFlags & CHANNEL_INTERFERENCE) &&
929 (cc->privFlags & CHANNEL_DFS))
930 return NULL;
931 else
932 return cc;
933 }
934
935 base = ah->ah_channels;
936 n = ah->ah_nchan;
937
938 for (lim = n; lim != 0; lim >>= 1) {
939 int d;
940 cc = &base[lim >> 1];
941 d = c->channel - cc->channel;
942 if (d == 0) {
943 if ((cc->channelFlags & CHAN_FLAGS) == flags) {
944 if ((cc->privFlags & CHANNEL_INTERFERENCE) &&
945 (cc->privFlags & CHANNEL_DFS))
946 return NULL;
947 else
948 return cc;
949 }
950 d = flags - (cc->channelFlags & CHAN_FLAGS);
951 }
952 DPRINTF(ah->ah_sc, ATH_DBG_REGULATORY,
953 "%s: channel %u/0x%x d %d\n", __func__,
954 cc->channel, cc->channelFlags, d);
955 if (d > 0) {
956 base = cc + 1;
957 lim--;
958 }
959 }
960 DPRINTF(ah->ah_sc, ATH_DBG_REGULATORY, "%s: no match for %u/0x%x\n",
961 __func__, c->channel, c->channelFlags);
962 return NULL;
963}
964
965u32
966ath9k_regd_get_antenna_allowed(struct ath_hal *ah,
967 struct ath9k_channel *chan)
968{
969 struct ath9k_channel *ichan = NULL;
970
971 ichan = ath9k_regd_check_channel(ah, chan);
972 if (!ichan)
973 return 0;
974
975 return ichan->antennaMax;
976}
977
978u32 ath9k_regd_get_ctl(struct ath_hal *ah, struct ath9k_channel *chan)
979{
980 u32 ctl = NO_CTL;
981 struct ath9k_channel *ichan;
982
983 if (ah->ah_countryCode == CTRY_DEFAULT && isWwrSKU(ah)) {
984 if (IS_CHAN_B(chan))
985 ctl = SD_NO_CTL | CTL_11B;
986 else if (IS_CHAN_G(chan))
987 ctl = SD_NO_CTL | CTL_11G;
988 else
989 ctl = SD_NO_CTL | CTL_11A;
990 } else {
991 ichan = ath9k_regd_check_channel(ah, chan);
992 if (ichan != NULL) {
993 /* FIXME */
994 if (IS_CHAN_A(ichan))
995 ctl = ichan->conformanceTestLimit[0];
996 else if (IS_CHAN_B(ichan))
997 ctl = ichan->conformanceTestLimit[1];
998 else if (IS_CHAN_G(ichan))
999 ctl = ichan->conformanceTestLimit[2];
1000
1001 if (IS_CHAN_G(chan) && (ctl & 0xf) == CTL_11B)
1002 ctl = (ctl & ~0xf) | CTL_11G;
1003 }
1004 }
1005 return ctl;
1006}
1007
1008void ath9k_regd_get_current_country(struct ath_hal *ah,
1009 struct ath9k_country_entry *ctry)
1010{
1011 u16 rd = ath9k_regd_get_eepromRD(ah);
1012
1013 ctry->isMultidomain = false;
1014 if (rd == CTRY_DEFAULT)
1015 ctry->isMultidomain = true;
1016 else if (!(rd & COUNTRY_ERD_FLAG))
1017 ctry->isMultidomain = isWwrSKU(ah);
1018
1019 ctry->countryCode = ah->ah_countryCode;
1020 ctry->regDmnEnum = ah->ah_currentRD;
1021 ctry->regDmn5G = ah->ah_currentRD5G;
1022 ctry->regDmn2G = ah->ah_currentRD2G;
1023 ctry->iso[0] = ah->ah_iso[0];
1024 ctry->iso[1] = ah->ah_iso[1];
1025 ctry->iso[2] = ah->ah_iso[2];
1026}
diff --git a/drivers/net/wireless/ath9k/regd.h b/drivers/net/wireless/ath9k/regd.h
new file mode 100644
index 000000000000..0ecd344fbd98
--- /dev/null
+++ b/drivers/net/wireless/ath9k/regd.h
@@ -0,0 +1,412 @@
1/*
2 * Copyright (c) 2008 Atheros Communications Inc.
3 *
4 * Permission to use, copy, modify, and/or distribute this software for any
5 * purpose with or without fee is hereby granted, provided that the above
6 * copyright notice and this permission notice appear in all copies.
7 *
8 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
9 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
10 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
11 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
12 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
13 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
14 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
15 */
16
17#ifndef REGD_H
18#define REGD_H
19
20#include "ath9k.h"
21
22#define BMLEN 2
23#define BMZERO {(u64) 0, (u64) 0}
24
25#define BM(_fa, _fb, _fc, _fd, _fe, _ff, _fg, _fh, _fi, _fj, _fk, _fl) \
26 {((((_fa >= 0) && (_fa < 64)) ? \
27 (((u64) 1) << _fa) : (u64) 0) | \
28 (((_fb >= 0) && (_fb < 64)) ? \
29 (((u64) 1) << _fb) : (u64) 0) | \
30 (((_fc >= 0) && (_fc < 64)) ? \
31 (((u64) 1) << _fc) : (u64) 0) | \
32 (((_fd >= 0) && (_fd < 64)) ? \
33 (((u64) 1) << _fd) : (u64) 0) | \
34 (((_fe >= 0) && (_fe < 64)) ? \
35 (((u64) 1) << _fe) : (u64) 0) | \
36 (((_ff >= 0) && (_ff < 64)) ? \
37 (((u64) 1) << _ff) : (u64) 0) | \
38 (((_fg >= 0) && (_fg < 64)) ? \
39 (((u64) 1) << _fg) : (u64) 0) | \
40 (((_fh >= 0) && (_fh < 64)) ? \
41 (((u64) 1) << _fh) : (u64) 0) | \
42 (((_fi >= 0) && (_fi < 64)) ? \
43 (((u64) 1) << _fi) : (u64) 0) | \
44 (((_fj >= 0) && (_fj < 64)) ? \
45 (((u64) 1) << _fj) : (u64) 0) | \
46 (((_fk >= 0) && (_fk < 64)) ? \
47 (((u64) 1) << _fk) : (u64) 0) | \
48 (((_fl >= 0) && (_fl < 64)) ? \
49 (((u64) 1) << _fl) : (u64) 0) | \
50 ((((_fa > 63) && (_fa < 128)) ? \
51 (((u64) 1) << (_fa - 64)) : (u64) 0) | \
52 (((_fb > 63) && (_fb < 128)) ? \
53 (((u64) 1) << (_fb - 64)) : (u64) 0) | \
54 (((_fc > 63) && (_fc < 128)) ? \
55 (((u64) 1) << (_fc - 64)) : (u64) 0) | \
56 (((_fd > 63) && (_fd < 128)) ? \
57 (((u64) 1) << (_fd - 64)) : (u64) 0) | \
58 (((_fe > 63) && (_fe < 128)) ? \
59 (((u64) 1) << (_fe - 64)) : (u64) 0) | \
60 (((_ff > 63) && (_ff < 128)) ? \
61 (((u64) 1) << (_ff - 64)) : (u64) 0) | \
62 (((_fg > 63) && (_fg < 128)) ? \
63 (((u64) 1) << (_fg - 64)) : (u64) 0) | \
64 (((_fh > 63) && (_fh < 128)) ? \
65 (((u64) 1) << (_fh - 64)) : (u64) 0) | \
66 (((_fi > 63) && (_fi < 128)) ? \
67 (((u64) 1) << (_fi - 64)) : (u64) 0) | \
68 (((_fj > 63) && (_fj < 128)) ? \
69 (((u64) 1) << (_fj - 64)) : (u64) 0) | \
70 (((_fk > 63) && (_fk < 128)) ? \
71 (((u64) 1) << (_fk - 64)) : (u64) 0) | \
72 (((_fl > 63) && (_fl < 128)) ? \
73 (((u64) 1) << (_fl - 64)) : (u64) 0)))}
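/*
 * BM() packs up to twelve frequency-band indices into a two-word
 * (BMLEN == 2) u64 bitmask: arguments in 0..63 set a bit in the first
 * word, 64..127 in the second, and -1 placeholders contribute nothing.
 * For example BM(x, -1, -1, ...) with x < 64 expands to
 * { (u64)1 << x, (u64)0 }.
 */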
74
75#define DEF_REGDMN FCC1_FCCA
76#define DEF_DMN_5 FCC1
77#define DEF_DMN_2 FCCA
78#define COUNTRY_ERD_FLAG 0x8000
79#define WORLDWIDE_ROAMING_FLAG 0x4000
80#define SUPER_DOMAIN_MASK 0x0fff
81#define COUNTRY_CODE_MASK 0x3fff
82#define CF_INTERFERENCE (CHANNEL_CW_INT | CHANNEL_RADAR_INT)
83#define CHANNEL_14 (2484)
84#define IS_11G_CH14(_ch,_cf) \
85 (((_ch) == CHANNEL_14) && ((_cf) == CHANNEL_G))
86
87#define NO_PSCAN 0x0ULL
88#define PSCAN_FCC 0x0000000000000001ULL
89#define PSCAN_FCC_T 0x0000000000000002ULL
90#define PSCAN_ETSI 0x0000000000000004ULL
91#define PSCAN_MKK1 0x0000000000000008ULL
92#define PSCAN_MKK2 0x0000000000000010ULL
93#define PSCAN_MKKA 0x0000000000000020ULL
94#define PSCAN_MKKA_G 0x0000000000000040ULL
95#define PSCAN_ETSIA 0x0000000000000080ULL
96#define PSCAN_ETSIB 0x0000000000000100ULL
97#define PSCAN_ETSIC 0x0000000000000200ULL
98#define PSCAN_WWR 0x0000000000000400ULL
99#define PSCAN_MKKA1 0x0000000000000800ULL
100#define PSCAN_MKKA1_G 0x0000000000001000ULL
101#define PSCAN_MKKA2 0x0000000000002000ULL
102#define PSCAN_MKKA2_G 0x0000000000004000ULL
103#define PSCAN_MKK3 0x0000000000008000ULL
104#define PSCAN_DEFER 0x7FFFFFFFFFFFFFFFULL
105#define IS_ECM_CHAN 0x8000000000000000ULL
106
107#define isWwrSKU(_ah) \
108 (((ath9k_regd_get_eepromRD((_ah)) & WORLD_SKU_MASK) == \
109 WORLD_SKU_PREFIX) || \
110 (ath9k_regd_get_eepromRD(_ah) == WORLD))
111
112#define isWwrSKU_NoMidband(_ah) \
113 ((ath9k_regd_get_eepromRD((_ah)) == WOR3_WORLD) || \
114 (ath9k_regd_get_eepromRD(_ah) == WOR4_WORLD) || \
115 (ath9k_regd_get_eepromRD(_ah) == WOR5_ETSIC))
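/*
 * The two predicates above detect world-wide-roaming (WWR) SKUs from
 * the EEPROM regdomain word, either by the 0x0060 prefix in the SKU
 * nibble or by the explicit WORLD code; the _NoMidband variant matches
 * the WOR3/WOR4/WOR5 codes, presumably the roaming SKUs that omit the
 * 5 GHz mid-band channels.
 */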
116
117#define isUNII1OddChan(ch) \
118 ((ch == 5170) || (ch == 5190) || (ch == 5210) || (ch == 5230))
119
120#define IS_HT40_MODE(_mode) \
121 (((_mode == ATH9K_MODE_11NA_HT40PLUS || \
122 _mode == ATH9K_MODE_11NG_HT40PLUS || \
123 _mode == ATH9K_MODE_11NA_HT40MINUS || \
124 _mode == ATH9K_MODE_11NG_HT40MINUS) ? true : false))
125
126#define CHAN_FLAGS (CHANNEL_ALL|CHANNEL_HALF|CHANNEL_QUARTER)
127
128#define swap(_a, _b, _size) { \
129 u8 *s = _b; \
130 int i = _size; \
131 do { \
132 u8 tmp = *_a; \
133 *_a++ = *s; \
134 *s++ = tmp; \
135 } while (--i); \
136 _a -= _size; \
137}
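/*
 * swap() exchanges _size bytes between two buffers in place; it
 * assumes _a and _b are byte (u8 *) pointers and leaves _a pointing at
 * its original position afterwards.  It appears to exist only for the
 * driver's private channel-sort helper.
 */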
138
139
140#define HALF_MAXCHANBW 10
141
142#define MULTI_DOMAIN_MASK 0xFF00
143
144#define WORLD_SKU_MASK 0x00F0
145#define WORLD_SKU_PREFIX 0x0060
146
147#define CHANNEL_HALF_BW 10
148#define CHANNEL_QUARTER_BW 5
149
150typedef int ath_hal_cmp_t(const void *, const void *);
151
152struct reg_dmn_pair_mapping {
153 u16 regDmnEnum;
154 u16 regDmn5GHz;
155 u16 regDmn2GHz;
156 u32 flags5GHz;
157 u32 flags2GHz;
158 u64 pscanMask;
159 u16 singleCC;
160};
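/*
 * A reg_dmn_pair_mapping splits a combined EEPROM regulatory code
 * (regDmnEnum) into its 5 GHz and 2 GHz regdomains, per-band
 * requirement flags, a passive-scan mask, and an optional fixed
 * country code (singleCC; 0 presumably means the pair is not tied to
 * a single country).
 */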
161
162struct ccmap {
163 char isoName[3];
164 u16 countryCode;
165};
166
167struct country_code_to_enum_rd {
168 u16 countryCode;
169 u16 regDmnEnum;
170 const char *isoName;
171 const char *name;
172 bool allow11g;
173 bool allow11aTurbo;
174 bool allow11gTurbo;
175 bool allow11ng20;
176 bool allow11ng40;
177 bool allow11na20;
178 bool allow11na40;
179 u16 outdoorChanStart;
180};
181
182struct RegDmnFreqBand {
183 u16 lowChannel;
184 u16 highChannel;
185 u8 powerDfs;
186 u8 antennaMax;
187 u8 channelBW;
188 u8 channelSep;
189 u64 useDfs;
190 u64 usePassScan;
191 u8 regClassId;
192};
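/*
 * RegDmnFreqBand describes one permitted frequency span: low/high
 * channel centre in MHz, the regulatory power limit (powerDfs), the
 * maximum antenna gain, channel bandwidth and separation in MHz, DFS
 * and passive-scan masks, and an 802.11 regulatory class id.
 */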
193
194struct regDomain {
195 u16 regDmnEnum;
196 u8 conformanceTestLimit;
197 u64 dfsMask;
198 u64 pscan;
199 u32 flags;
200 u64 chan11a[BMLEN];
201 u64 chan11a_turbo[BMLEN];
202 u64 chan11a_dyn_turbo[BMLEN];
203 u64 chan11b[BMLEN];
204 u64 chan11g[BMLEN];
205 u64 chan11g_turbo[BMLEN];
206};
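/*
 * A regDomain carries the DFS/passive-scan defaults and flags for one
 * regulatory domain plus per-mode channel bitmaps (11a, 11a turbo,
 * dynamic turbo, 11b, 11g, 11g turbo), each BMLEN u64 words wide and
 * indexed by the frequency-band enums defined alongside the tables
 * below.
 */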
207
208struct cmode {
209 u32 mode;
210 u32 flags;
211};
212
213#define YES true
214#define NO false
215
216struct japan_bandcheck {
217 u16 freqbandbit;
218 u32 eepromflagtocheck;
219};
220
221struct common_mode_power {
222 u16 lchan;
223 u16 hchan;
224 u8 pwrlvl;
225};
226
227enum CountryCode {
228 CTRY_ALBANIA = 8,
229 CTRY_ALGERIA = 12,
230 CTRY_ARGENTINA = 32,
231 CTRY_ARMENIA = 51,
232 CTRY_AUSTRALIA = 36,
233 CTRY_AUSTRIA = 40,
234 CTRY_AZERBAIJAN = 31,
235 CTRY_BAHRAIN = 48,
236 CTRY_BELARUS = 112,
237 CTRY_BELGIUM = 56,
238 CTRY_BELIZE = 84,
239 CTRY_BOLIVIA = 68,
240 CTRY_BOSNIA_HERZ = 70,
241 CTRY_BRAZIL = 76,
242 CTRY_BRUNEI_DARUSSALAM = 96,
243 CTRY_BULGARIA = 100,
244 CTRY_CANADA = 124,
245 CTRY_CHILE = 152,
246 CTRY_CHINA = 156,
247 CTRY_COLOMBIA = 170,
248 CTRY_COSTA_RICA = 188,
249 CTRY_CROATIA = 191,
250 CTRY_CYPRUS = 196,
251 CTRY_CZECH = 203,
252 CTRY_DENMARK = 208,
253 CTRY_DOMINICAN_REPUBLIC = 214,
254 CTRY_ECUADOR = 218,
255 CTRY_EGYPT = 818,
256 CTRY_EL_SALVADOR = 222,
257 CTRY_ESTONIA = 233,
258 CTRY_FAEROE_ISLANDS = 234,
259 CTRY_FINLAND = 246,
260 CTRY_FRANCE = 250,
261 CTRY_GEORGIA = 268,
262 CTRY_GERMANY = 276,
263 CTRY_GREECE = 300,
264 CTRY_GUATEMALA = 320,
265 CTRY_HONDURAS = 340,
266 CTRY_HONG_KONG = 344,
267 CTRY_HUNGARY = 348,
268 CTRY_ICELAND = 352,
269 CTRY_INDIA = 356,
270 CTRY_INDONESIA = 360,
271 CTRY_IRAN = 364,
272 CTRY_IRAQ = 368,
273 CTRY_IRELAND = 372,
274 CTRY_ISRAEL = 376,
275 CTRY_ITALY = 380,
276 CTRY_JAMAICA = 388,
277 CTRY_JAPAN = 392,
278 CTRY_JORDAN = 400,
279 CTRY_KAZAKHSTAN = 398,
280 CTRY_KENYA = 404,
281 CTRY_KOREA_NORTH = 408,
282 CTRY_KOREA_ROC = 410,
283 CTRY_KOREA_ROC2 = 411,
284 CTRY_KOREA_ROC3 = 412,
285 CTRY_KUWAIT = 414,
286 CTRY_LATVIA = 428,
287 CTRY_LEBANON = 422,
288 CTRY_LIBYA = 434,
289 CTRY_LIECHTENSTEIN = 438,
290 CTRY_LITHUANIA = 440,
291 CTRY_LUXEMBOURG = 442,
292 CTRY_MACAU = 446,
293 CTRY_MACEDONIA = 807,
294 CTRY_MALAYSIA = 458,
295 CTRY_MALTA = 470,
296 CTRY_MEXICO = 484,
297 CTRY_MONACO = 492,
298 CTRY_MOROCCO = 504,
299 CTRY_NEPAL = 524,
300 CTRY_NETHERLANDS = 528,
301 CTRY_NETHERLANDS_ANTILLES = 530,
302 CTRY_NEW_ZEALAND = 554,
303 CTRY_NICARAGUA = 558,
304 CTRY_NORWAY = 578,
305 CTRY_OMAN = 512,
306 CTRY_PAKISTAN = 586,
307 CTRY_PANAMA = 591,
308 CTRY_PAPUA_NEW_GUINEA = 598,
309 CTRY_PARAGUAY = 600,
310 CTRY_PERU = 604,
311 CTRY_PHILIPPINES = 608,
312 CTRY_POLAND = 616,
313 CTRY_PORTUGAL = 620,
314 CTRY_PUERTO_RICO = 630,
315 CTRY_QATAR = 634,
316 CTRY_ROMANIA = 642,
317 CTRY_RUSSIA = 643,
318 CTRY_SAUDI_ARABIA = 682,
319 CTRY_SERBIA_MONTENEGRO = 891,
320 CTRY_SINGAPORE = 702,
321 CTRY_SLOVAKIA = 703,
322 CTRY_SLOVENIA = 705,
323 CTRY_SOUTH_AFRICA = 710,
324 CTRY_SPAIN = 724,
325 CTRY_SRI_LANKA = 144,
326 CTRY_SWEDEN = 752,
327 CTRY_SWITZERLAND = 756,
328 CTRY_SYRIA = 760,
329 CTRY_TAIWAN = 158,
330 CTRY_THAILAND = 764,
331 CTRY_TRINIDAD_Y_TOBAGO = 780,
332 CTRY_TUNISIA = 788,
333 CTRY_TURKEY = 792,
334 CTRY_UAE = 784,
335 CTRY_UKRAINE = 804,
336 CTRY_UNITED_KINGDOM = 826,
337 CTRY_UNITED_STATES = 840,
338 CTRY_UNITED_STATES_FCC49 = 842,
339 CTRY_URUGUAY = 858,
340 CTRY_UZBEKISTAN = 860,
341 CTRY_VENEZUELA = 862,
342 CTRY_VIET_NAM = 704,
343 CTRY_YEMEN = 887,
344 CTRY_ZIMBABWE = 716,
345 CTRY_JAPAN1 = 393,
346 CTRY_JAPAN2 = 394,
347 CTRY_JAPAN3 = 395,
348 CTRY_JAPAN4 = 396,
349 CTRY_JAPAN5 = 397,
350 CTRY_JAPAN6 = 4006,
351 CTRY_JAPAN7 = 4007,
352 CTRY_JAPAN8 = 4008,
353 CTRY_JAPAN9 = 4009,
354 CTRY_JAPAN10 = 4010,
355 CTRY_JAPAN11 = 4011,
356 CTRY_JAPAN12 = 4012,
357 CTRY_JAPAN13 = 4013,
358 CTRY_JAPAN14 = 4014,
359 CTRY_JAPAN15 = 4015,
360 CTRY_JAPAN16 = 4016,
361 CTRY_JAPAN17 = 4017,
362 CTRY_JAPAN18 = 4018,
363 CTRY_JAPAN19 = 4019,
364 CTRY_JAPAN20 = 4020,
365 CTRY_JAPAN21 = 4021,
366 CTRY_JAPAN22 = 4022,
367 CTRY_JAPAN23 = 4023,
368 CTRY_JAPAN24 = 4024,
369 CTRY_JAPAN25 = 4025,
370 CTRY_JAPAN26 = 4026,
371 CTRY_JAPAN27 = 4027,
372 CTRY_JAPAN28 = 4028,
373 CTRY_JAPAN29 = 4029,
374 CTRY_JAPAN30 = 4030,
375 CTRY_JAPAN31 = 4031,
376 CTRY_JAPAN32 = 4032,
377 CTRY_JAPAN33 = 4033,
378 CTRY_JAPAN34 = 4034,
379 CTRY_JAPAN35 = 4035,
380 CTRY_JAPAN36 = 4036,
381 CTRY_JAPAN37 = 4037,
382 CTRY_JAPAN38 = 4038,
383 CTRY_JAPAN39 = 4039,
384 CTRY_JAPAN40 = 4040,
385 CTRY_JAPAN41 = 4041,
386 CTRY_JAPAN42 = 4042,
387 CTRY_JAPAN43 = 4043,
388 CTRY_JAPAN44 = 4044,
389 CTRY_JAPAN45 = 4045,
390 CTRY_JAPAN46 = 4046,
391 CTRY_JAPAN47 = 4047,
392 CTRY_JAPAN48 = 4048,
393 CTRY_JAPAN49 = 4049,
394 CTRY_JAPAN50 = 4050,
395 CTRY_JAPAN51 = 4051,
396 CTRY_JAPAN52 = 4052,
397 CTRY_JAPAN53 = 4053,
398 CTRY_JAPAN54 = 4054,
399 CTRY_JAPAN55 = 4055,
400 CTRY_JAPAN56 = 4056,
401 CTRY_JAPAN57 = 4057,
402 CTRY_JAPAN58 = 4058,
403 CTRY_JAPAN59 = 4059,
404 CTRY_AUSTRALIA2 = 5000,
405 CTRY_CANADA2 = 5001,
406 CTRY_BELGIUM2 = 5002
407};
408
409void ath9k_regd_get_current_country(struct ath_hal *ah,
410 struct ath9k_country_entry *ctry);
411
412#endif
diff --git a/drivers/net/wireless/ath9k/regd_common.h b/drivers/net/wireless/ath9k/regd_common.h
new file mode 100644
index 000000000000..9112c030b1e8
--- /dev/null
+++ b/drivers/net/wireless/ath9k/regd_common.h
@@ -0,0 +1,1915 @@
1/*
2 * Copyright (c) 2008 Atheros Communications Inc.
3 *
4 * Permission to use, copy, modify, and/or distribute this software for any
5 * purpose with or without fee is hereby granted, provided that the above
6 * copyright notice and this permission notice appear in all copies.
7 *
8 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
9 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
10 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
11 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
12 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
13 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
14 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
15 */
16
17#ifndef REGD_COMMON_H
18#define REGD_COMMON_H
19
20enum EnumRd {
21 NO_ENUMRD = 0x00,
22 NULL1_WORLD = 0x03,
23 NULL1_ETSIB = 0x07,
24 NULL1_ETSIC = 0x08,
25 FCC1_FCCA = 0x10,
26 FCC1_WORLD = 0x11,
27 FCC4_FCCA = 0x12,
28 FCC5_FCCA = 0x13,
29 FCC6_FCCA = 0x14,
30
31 FCC2_FCCA = 0x20,
32 FCC2_WORLD = 0x21,
33 FCC2_ETSIC = 0x22,
34 FCC6_WORLD = 0x23,
35 FRANCE_RES = 0x31,
36 FCC3_FCCA = 0x3A,
37 FCC3_WORLD = 0x3B,
38
39 ETSI1_WORLD = 0x37,
40 ETSI3_ETSIA = 0x32,
41 ETSI2_WORLD = 0x35,
42 ETSI3_WORLD = 0x36,
43 ETSI4_WORLD = 0x30,
44 ETSI4_ETSIC = 0x38,
45 ETSI5_WORLD = 0x39,
46 ETSI6_WORLD = 0x34,
47 ETSI_RESERVED = 0x33,
48
49 MKK1_MKKA = 0x40,
50 MKK1_MKKB = 0x41,
51 APL4_WORLD = 0x42,
52 MKK2_MKKA = 0x43,
53 APL_RESERVED = 0x44,
54 APL2_WORLD = 0x45,
55 APL2_APLC = 0x46,
56 APL3_WORLD = 0x47,
57 MKK1_FCCA = 0x48,
58 APL2_APLD = 0x49,
59 MKK1_MKKA1 = 0x4A,
60 MKK1_MKKA2 = 0x4B,
61 MKK1_MKKC = 0x4C,
62
63 APL3_FCCA = 0x50,
64 APL1_WORLD = 0x52,
65 APL1_FCCA = 0x53,
66 APL1_APLA = 0x54,
67 APL1_ETSIC = 0x55,
68 APL2_ETSIC = 0x56,
69 APL5_WORLD = 0x58,
70 APL6_WORLD = 0x5B,
71 APL7_FCCA = 0x5C,
72 APL8_WORLD = 0x5D,
73 APL9_WORLD = 0x5E,
74
75 WOR0_WORLD = 0x60,
76 WOR1_WORLD = 0x61,
77 WOR2_WORLD = 0x62,
78 WOR3_WORLD = 0x63,
79 WOR4_WORLD = 0x64,
80 WOR5_ETSIC = 0x65,
81
82 WOR01_WORLD = 0x66,
83 WOR02_WORLD = 0x67,
84 EU1_WORLD = 0x68,
85
86 WOR9_WORLD = 0x69,
87 WORA_WORLD = 0x6A,
88 WORB_WORLD = 0x6B,
89
90 MKK3_MKKB = 0x80,
91 MKK3_MKKA2 = 0x81,
92 MKK3_MKKC = 0x82,
93
94 MKK4_MKKB = 0x83,
95 MKK4_MKKA2 = 0x84,
96 MKK4_MKKC = 0x85,
97
98 MKK5_MKKB = 0x86,
99 MKK5_MKKA2 = 0x87,
100 MKK5_MKKC = 0x88,
101
102 MKK6_MKKB = 0x89,
103 MKK6_MKKA2 = 0x8A,
104 MKK6_MKKC = 0x8B,
105
106 MKK7_MKKB = 0x8C,
107 MKK7_MKKA2 = 0x8D,
108 MKK7_MKKC = 0x8E,
109
110 MKK8_MKKB = 0x8F,
111 MKK8_MKKA2 = 0x90,
112 MKK8_MKKC = 0x91,
113
114 MKK14_MKKA1 = 0x92,
115 MKK15_MKKA1 = 0x93,
116
117 MKK10_FCCA = 0xD0,
118 MKK10_MKKA1 = 0xD1,
119 MKK10_MKKC = 0xD2,
120 MKK10_MKKA2 = 0xD3,
121
122 MKK11_MKKA = 0xD4,
123 MKK11_FCCA = 0xD5,
124 MKK11_MKKA1 = 0xD6,
125 MKK11_MKKC = 0xD7,
126 MKK11_MKKA2 = 0xD8,
127
128 MKK12_MKKA = 0xD9,
129 MKK12_FCCA = 0xDA,
130 MKK12_MKKA1 = 0xDB,
131 MKK12_MKKC = 0xDC,
132 MKK12_MKKA2 = 0xDD,
133
134 MKK13_MKKB = 0xDE,
135
136 MKK3_MKKA = 0xF0,
137 MKK3_MKKA1 = 0xF1,
138 MKK3_FCCA = 0xF2,
139 MKK4_MKKA = 0xF3,
140 MKK4_MKKA1 = 0xF4,
141 MKK4_FCCA = 0xF5,
142 MKK9_MKKA = 0xF6,
143 MKK10_MKKA = 0xF7,
144 MKK6_MKKA1 = 0xF8,
145 MKK6_FCCA = 0xF9,
146 MKK7_MKKA1 = 0xFA,
147 MKK7_FCCA = 0xFB,
148 MKK9_FCCA = 0xFC,
149 MKK9_MKKA1 = 0xFD,
150 MKK9_MKKC = 0xFE,
151 MKK9_MKKA2 = 0xFF,
152
153 APL1 = 0x0150,
154 APL2 = 0x0250,
155 APL3 = 0x0350,
156 APL4 = 0x0450,
157 APL5 = 0x0550,
158 APL6 = 0x0650,
159 APL7 = 0x0750,
160 APL8 = 0x0850,
161 APL9 = 0x0950,
162 APL10 = 0x1050,
163
164 ETSI1 = 0x0130,
165 ETSI2 = 0x0230,
166 ETSI3 = 0x0330,
167 ETSI4 = 0x0430,
168 ETSI5 = 0x0530,
169 ETSI6 = 0x0630,
170 ETSIA = 0x0A30,
171 ETSIB = 0x0B30,
172 ETSIC = 0x0C30,
173
174 FCC1 = 0x0110,
175 FCC2 = 0x0120,
176 FCC3 = 0x0160,
177 FCC4 = 0x0165,
178 FCC5 = 0x0510,
179 FCC6 = 0x0610,
180 FCCA = 0x0A10,
181
182 APLD = 0x0D50,
183
184 MKK1 = 0x0140,
185 MKK2 = 0x0240,
186 MKK3 = 0x0340,
187 MKK4 = 0x0440,
188 MKK5 = 0x0540,
189 MKK6 = 0x0640,
190 MKK7 = 0x0740,
191 MKK8 = 0x0840,
192 MKK9 = 0x0940,
193 MKK10 = 0x0B40,
194 MKK11 = 0x1140,
195 MKK12 = 0x1240,
196 MKK13 = 0x0C40,
197 MKK14 = 0x1440,
198 MKK15 = 0x1540,
199 MKKA = 0x0A40,
200 MKKC = 0x0A50,
201
202 NULL1 = 0x0198,
203 WORLD = 0x0199,
204 DEBUG_REG_DMN = 0x01ff,
205};
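/*
 * The EnumRd values below 0x0100 are the combined regdomain codes
 * programmed into the EEPROM (5 GHz + 2 GHz pairs), while the larger
 * values (FCC1, ETSI1, MKK1, APLx, ...) name the individual per-band
 * regdomains that those pairs are resolved into via regDomainPairs[].
 */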
206
207enum {
208 FCC = 0x10,
209 MKK = 0x40,
210 ETSI = 0x30,
211};
212
213enum {
214 NO_REQ = 0x00000000,
215 DISALLOW_ADHOC_11A = 0x00000001,
216 DISALLOW_ADHOC_11A_TURB = 0x00000002,
217 NEED_NFC = 0x00000004,
218
219 ADHOC_PER_11D = 0x00000008,
220 ADHOC_NO_11A = 0x00000010,
221
222 PUBLIC_SAFETY_DOMAIN = 0x00000020,
223 LIMIT_FRAME_4MS = 0x00000040,
224
225 NO_HOSTAP = 0x00000080,
226
227 REQ_MASK = 0x000000FF,
228};
229
230#define REG_DOMAIN_2GHZ_MASK (REQ_MASK & \
231 (!(ADHOC_NO_11A | DISALLOW_ADHOC_11A | DISALLOW_ADHOC_11A_TURB)))
232#define REG_DOMAIN_5GHZ_MASK REQ_MASK
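/*
 * Note that the logical NOT in REG_DOMAIN_2GHZ_MASK evaluates to zero,
 * so the 2 GHz mask clears every requirement bit; a bitwise NOT (~)
 * that merely strips the 11a ad hoc flags was presumably intended.
 */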
233
234static struct reg_dmn_pair_mapping regDomainPairs[] = {
235 {NO_ENUMRD, DEBUG_REG_DMN, DEBUG_REG_DMN, NO_REQ, NO_REQ,
236 PSCAN_DEFER, 0},
237 {NULL1_WORLD, NULL1, WORLD, NO_REQ, NO_REQ, PSCAN_DEFER, 0},
238 {NULL1_ETSIB, NULL1, ETSIB, NO_REQ, NO_REQ, PSCAN_DEFER, 0},
239 {NULL1_ETSIC, NULL1, ETSIC, NO_REQ, NO_REQ, PSCAN_DEFER, 0},
240
241 {FCC2_FCCA, FCC2, FCCA, NO_REQ, NO_REQ, PSCAN_DEFER, 0},
242 {FCC2_WORLD, FCC2, WORLD, NO_REQ, NO_REQ, PSCAN_DEFER, 0},
243 {FCC2_ETSIC, FCC2, ETSIC, NO_REQ, NO_REQ, PSCAN_DEFER, 0},
244 {FCC3_FCCA, FCC3, FCCA, NO_REQ, NO_REQ, PSCAN_DEFER, 0},
245 {FCC3_WORLD, FCC3, WORLD, NO_REQ, NO_REQ, PSCAN_DEFER, 0},
246 {FCC4_FCCA, FCC4, FCCA,
247 DISALLOW_ADHOC_11A | DISALLOW_ADHOC_11A_TURB, NO_REQ, PSCAN_DEFER,
248 0},
249 {FCC5_FCCA, FCC5, FCCA, NO_REQ, NO_REQ, PSCAN_DEFER, 0},
250 {FCC6_FCCA, FCC6, FCCA, NO_REQ, NO_REQ, PSCAN_DEFER, 0},
251 {FCC6_WORLD, FCC6, WORLD, NO_REQ, NO_REQ, PSCAN_DEFER, 0},
252
253 {ETSI1_WORLD, ETSI1, WORLD,
254 DISALLOW_ADHOC_11A | DISALLOW_ADHOC_11A_TURB, NO_REQ, PSCAN_DEFER,
255 0},
256 {ETSI2_WORLD, ETSI2, WORLD,
257 DISALLOW_ADHOC_11A | DISALLOW_ADHOC_11A_TURB, NO_REQ, PSCAN_DEFER,
258 0},
259 {ETSI3_WORLD, ETSI3, WORLD,
260 DISALLOW_ADHOC_11A | DISALLOW_ADHOC_11A_TURB, NO_REQ, PSCAN_DEFER,
261 0},
262 {ETSI4_WORLD, ETSI4, WORLD,
263 DISALLOW_ADHOC_11A | DISALLOW_ADHOC_11A_TURB, NO_REQ, PSCAN_DEFER,
264 0},
265 {ETSI5_WORLD, ETSI5, WORLD,
266 DISALLOW_ADHOC_11A | DISALLOW_ADHOC_11A_TURB, NO_REQ, PSCAN_DEFER,
267 0},
268 {ETSI6_WORLD, ETSI6, WORLD,
269 DISALLOW_ADHOC_11A | DISALLOW_ADHOC_11A_TURB, NO_REQ, PSCAN_DEFER,
270 0},
271
272 {ETSI3_ETSIA, ETSI3, WORLD,
273 DISALLOW_ADHOC_11A | DISALLOW_ADHOC_11A_TURB, NO_REQ, PSCAN_DEFER,
274 0},
275 {FRANCE_RES, ETSI3, WORLD, NO_REQ, NO_REQ, PSCAN_DEFER, 0},
276
277 {FCC1_WORLD, FCC1, WORLD, NO_REQ, NO_REQ, PSCAN_DEFER, 0},
278 {FCC1_FCCA, FCC1, FCCA, NO_REQ, NO_REQ, PSCAN_DEFER, 0},
279 {APL1_WORLD, APL1, WORLD, NO_REQ, NO_REQ, PSCAN_DEFER, 0},
280 {APL2_WORLD, APL2, WORLD, NO_REQ, NO_REQ, PSCAN_DEFER, 0},
281 {APL3_WORLD, APL3, WORLD, NO_REQ, NO_REQ, PSCAN_DEFER, 0},
282 {APL4_WORLD, APL4, WORLD, NO_REQ, NO_REQ, PSCAN_DEFER, 0},
283 {APL5_WORLD, APL5, WORLD, NO_REQ, NO_REQ, PSCAN_DEFER, 0},
284 {APL6_WORLD, APL6, WORLD, NO_REQ, NO_REQ, PSCAN_DEFER, 0},
285 {APL8_WORLD, APL8, WORLD, NO_REQ, NO_REQ, PSCAN_DEFER, 0},
286 {APL9_WORLD, APL9, WORLD, NO_REQ, NO_REQ, PSCAN_DEFER, 0},
287
288 {APL3_FCCA, APL3, FCCA, NO_REQ, NO_REQ, PSCAN_DEFER, 0},
289 {APL1_ETSIC, APL1, ETSIC, NO_REQ, NO_REQ, PSCAN_DEFER, 0},
290 {APL2_ETSIC, APL2, ETSIC, NO_REQ, NO_REQ, PSCAN_DEFER, 0},
291 {APL2_APLD, APL2, APLD, NO_REQ, NO_REQ, PSCAN_DEFER,},
292
293 {MKK1_MKKA, MKK1, MKKA,
294 DISALLOW_ADHOC_11A_TURB | NEED_NFC | LIMIT_FRAME_4MS, NEED_NFC,
295 PSCAN_MKK1 | PSCAN_MKKA, CTRY_JAPAN},
296 {MKK1_MKKB, MKK1, MKKA,
297 DISALLOW_ADHOC_11A | DISALLOW_ADHOC_11A_TURB | NEED_NFC |
298 LIMIT_FRAME_4MS, NEED_NFC, PSCAN_MKK1 | PSCAN_MKKA | PSCAN_MKKA_G,
299 CTRY_JAPAN1},
300 {MKK1_FCCA, MKK1, FCCA,
301 DISALLOW_ADHOC_11A_TURB | NEED_NFC | LIMIT_FRAME_4MS, NEED_NFC,
302 PSCAN_MKK1, CTRY_JAPAN2},
303 {MKK1_MKKA1, MKK1, MKKA,
304 DISALLOW_ADHOC_11A_TURB | NEED_NFC | LIMIT_FRAME_4MS, NEED_NFC,
305 PSCAN_MKK1 | PSCAN_MKKA1 | PSCAN_MKKA1_G, CTRY_JAPAN4},
306 {MKK1_MKKA2, MKK1, MKKA,
307 DISALLOW_ADHOC_11A_TURB | NEED_NFC | LIMIT_FRAME_4MS, NEED_NFC,
308 PSCAN_MKK1 | PSCAN_MKKA2 | PSCAN_MKKA2_G, CTRY_JAPAN5},
309 {MKK1_MKKC, MKK1, MKKC,
310 DISALLOW_ADHOC_11A_TURB | NEED_NFC | LIMIT_FRAME_4MS, NEED_NFC,
311 PSCAN_MKK1, CTRY_JAPAN6},
312
313 {MKK2_MKKA, MKK2, MKKA,
314 DISALLOW_ADHOC_11A | DISALLOW_ADHOC_11A_TURB | NEED_NFC |
315 LIMIT_FRAME_4MS, NEED_NFC, PSCAN_MKK2 | PSCAN_MKKA | PSCAN_MKKA_G,
316 CTRY_JAPAN3},
317
318 {MKK3_MKKA, MKK3, MKKA,
319 DISALLOW_ADHOC_11A_TURB | NEED_NFC | LIMIT_FRAME_4MS, NEED_NFC,
320 PSCAN_MKKA, CTRY_JAPAN25},
321 {MKK3_MKKB, MKK3, MKKA,
322 DISALLOW_ADHOC_11A | DISALLOW_ADHOC_11A_TURB | NEED_NFC |
323 LIMIT_FRAME_4MS, NEED_NFC, PSCAN_MKKA | PSCAN_MKKA_G,
324 CTRY_JAPAN7},
325 {MKK3_MKKA1, MKK3, MKKA,
326 DISALLOW_ADHOC_11A_TURB | NEED_NFC | LIMIT_FRAME_4MS, NEED_NFC,
327 PSCAN_MKKA1 | PSCAN_MKKA1_G, CTRY_JAPAN26},
328 {MKK3_MKKA2, MKK3, MKKA,
329 DISALLOW_ADHOC_11A_TURB | NEED_NFC | LIMIT_FRAME_4MS, NEED_NFC,
330 PSCAN_MKKA2 | PSCAN_MKKA2_G, CTRY_JAPAN8},
331 {MKK3_MKKC, MKK3, MKKC,
332 DISALLOW_ADHOC_11A_TURB | NEED_NFC | LIMIT_FRAME_4MS, NEED_NFC,
333 NO_PSCAN, CTRY_JAPAN9},
334 {MKK3_FCCA, MKK3, FCCA,
335 DISALLOW_ADHOC_11A_TURB | NEED_NFC | LIMIT_FRAME_4MS, NEED_NFC,
336 NO_PSCAN, CTRY_JAPAN27},
337
338 {MKK4_MKKA, MKK4, MKKA,
339 DISALLOW_ADHOC_11A_TURB | NEED_NFC | LIMIT_FRAME_4MS, NEED_NFC,
340 PSCAN_MKK3, CTRY_JAPAN36},
341 {MKK4_MKKB, MKK4, MKKA,
342 DISALLOW_ADHOC_11A | DISALLOW_ADHOC_11A_TURB | NEED_NFC |
343 LIMIT_FRAME_4MS, NEED_NFC, PSCAN_MKK3 | PSCAN_MKKA | PSCAN_MKKA_G,
344 CTRY_JAPAN10},
345 {MKK4_MKKA1, MKK4, MKKA,
346 DISALLOW_ADHOC_11A_TURB | NEED_NFC | LIMIT_FRAME_4MS, NEED_NFC,
347 PSCAN_MKK3 | PSCAN_MKKA1 | PSCAN_MKKA1_G, CTRY_JAPAN28},
348 {MKK4_MKKA2, MKK4, MKKA,
349 DISALLOW_ADHOC_11A_TURB | NEED_NFC | LIMIT_FRAME_4MS, NEED_NFC,
350 PSCAN_MKK3 | PSCAN_MKKA2 | PSCAN_MKKA2_G, CTRY_JAPAN11},
351 {MKK4_MKKC, MKK4, MKKC,
352 DISALLOW_ADHOC_11A_TURB | NEED_NFC | LIMIT_FRAME_4MS, NEED_NFC,
353 PSCAN_MKK3, CTRY_JAPAN12},
354 {MKK4_FCCA, MKK4, FCCA,
355 DISALLOW_ADHOC_11A_TURB | NEED_NFC | LIMIT_FRAME_4MS, NEED_NFC,
356 PSCAN_MKK3, CTRY_JAPAN29},
357
358 {MKK5_MKKB, MKK5, MKKA,
359 DISALLOW_ADHOC_11A | DISALLOW_ADHOC_11A_TURB | NEED_NFC |
360 LIMIT_FRAME_4MS, NEED_NFC, PSCAN_MKK3 | PSCAN_MKKA | PSCAN_MKKA_G,
361 CTRY_JAPAN13},
362 {MKK5_MKKA2, MKK5, MKKA,
363 DISALLOW_ADHOC_11A_TURB | NEED_NFC | LIMIT_FRAME_4MS, NEED_NFC,
364 PSCAN_MKK3 | PSCAN_MKKA2 | PSCAN_MKKA2_G, CTRY_JAPAN14},
365 {MKK5_MKKC, MKK5, MKKC,
366 DISALLOW_ADHOC_11A_TURB | NEED_NFC | LIMIT_FRAME_4MS, NEED_NFC,
367 PSCAN_MKK3, CTRY_JAPAN15},
368
369 {MKK6_MKKB, MKK6, MKKA,
370 DISALLOW_ADHOC_11A_TURB | NEED_NFC | LIMIT_FRAME_4MS, NEED_NFC,
371 PSCAN_MKK1 | PSCAN_MKKA | PSCAN_MKKA_G, CTRY_JAPAN16},
372 {MKK6_MKKA1, MKK6, MKKA,
373 DISALLOW_ADHOC_11A_TURB | NEED_NFC | LIMIT_FRAME_4MS, NEED_NFC,
374 PSCAN_MKK1 | PSCAN_MKKA1 | PSCAN_MKKA1_G, CTRY_JAPAN30},
375 {MKK6_MKKA2, MKK6, MKKA,
376 DISALLOW_ADHOC_11A_TURB | NEED_NFC | LIMIT_FRAME_4MS, NEED_NFC,
377 PSCAN_MKK1 | PSCAN_MKKA2 | PSCAN_MKKA2_G, CTRY_JAPAN17},
378 {MKK6_MKKC, MKK6, MKKC,
379 DISALLOW_ADHOC_11A_TURB | NEED_NFC | LIMIT_FRAME_4MS, NEED_NFC,
380 PSCAN_MKK1, CTRY_JAPAN18},
381 {MKK6_FCCA, MKK6, FCCA,
382 DISALLOW_ADHOC_11A_TURB | NEED_NFC | LIMIT_FRAME_4MS, NEED_NFC,
383 NO_PSCAN, CTRY_JAPAN31},
384
385 {MKK7_MKKB, MKK7, MKKA,
386 DISALLOW_ADHOC_11A_TURB | NEED_NFC | LIMIT_FRAME_4MS, NEED_NFC,
387 PSCAN_MKK1 | PSCAN_MKK3 | PSCAN_MKKA | PSCAN_MKKA_G,
388 CTRY_JAPAN19},
389 {MKK7_MKKA1, MKK7, MKKA,
390 DISALLOW_ADHOC_11A_TURB | NEED_NFC | LIMIT_FRAME_4MS, NEED_NFC,
391 PSCAN_MKK1 | PSCAN_MKKA1 | PSCAN_MKKA1_G, CTRY_JAPAN32},
392 {MKK7_MKKA2, MKK7, MKKA,
393 DISALLOW_ADHOC_11A_TURB | NEED_NFC | LIMIT_FRAME_4MS, NEED_NFC,
394 PSCAN_MKK1 | PSCAN_MKK3 | PSCAN_MKKA2 | PSCAN_MKKA2_G,
395 CTRY_JAPAN20},
396 {MKK7_MKKC, MKK7, MKKC,
397 DISALLOW_ADHOC_11A_TURB | NEED_NFC | LIMIT_FRAME_4MS, NEED_NFC,
398 PSCAN_MKK1 | PSCAN_MKK3, CTRY_JAPAN21},
399 {MKK7_FCCA, MKK7, FCCA,
400 DISALLOW_ADHOC_11A_TURB | NEED_NFC | LIMIT_FRAME_4MS, NEED_NFC,
401 PSCAN_MKK1 | PSCAN_MKK3, CTRY_JAPAN33},
402
403 {MKK8_MKKB, MKK8, MKKA,
404 DISALLOW_ADHOC_11A_TURB | NEED_NFC | LIMIT_FRAME_4MS, NEED_NFC,
405 PSCAN_MKK1 | PSCAN_MKK3 | PSCAN_MKKA | PSCAN_MKKA_G,
406 CTRY_JAPAN22},
407 {MKK8_MKKA2, MKK8, MKKA,
408 DISALLOW_ADHOC_11A_TURB | NEED_NFC | LIMIT_FRAME_4MS, NEED_NFC,
409 PSCAN_MKK1 | PSCAN_MKK3 | PSCAN_MKKA2 | PSCAN_MKKA2_G,
410 CTRY_JAPAN23},
411 {MKK8_MKKC, MKK8, MKKC,
412 DISALLOW_ADHOC_11A_TURB | NEED_NFC | LIMIT_FRAME_4MS, NEED_NFC,
413 PSCAN_MKK1 | PSCAN_MKK3, CTRY_JAPAN24},
414
415 {MKK9_MKKA, MKK9, MKKA,
416 DISALLOW_ADHOC_11A | DISALLOW_ADHOC_11A_TURB | NEED_NFC |
417 LIMIT_FRAME_4MS, NEED_NFC,
418 PSCAN_MKK2 | PSCAN_MKK3 | PSCAN_MKKA | PSCAN_MKKA_G,
419 CTRY_JAPAN34},
420 {MKK9_FCCA, MKK9, FCCA,
421 DISALLOW_ADHOC_11A_TURB | NEED_NFC | LIMIT_FRAME_4MS, NEED_NFC,
422 NO_PSCAN, CTRY_JAPAN37},
423 {MKK9_MKKA1, MKK9, MKKA,
424 DISALLOW_ADHOC_11A_TURB | NEED_NFC | LIMIT_FRAME_4MS, NEED_NFC,
425 PSCAN_MKKA1 | PSCAN_MKKA1_G, CTRY_JAPAN38},
426 {MKK9_MKKA2, MKK9, MKKA,
427 DISALLOW_ADHOC_11A_TURB | NEED_NFC | LIMIT_FRAME_4MS, NEED_NFC,
428 PSCAN_MKKA2 | PSCAN_MKKA2_G, CTRY_JAPAN40},
429 {MKK9_MKKC, MKK9, MKKC,
430 DISALLOW_ADHOC_11A_TURB | NEED_NFC | LIMIT_FRAME_4MS, NEED_NFC,
431 NO_PSCAN, CTRY_JAPAN39},
432
433 {MKK10_MKKA, MKK10, MKKA,
434 DISALLOW_ADHOC_11A | DISALLOW_ADHOC_11A_TURB | NEED_NFC |
435 LIMIT_FRAME_4MS, NEED_NFC, PSCAN_MKK2 | PSCAN_MKK3, CTRY_JAPAN35},
436 {MKK10_FCCA, MKK10, FCCA,
437 DISALLOW_ADHOC_11A_TURB | NEED_NFC | LIMIT_FRAME_4MS, NEED_NFC,
438 NO_PSCAN, CTRY_JAPAN41},
439 {MKK10_MKKA1, MKK10, MKKA,
440 DISALLOW_ADHOC_11A_TURB | NEED_NFC | LIMIT_FRAME_4MS, NEED_NFC,
441 PSCAN_MKKA1 | PSCAN_MKKA1_G, CTRY_JAPAN42},
442 {MKK10_MKKA2, MKK10, MKKA,
443 DISALLOW_ADHOC_11A_TURB | NEED_NFC | LIMIT_FRAME_4MS, NEED_NFC,
444 PSCAN_MKKA2 | PSCAN_MKKA2_G, CTRY_JAPAN44},
445 {MKK10_MKKC, MKK10, MKKC,
446 DISALLOW_ADHOC_11A_TURB | NEED_NFC | LIMIT_FRAME_4MS, NEED_NFC,
447 NO_PSCAN, CTRY_JAPAN43},
448
449 {MKK11_MKKA, MKK11, MKKA,
450 DISALLOW_ADHOC_11A_TURB | NEED_NFC | LIMIT_FRAME_4MS, NEED_NFC,
451 PSCAN_MKK3, CTRY_JAPAN45},
452 {MKK11_FCCA, MKK11, FCCA,
453 DISALLOW_ADHOC_11A_TURB | NEED_NFC | LIMIT_FRAME_4MS, NEED_NFC,
454 PSCAN_MKK3, CTRY_JAPAN46},
455 {MKK11_MKKA1, MKK11, MKKA,
456 DISALLOW_ADHOC_11A_TURB | NEED_NFC | LIMIT_FRAME_4MS, NEED_NFC,
457 PSCAN_MKK3 | PSCAN_MKKA1 | PSCAN_MKKA1_G, CTRY_JAPAN47},
458 {MKK11_MKKA2, MKK11, MKKA,
459 DISALLOW_ADHOC_11A_TURB | NEED_NFC | LIMIT_FRAME_4MS, NEED_NFC,
460 PSCAN_MKK3 | PSCAN_MKKA2 | PSCAN_MKKA2_G, CTRY_JAPAN49},
461 {MKK11_MKKC, MKK11, MKKC,
462 DISALLOW_ADHOC_11A_TURB | NEED_NFC | LIMIT_FRAME_4MS, NEED_NFC,
463 PSCAN_MKK3, CTRY_JAPAN48},
464
465 {MKK12_MKKA, MKK12, MKKA,
466 DISALLOW_ADHOC_11A_TURB | NEED_NFC | LIMIT_FRAME_4MS, NEED_NFC,
467 PSCAN_MKK1 | PSCAN_MKK3, CTRY_JAPAN50},
468 {MKK12_FCCA, MKK12, FCCA,
469 DISALLOW_ADHOC_11A_TURB | NEED_NFC | LIMIT_FRAME_4MS, NEED_NFC,
470 PSCAN_MKK1 | PSCAN_MKK3, CTRY_JAPAN51},
471 {MKK12_MKKA1, MKK12, MKKA,
472 DISALLOW_ADHOC_11A_TURB | NEED_NFC | LIMIT_FRAME_4MS, NEED_NFC,
473 PSCAN_MKK1 | PSCAN_MKK3 | PSCAN_MKKA1 | PSCAN_MKKA1_G,
474 CTRY_JAPAN52},
475 {MKK12_MKKA2, MKK12, MKKA,
476 DISALLOW_ADHOC_11A_TURB | NEED_NFC | LIMIT_FRAME_4MS, NEED_NFC,
477 PSCAN_MKK1 | PSCAN_MKK3 | PSCAN_MKKA2 | PSCAN_MKKA2_G,
478 CTRY_JAPAN54},
479 {MKK12_MKKC, MKK12, MKKC,
480 DISALLOW_ADHOC_11A_TURB | NEED_NFC | LIMIT_FRAME_4MS, NEED_NFC,
481 PSCAN_MKK1 | PSCAN_MKK3, CTRY_JAPAN53},
482
483 {MKK13_MKKB, MKK13, MKKA,
484 DISALLOW_ADHOC_11A | DISALLOW_ADHOC_11A_TURB | NEED_NFC |
485 LIMIT_FRAME_4MS, NEED_NFC,
486 PSCAN_MKK1 | PSCAN_MKK3 | PSCAN_MKKA | PSCAN_MKKA_G,
487 CTRY_JAPAN57},
488
489 {MKK14_MKKA1, MKK14, MKKA,
490 DISALLOW_ADHOC_11A_TURB | NEED_NFC | LIMIT_FRAME_4MS, NEED_NFC,
491 PSCAN_MKK1 | PSCAN_MKKA1 | PSCAN_MKKA1_G, CTRY_JAPAN58},
492 {MKK15_MKKA1, MKK15, MKKA,
493 DISALLOW_ADHOC_11A_TURB | NEED_NFC | LIMIT_FRAME_4MS, NEED_NFC,
494 PSCAN_MKK1 | PSCAN_MKKA1 | PSCAN_MKKA1_G, CTRY_JAPAN59},
495
496 {WOR0_WORLD, WOR0_WORLD, WOR0_WORLD, NO_REQ, NO_REQ, PSCAN_DEFER,
497 0},
498 {WOR1_WORLD, WOR1_WORLD, WOR1_WORLD,
499 DISALLOW_ADHOC_11A | DISALLOW_ADHOC_11A_TURB, NO_REQ, PSCAN_DEFER,
500 0},
501 {WOR2_WORLD, WOR2_WORLD, WOR2_WORLD, DISALLOW_ADHOC_11A_TURB,
502 NO_REQ, PSCAN_DEFER, 0},
503 {WOR3_WORLD, WOR3_WORLD, WOR3_WORLD, NO_REQ, NO_REQ, PSCAN_DEFER,
504 0},
505 {WOR4_WORLD, WOR4_WORLD, WOR4_WORLD,
506 DISALLOW_ADHOC_11A | DISALLOW_ADHOC_11A_TURB, NO_REQ, PSCAN_DEFER,
507 0},
508 {WOR5_ETSIC, WOR5_ETSIC, WOR5_ETSIC,
509 DISALLOW_ADHOC_11A | DISALLOW_ADHOC_11A_TURB, NO_REQ, PSCAN_DEFER,
510 0},
511 {WOR01_WORLD, WOR01_WORLD, WOR01_WORLD, NO_REQ, NO_REQ,
512 PSCAN_DEFER, 0},
513 {WOR02_WORLD, WOR02_WORLD, WOR02_WORLD, NO_REQ, NO_REQ,
514 PSCAN_DEFER, 0},
515 {EU1_WORLD, EU1_WORLD, EU1_WORLD, NO_REQ, NO_REQ, PSCAN_DEFER, 0},
516 {WOR9_WORLD, WOR9_WORLD, WOR9_WORLD,
517 DISALLOW_ADHOC_11A | DISALLOW_ADHOC_11A_TURB, NO_REQ, PSCAN_DEFER,
518 0},
519 {WORA_WORLD, WORA_WORLD, WORA_WORLD,
520 DISALLOW_ADHOC_11A | DISALLOW_ADHOC_11A_TURB, NO_REQ, PSCAN_DEFER,
521 0},
522 {WORB_WORLD, WORB_WORLD, WORB_WORLD,
523 DISALLOW_ADHOC_11A | DISALLOW_ADHOC_11A_TURB, NO_REQ, PSCAN_DEFER,
524 0},
525};
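/*
 * regDomainPairs[] is the lookup table used to resolve an EEPROM pair
 * code into its per-band regdomains.  The WORx/EU1 world-roaming
 * entries map onto themselves because both bands share one superset
 * domain, and PSCAN_DEFER presumably defers the passive-scan decision
 * to the per-band frequency tables further down.
 */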
526
527#define NO_INTERSECT_REQ 0xFFFFFFFF
528#define NO_UNION_REQ 0
529
530static struct country_code_to_enum_rd allCountries[] = {
531 {CTRY_DEBUG, NO_ENUMRD, "DB", "DEBUG", YES, YES, YES, YES, YES,
532 YES, YES, 7000},
533 {CTRY_DEFAULT, DEF_REGDMN, "NA", "NO_COUNTRY_SET", YES, YES, YES,
534 YES, YES, YES, YES, 7000},
535 {CTRY_ALBANIA, NULL1_WORLD, "AL", "ALBANIA", YES, NO, YES, YES, NO,
536 NO, NO, 7000},
537 {CTRY_ALGERIA, NULL1_WORLD, "DZ", "ALGERIA", YES, NO, YES, YES, NO,
538 NO, NO, 7000},
539 {CTRY_ARGENTINA, APL3_WORLD, "AR", "ARGENTINA", YES, NO, NO, YES,
540 NO, YES, NO, 7000},
541 {CTRY_ARMENIA, ETSI4_WORLD, "AM", "ARMENIA", YES, NO, YES, YES,
542 YES, NO, NO, 7000},
543 {CTRY_AUSTRALIA, FCC2_WORLD, "AU", "AUSTRALIA", YES, YES, YES, YES,
544 YES, YES, YES, 7000},
545 {CTRY_AUSTRALIA2, FCC6_WORLD, "AU", "AUSTRALIA2", YES, YES, YES,
546 YES, YES, YES, YES, 7000},
547 {CTRY_AUSTRIA, ETSI1_WORLD, "AT", "AUSTRIA", YES, NO, YES, YES,
548 YES, YES, YES, 7000},
549 {CTRY_AZERBAIJAN, ETSI4_WORLD, "AZ", "AZERBAIJAN", YES, YES, YES,
550 YES, YES, YES, YES, 7000},
551 {CTRY_BAHRAIN, APL6_WORLD, "BH", "BAHRAIN", YES, NO, YES, YES, YES,
552 YES, NO, 7000},
553 {CTRY_BELARUS, ETSI1_WORLD, "BY", "BELARUS", YES, NO, YES, YES,
554 YES, YES, YES, 7000},
555 {CTRY_BELGIUM, ETSI1_WORLD, "BE", "BELGIUM", YES, NO, YES, YES,
556 YES, YES, YES, 7000},
557 {CTRY_BELGIUM2, ETSI4_WORLD, "BL", "BELGIUM", YES, NO, YES, YES,
558 YES, YES, YES, 7000},
559 {CTRY_BELIZE, APL1_ETSIC, "BZ", "BELIZE", YES, YES, YES, YES, YES,
560 YES, YES, 7000},
 561	{CTRY_BOLIVIA, APL1_ETSIC, "BO", "BOLIVIA", YES, YES, YES, YES, YES,
562 YES, YES, 7000},
563 {CTRY_BOSNIA_HERZ, ETSI1_WORLD, "BA", "BOSNIA_HERZGOWINA", YES, NO,
564 YES, YES, YES, YES, NO, 7000},
565 {CTRY_BRAZIL, FCC3_WORLD, "BR", "BRAZIL", YES, NO, NO, YES, NO,
566 YES, NO, 7000},
567 {CTRY_BRUNEI_DARUSSALAM, APL1_WORLD, "BN", "BRUNEI DARUSSALAM",
568 YES, YES, YES, YES, YES, YES, YES, 7000},
569 {CTRY_BULGARIA, ETSI6_WORLD, "BG", "BULGARIA", YES, NO, YES, YES,
570 YES, YES, YES, 7000},
571 {CTRY_CANADA, FCC2_FCCA, "CA", "CANADA", YES, YES, YES, YES, YES,
572 YES, YES, 7000},
573 {CTRY_CANADA2, FCC6_FCCA, "CA", "CANADA2", YES, YES, YES, YES, YES,
574 YES, YES, 7000},
575 {CTRY_CHILE, APL6_WORLD, "CL", "CHILE", YES, YES, YES, YES, YES,
576 YES, YES, 7000},
577 {CTRY_CHINA, APL1_WORLD, "CN", "CHINA", YES, YES, YES, YES, YES,
578 YES, YES, 7000},
579 {CTRY_COLOMBIA, FCC1_FCCA, "CO", "COLOMBIA", YES, NO, YES, YES,
580 YES, YES, NO, 7000},
581 {CTRY_COSTA_RICA, FCC1_WORLD, "CR", "COSTA RICA", YES, NO, YES,
582 YES, YES, YES, NO, 7000},
583 {CTRY_CROATIA, ETSI3_WORLD, "HR", "CROATIA", YES, NO, YES, YES,
584 YES, YES, NO, 7000},
585 {CTRY_CYPRUS, ETSI1_WORLD, "CY", "CYPRUS", YES, YES, YES, YES, YES,
586 YES, YES, 7000},
587 {CTRY_CZECH, ETSI3_WORLD, "CZ", "CZECH REPUBLIC", YES, NO, YES,
588 YES, YES, YES, YES, 7000},
589 {CTRY_DENMARK, ETSI1_WORLD, "DK", "DENMARK", YES, NO, YES, YES,
590 YES, YES, YES, 7000},
591 {CTRY_DOMINICAN_REPUBLIC, FCC1_FCCA, "DO", "DOMINICAN REPUBLIC",
592 YES, YES, YES, YES, YES, YES, YES, 7000},
593 {CTRY_ECUADOR, FCC1_WORLD, "EC", "ECUADOR", YES, NO, NO, YES, YES,
594 YES, NO, 7000},
595 {CTRY_EGYPT, ETSI3_WORLD, "EG", "EGYPT", YES, NO, YES, YES, YES,
596 YES, NO, 7000},
597 {CTRY_EL_SALVADOR, FCC1_WORLD, "SV", "EL SALVADOR", YES, NO, YES,
598 YES, YES, YES, NO, 7000},
599 {CTRY_ESTONIA, ETSI1_WORLD, "EE", "ESTONIA", YES, NO, YES, YES,
600 YES, YES, YES, 7000},
601 {CTRY_FINLAND, ETSI1_WORLD, "FI", "FINLAND", YES, NO, YES, YES,
602 YES, YES, YES, 7000},
603 {CTRY_FRANCE, ETSI1_WORLD, "FR", "FRANCE", YES, NO, YES, YES, YES,
604 YES, YES, 7000},
605 {CTRY_GEORGIA, ETSI4_WORLD, "GE", "GEORGIA", YES, YES, YES, YES,
606 YES, YES, YES, 7000},
607 {CTRY_GERMANY, ETSI1_WORLD, "DE", "GERMANY", YES, NO, YES, YES,
608 YES, YES, YES, 7000},
609 {CTRY_GREECE, ETSI1_WORLD, "GR", "GREECE", YES, NO, YES, YES, YES,
610 YES, YES, 7000},
611 {CTRY_GUATEMALA, FCC1_FCCA, "GT", "GUATEMALA", YES, YES, YES, YES,
612 YES, YES, YES, 7000},
613 {CTRY_HONDURAS, NULL1_WORLD, "HN", "HONDURAS", YES, NO, YES, YES,
614 YES, NO, NO, 7000},
615 {CTRY_HONG_KONG, FCC2_WORLD, "HK", "HONG KONG", YES, YES, YES, YES,
616 YES, YES, YES, 7000},
617 {CTRY_HUNGARY, ETSI1_WORLD, "HU", "HUNGARY", YES, NO, YES, YES,
618 YES, YES, YES, 7000},
619 {CTRY_ICELAND, ETSI1_WORLD, "IS", "ICELAND", YES, NO, YES, YES,
620 YES, YES, YES, 7000},
621 {CTRY_INDIA, APL6_WORLD, "IN", "INDIA", YES, NO, YES, YES, YES,
622 YES, NO, 7000},
623 {CTRY_INDONESIA, APL1_WORLD, "ID", "INDONESIA", YES, NO, YES, YES,
624 YES, YES, NO, 7000},
625 {CTRY_IRAN, APL1_WORLD, "IR", "IRAN", YES, YES, YES, YES, YES, YES,
626 YES, 7000},
627 {CTRY_IRELAND, ETSI1_WORLD, "IE", "IRELAND", YES, NO, YES, YES,
628 YES, YES, YES, 7000},
629 {CTRY_ISRAEL, NULL1_WORLD, "IL", "ISRAEL", YES, NO, YES, YES, YES,
630 NO, NO, 7000},
631 {CTRY_ITALY, ETSI1_WORLD, "IT", "ITALY", YES, NO, YES, YES, YES,
632 YES, YES, 7000},
633 {CTRY_JAMAICA, ETSI1_WORLD, "JM", "JAMAICA", YES, NO, YES, YES,
634 YES, YES, YES, 7000},
635
636 {CTRY_JAPAN, MKK1_MKKA, "JP", "JAPAN", YES, NO, NO, YES, YES, YES,
637 YES, 7000},
638 {CTRY_JAPAN1, MKK1_MKKB, "JP", "JAPAN1", YES, NO, NO, YES, YES,
639 YES, YES, 7000},
640 {CTRY_JAPAN2, MKK1_FCCA, "JP", "JAPAN2", YES, NO, NO, YES, YES,
641 YES, YES, 7000},
642 {CTRY_JAPAN3, MKK2_MKKA, "JP", "JAPAN3", YES, NO, NO, YES, YES,
643 YES, YES, 7000},
644 {CTRY_JAPAN4, MKK1_MKKA1, "JP", "JAPAN4", YES, NO, NO, YES, YES,
645 YES, YES, 7000},
646 {CTRY_JAPAN5, MKK1_MKKA2, "JP", "JAPAN5", YES, NO, NO, YES, YES,
647 YES, YES, 7000},
648 {CTRY_JAPAN6, MKK1_MKKC, "JP", "JAPAN6", YES, NO, NO, YES, YES,
649 YES, YES, 7000},
650
651 {CTRY_JAPAN7, MKK3_MKKB, "JP", "JAPAN7", YES, NO, NO, YES, YES,
652 YES, YES, 7000},
653 {CTRY_JAPAN8, MKK3_MKKA2, "JP", "JAPAN8", YES, NO, NO, YES, YES,
654 YES, YES, 7000},
655 {CTRY_JAPAN9, MKK3_MKKC, "JP", "JAPAN9", YES, NO, NO, YES, YES,
656 YES, YES, 7000},
657
658 {CTRY_JAPAN10, MKK4_MKKB, "JP", "JAPAN10", YES, NO, NO, YES, YES,
659 YES, YES, 7000},
660 {CTRY_JAPAN11, MKK4_MKKA2, "JP", "JAPAN11", YES, NO, NO, YES, YES,
661 YES, YES, 7000},
662 {CTRY_JAPAN12, MKK4_MKKC, "JP", "JAPAN12", YES, NO, NO, YES, YES,
663 YES, YES, 7000},
664
665 {CTRY_JAPAN13, MKK5_MKKB, "JP", "JAPAN13", YES, NO, NO, YES, YES,
666 YES, YES, 7000},
667 {CTRY_JAPAN14, MKK5_MKKA2, "JP", "JAPAN14", YES, NO, NO, YES, YES,
668 YES, YES, 7000},
669 {CTRY_JAPAN15, MKK5_MKKC, "JP", "JAPAN15", YES, NO, NO, YES, YES,
670 YES, YES, 7000},
671
672 {CTRY_JAPAN16, MKK6_MKKB, "JP", "JAPAN16", YES, NO, NO, YES, YES,
673 YES, YES, 7000},
674 {CTRY_JAPAN17, MKK6_MKKA2, "JP", "JAPAN17", YES, NO, NO, YES, YES,
675 YES, YES, 7000},
676 {CTRY_JAPAN18, MKK6_MKKC, "JP", "JAPAN18", YES, NO, NO, YES, YES,
677 YES, YES, 7000},
678
679 {CTRY_JAPAN19, MKK7_MKKB, "JP", "JAPAN19", YES, NO, NO, YES, YES,
680 YES, YES, 7000},
681 {CTRY_JAPAN20, MKK7_MKKA2, "JP", "JAPAN20", YES, NO, NO, YES, YES,
682 YES, YES, 7000},
683 {CTRY_JAPAN21, MKK7_MKKC, "JP", "JAPAN21", YES, NO, NO, YES, YES,
684 YES, YES, 7000},
685
686 {CTRY_JAPAN22, MKK8_MKKB, "JP", "JAPAN22", YES, NO, NO, YES, YES,
687 YES, YES, 7000},
688 {CTRY_JAPAN23, MKK8_MKKA2, "JP", "JAPAN23", YES, NO, NO, YES, YES,
689 YES, YES, 7000},
690 {CTRY_JAPAN24, MKK8_MKKC, "JP", "JAPAN24", YES, NO, NO, YES, YES,
691 YES, YES, 7000},
692
693 {CTRY_JAPAN25, MKK3_MKKA, "JP", "JAPAN25", YES, NO, NO, YES, YES,
694 YES, YES, 7000},
695 {CTRY_JAPAN26, MKK3_MKKA1, "JP", "JAPAN26", YES, NO, NO, YES, YES,
696 YES, YES, 7000},
697 {CTRY_JAPAN27, MKK3_FCCA, "JP", "JAPAN27", YES, NO, NO, YES, YES,
698 YES, YES, 7000},
699 {CTRY_JAPAN28, MKK4_MKKA1, "JP", "JAPAN28", YES, NO, NO, YES, YES,
700 YES, YES, 7000},
701 {CTRY_JAPAN29, MKK4_FCCA, "JP", "JAPAN29", YES, NO, NO, YES, YES,
702 YES, YES, 7000},
703 {CTRY_JAPAN30, MKK6_MKKA1, "JP", "JAPAN30", YES, NO, NO, YES, YES,
704 YES, YES, 7000},
705 {CTRY_JAPAN31, MKK6_FCCA, "JP", "JAPAN31", YES, NO, NO, YES, YES,
706 YES, YES, 7000},
707 {CTRY_JAPAN32, MKK7_MKKA1, "JP", "JAPAN32", YES, NO, NO, YES, YES,
708 YES, YES, 7000},
709 {CTRY_JAPAN33, MKK7_FCCA, "JP", "JAPAN33", YES, NO, NO, YES, YES,
710 YES, YES, 7000},
711 {CTRY_JAPAN34, MKK9_MKKA, "JP", "JAPAN34", YES, NO, NO, YES, YES,
712 YES, YES, 7000},
713 {CTRY_JAPAN35, MKK10_MKKA, "JP", "JAPAN35", YES, NO, NO, YES, YES,
714 YES, YES, 7000},
715 {CTRY_JAPAN36, MKK4_MKKA, "JP", "JAPAN36", YES, NO, NO, YES, YES,
716 YES, YES, 7000},
717 {CTRY_JAPAN37, MKK9_FCCA, "JP", "JAPAN37", YES, NO, NO, YES, YES,
718 YES, YES, 7000},
719 {CTRY_JAPAN38, MKK9_MKKA1, "JP", "JAPAN38", YES, NO, NO, YES, YES,
720 YES, YES, 7000},
721 {CTRY_JAPAN39, MKK9_MKKC, "JP", "JAPAN39", YES, NO, NO, YES, YES,
722 YES, YES, 7000},
723 {CTRY_JAPAN40, MKK9_MKKA2, "JP", "JAPAN40", YES, NO, NO, YES, YES,
724 YES, YES, 7000},
725 {CTRY_JAPAN41, MKK10_FCCA, "JP", "JAPAN41", YES, NO, NO, YES, YES,
726 YES, YES, 7000},
727 {CTRY_JAPAN42, MKK10_MKKA1, "JP", "JAPAN42", YES, NO, NO, YES, YES,
728 YES, YES, 7000},
729 {CTRY_JAPAN43, MKK10_MKKC, "JP", "JAPAN43", YES, NO, NO, YES, YES,
730 YES, YES, 7000},
731 {CTRY_JAPAN44, MKK10_MKKA2, "JP", "JAPAN44", YES, NO, NO, YES, YES,
732 YES, YES, 7000},
733 {CTRY_JAPAN45, MKK11_MKKA, "JP", "JAPAN45", YES, NO, NO, YES, YES,
734 YES, YES, 7000},
735 {CTRY_JAPAN46, MKK11_FCCA, "JP", "JAPAN46", YES, NO, NO, YES, YES,
736 YES, YES, 7000},
737 {CTRY_JAPAN47, MKK11_MKKA1, "JP", "JAPAN47", YES, NO, NO, YES, YES,
738 YES, YES, 7000},
739 {CTRY_JAPAN48, MKK11_MKKC, "JP", "JAPAN48", YES, NO, NO, YES, YES,
740 YES, YES, 7000},
741 {CTRY_JAPAN49, MKK11_MKKA2, "JP", "JAPAN49", YES, NO, NO, YES, YES,
742 YES, YES, 7000},
743 {CTRY_JAPAN50, MKK12_MKKA, "JP", "JAPAN50", YES, NO, NO, YES, YES,
744 YES, YES, 7000},
745 {CTRY_JAPAN51, MKK12_FCCA, "JP", "JAPAN51", YES, NO, NO, YES, YES,
746 YES, YES, 7000},
747 {CTRY_JAPAN52, MKK12_MKKA1, "JP", "JAPAN52", YES, NO, NO, YES, YES,
748 YES, YES, 7000},
749 {CTRY_JAPAN53, MKK12_MKKC, "JP", "JAPAN53", YES, NO, NO, YES, YES,
750 YES, YES, 7000},
751 {CTRY_JAPAN54, MKK12_MKKA2, "JP", "JAPAN54", YES, NO, NO, YES, YES,
752 YES, YES, 7000},
753
754 {CTRY_JAPAN57, MKK13_MKKB, "JP", "JAPAN57", YES, NO, NO, YES, YES,
755 YES, YES, 7000},
756 {CTRY_JAPAN58, MKK14_MKKA1, "JP", "JAPAN58", YES, NO, NO, YES, YES,
757 YES, YES, 7000},
758 {CTRY_JAPAN59, MKK15_MKKA1, "JP", "JAPAN59", YES, NO, NO, YES, YES,
759 YES, YES, 7000},
760
761 {CTRY_JORDAN, ETSI2_WORLD, "JO", "JORDAN", YES, NO, YES, YES, YES,
762 YES, NO, 7000},
763 {CTRY_KAZAKHSTAN, NULL1_WORLD, "KZ", "KAZAKHSTAN", YES, NO, YES,
764 YES, YES, NO, NO, 7000},
765 {CTRY_KOREA_NORTH, APL9_WORLD, "KP", "NORTH KOREA", YES, NO, NO,
766 YES, YES, YES, YES, 7000},
767 {CTRY_KOREA_ROC, APL9_WORLD, "KR", "KOREA REPUBLIC", YES, NO, NO,
768 YES, NO, YES, NO, 7000},
769 {CTRY_KOREA_ROC2, APL2_WORLD, "K2", "KOREA REPUBLIC2", YES, NO, NO,
770 YES, NO, YES, NO, 7000},
771 {CTRY_KOREA_ROC3, APL9_WORLD, "K3", "KOREA REPUBLIC3", YES, NO, NO,
772 YES, NO, YES, NO, 7000},
773 {CTRY_KUWAIT, NULL1_WORLD, "KW", "KUWAIT", YES, NO, YES, YES, YES,
774 NO, NO, 7000},
775 {CTRY_LATVIA, ETSI1_WORLD, "LV", "LATVIA", YES, NO, YES, YES, YES,
776 YES, YES, 7000},
777 {CTRY_LEBANON, NULL1_WORLD, "LB", "LEBANON", YES, NO, YES, YES,
778 YES, NO, NO, 7000},
779 {CTRY_LIECHTENSTEIN, ETSI1_WORLD, "LI", "LIECHTENSTEIN", YES, NO,
780 YES, YES, YES, YES, YES, 7000},
781 {CTRY_LITHUANIA, ETSI1_WORLD, "LT", "LITHUANIA", YES, NO, YES, YES,
782 YES, YES, YES, 7000},
783 {CTRY_LUXEMBOURG, ETSI1_WORLD, "LU", "LUXEMBOURG", YES, NO, YES,
784 YES, YES, YES, YES, 7000},
785 {CTRY_MACAU, FCC2_WORLD, "MO", "MACAU", YES, YES, YES, YES, YES,
786 YES, YES, 7000},
787 {CTRY_MACEDONIA, NULL1_WORLD, "MK", "MACEDONIA", YES, NO, YES, YES,
788 YES, NO, NO, 7000},
789 {CTRY_MALAYSIA, APL8_WORLD, "MY", "MALAYSIA", YES, NO, NO, YES, NO,
790 YES, NO, 7000},
791 {CTRY_MALTA, ETSI1_WORLD, "MT", "MALTA", YES, NO, YES, YES, YES,
792 YES, YES, 7000},
793 {CTRY_MEXICO, FCC1_FCCA, "MX", "MEXICO", YES, YES, YES, YES, YES,
794 YES, YES, 7000},
795 {CTRY_MONACO, ETSI4_WORLD, "MC", "MONACO", YES, YES, YES, YES, YES,
796 YES, YES, 7000},
797 {CTRY_MOROCCO, NULL1_WORLD, "MA", "MOROCCO", YES, NO, YES, YES,
798 YES, NO, NO, 7000},
799 {CTRY_NEPAL, APL1_WORLD, "NP", "NEPAL", YES, NO, YES, YES, YES,
800 YES, YES, 7000},
801 {CTRY_NETHERLANDS, ETSI1_WORLD, "NL", "NETHERLANDS", YES, NO, YES,
802 YES, YES, YES, YES, 7000},
803 {CTRY_NETHERLANDS_ANTILLES, ETSI1_WORLD, "AN",
804 "NETHERLANDS-ANTILLES", YES, NO, YES, YES, YES, YES, YES, 7000},
805 {CTRY_NEW_ZEALAND, FCC2_ETSIC, "NZ", "NEW ZEALAND", YES, NO, YES,
806 YES, YES, YES, NO, 7000},
807 {CTRY_NORWAY, ETSI1_WORLD, "NO", "NORWAY", YES, NO, YES, YES, YES,
808 YES, YES, 7000},
809 {CTRY_OMAN, APL6_WORLD, "OM", "OMAN", YES, NO, YES, YES, YES, YES,
810 NO, 7000},
811 {CTRY_PAKISTAN, NULL1_WORLD, "PK", "PAKISTAN", YES, NO, YES, YES,
812 YES, NO, NO, 7000},
813 {CTRY_PANAMA, FCC1_FCCA, "PA", "PANAMA", YES, YES, YES, YES, YES,
814 YES, YES, 7000},
815 {CTRY_PAPUA_NEW_GUINEA, FCC1_WORLD, "PG", "PAPUA NEW GUINEA", YES,
816 YES, YES, YES, YES, YES, YES, 7000},
817 {CTRY_PERU, APL1_WORLD, "PE", "PERU", YES, NO, YES, YES, YES, YES,
818 NO, 7000},
819 {CTRY_PHILIPPINES, APL1_WORLD, "PH", "PHILIPPINES", YES, YES, YES,
820 YES, YES, YES, YES, 7000},
821 {CTRY_POLAND, ETSI1_WORLD, "PL", "POLAND", YES, NO, YES, YES, YES,
822 YES, YES, 7000},
823 {CTRY_PORTUGAL, ETSI1_WORLD, "PT", "PORTUGAL", YES, NO, YES, YES,
824 YES, YES, YES, 7000},
825 {CTRY_PUERTO_RICO, FCC1_FCCA, "PR", "PUERTO RICO", YES, YES, YES,
826 YES, YES, YES, YES, 7000},
827 {CTRY_QATAR, NULL1_WORLD, "QA", "QATAR", YES, NO, YES, YES, YES,
828 NO, NO, 7000},
829 {CTRY_ROMANIA, NULL1_WORLD, "RO", "ROMANIA", YES, NO, YES, YES,
830 YES, NO, NO, 7000},
831 {CTRY_RUSSIA, NULL1_WORLD, "RU", "RUSSIA", YES, NO, YES, YES, YES,
832 NO, NO, 7000},
833 {CTRY_SAUDI_ARABIA, NULL1_WORLD, "SA", "SAUDI ARABIA", YES, NO,
834 YES, YES, YES, NO, NO, 7000},
835 {CTRY_SERBIA_MONTENEGRO, ETSI1_WORLD, "CS", "SERBIA & MONTENEGRO",
836 YES, NO, YES, YES, YES, YES, YES, 7000},
837 {CTRY_SINGAPORE, APL6_WORLD, "SG", "SINGAPORE", YES, YES, YES, YES,
838 YES, YES, YES, 7000},
839 {CTRY_SLOVAKIA, ETSI1_WORLD, "SK", "SLOVAK REPUBLIC", YES, NO, YES,
840 YES, YES, YES, YES, 7000},
841 {CTRY_SLOVENIA, ETSI1_WORLD, "SI", "SLOVENIA", YES, NO, YES, YES,
842 YES, YES, YES, 7000},
843 {CTRY_SOUTH_AFRICA, FCC3_WORLD, "ZA", "SOUTH AFRICA", YES, NO, YES,
844 YES, YES, YES, NO, 7000},
845 {CTRY_SPAIN, ETSI1_WORLD, "ES", "SPAIN", YES, NO, YES, YES, YES,
846 YES, YES, 7000},
847 {CTRY_SRI_LANKA, FCC3_WORLD, "LK", "SRI LANKA", YES, NO, YES, YES,
848 YES, YES, NO, 7000},
849 {CTRY_SWEDEN, ETSI1_WORLD, "SE", "SWEDEN", YES, NO, YES, YES, YES,
850 YES, YES, 7000},
851 {CTRY_SWITZERLAND, ETSI1_WORLD, "CH", "SWITZERLAND", YES, NO, YES,
852 YES, YES, YES, YES, 7000},
853 {CTRY_SYRIA, NULL1_WORLD, "SY", "SYRIA", YES, NO, YES, YES, YES,
854 NO, NO, 7000},
855 {CTRY_TAIWAN, APL3_FCCA, "TW", "TAIWAN", YES, YES, YES, YES, YES,
856 YES, YES, 7000},
857 {CTRY_THAILAND, NULL1_WORLD, "TH", "THAILAND", YES, NO, YES, YES,
858 YES, NO, NO, 7000},
859 {CTRY_TRINIDAD_Y_TOBAGO, ETSI4_WORLD, "TT", "TRINIDAD & TOBAGO",
860 YES, NO, YES, YES, YES, YES, NO, 7000},
861 {CTRY_TUNISIA, ETSI3_WORLD, "TN", "TUNISIA", YES, NO, YES, YES,
862 YES, YES, NO, 7000},
863 {CTRY_TURKEY, ETSI3_WORLD, "TR", "TURKEY", YES, NO, YES, YES, YES,
864 YES, NO, 7000},
865 {CTRY_UKRAINE, NULL1_WORLD, "UA", "UKRAINE", YES, NO, YES, YES,
866 YES, NO, NO, 7000},
867 {CTRY_UAE, NULL1_WORLD, "AE", "UNITED ARAB EMIRATES", YES, NO, YES,
868 YES, YES, NO, NO, 7000},
869 {CTRY_UNITED_KINGDOM, ETSI1_WORLD, "GB", "UNITED KINGDOM", YES, NO,
870 YES, YES, YES, YES, YES, 7000},
871 {CTRY_UNITED_STATES, FCC3_FCCA, "US", "UNITED STATES", YES, YES,
872 YES, YES, YES, YES, YES, 5825},
873 {CTRY_UNITED_STATES_FCC49, FCC4_FCCA, "PS",
874 "UNITED STATES (PUBLIC SAFETY)", YES, YES, YES, YES, YES, YES,
875 YES, 7000},
876 {CTRY_URUGUAY, APL2_WORLD, "UY", "URUGUAY", YES, NO, YES, YES, YES,
877 YES, NO, 7000},
878 {CTRY_UZBEKISTAN, FCC3_FCCA, "UZ", "UZBEKISTAN", YES, YES, YES,
879 YES, YES, YES, YES, 7000},
880 {CTRY_VENEZUELA, APL2_ETSIC, "VE", "VENEZUELA", YES, NO, YES, YES,
881 YES, YES, NO, 7000},
882 {CTRY_VIET_NAM, NULL1_WORLD, "VN", "VIET NAM", YES, NO, YES, YES,
883 YES, NO, NO, 7000},
884 {CTRY_YEMEN, NULL1_WORLD, "YE", "YEMEN", YES, NO, YES, YES, YES,
885 NO, NO, 7000},
886 {CTRY_ZIMBABWE, NULL1_WORLD, "ZW", "ZIMBABWE", YES, NO, YES, YES,
887 YES, NO, NO, 7000}
888};
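/*
 * allCountries[] gives each ISO 3166 country its default regdomain
 * pair, two-letter code and name, the YES/NO columns in
 * country_code_to_enum_rd field order (11g, 11a turbo, 11g turbo,
 * ng20, ng40, na20, na40), and the first outdoor-use channel in MHz
 * (7000 apparently meaning no outdoor restriction; the US entry stops
 * at 5825).
 */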
889
890enum {
891 NO_DFS = 0x0000000000000000ULL,
892 DFS_FCC3 = 0x0000000000000001ULL,
893 DFS_ETSI = 0x0000000000000002ULL,
894 DFS_MKK4 = 0x0000000000000004ULL,
895};
896
897enum {
898 F1_4915_4925,
899 F1_4935_4945,
900 F1_4920_4980,
901 F1_4942_4987,
902 F1_4945_4985,
903 F1_4950_4980,
904 F1_5035_5040,
905 F1_5040_5080,
906 F1_5055_5055,
907
908 F1_5120_5240,
909
910 F1_5170_5230,
911 F2_5170_5230,
912
913 F1_5180_5240,
914 F2_5180_5240,
915 F3_5180_5240,
916 F4_5180_5240,
917 F5_5180_5240,
918 F6_5180_5240,
919 F7_5180_5240,
920 F8_5180_5240,
921
922 F1_5180_5320,
923
924 F1_5240_5280,
925
926 F1_5260_5280,
927
928 F1_5260_5320,
929 F2_5260_5320,
930 F3_5260_5320,
931 F4_5260_5320,
932 F5_5260_5320,
933 F6_5260_5320,
934
935 F1_5260_5700,
936
937 F1_5280_5320,
938
939 F1_5500_5580,
940
941 F1_5500_5620,
942
943 F1_5500_5700,
944 F2_5500_5700,
945 F3_5500_5700,
946 F4_5500_5700,
947 F5_5500_5700,
948
949 F1_5660_5700,
950
951 F1_5745_5805,
952 F2_5745_5805,
953 F3_5745_5805,
954
955 F1_5745_5825,
956 F2_5745_5825,
957 F3_5745_5825,
958 F4_5745_5825,
959 F5_5745_5825,
960 F6_5745_5825,
961
962 W1_4920_4980,
963 W1_5040_5080,
964 W1_5170_5230,
965 W1_5180_5240,
966 W1_5260_5320,
967 W1_5745_5825,
968 W1_5500_5700,
969 A_DEMO_ALL_CHANNELS
970};
971
972static struct RegDmnFreqBand regDmn5GhzFreq[] = {
973 {4915, 4925, 23, 0, 10, 5, NO_DFS, PSCAN_MKK2, 16},
974 {4935, 4945, 23, 0, 10, 5, NO_DFS, PSCAN_MKK2, 16},
975 {4920, 4980, 23, 0, 20, 20, NO_DFS, PSCAN_MKK2, 7},
976 {4942, 4987, 27, 6, 5, 5, NO_DFS, PSCAN_FCC, 0},
977 {4945, 4985, 30, 6, 10, 5, NO_DFS, PSCAN_FCC, 0},
978 {4950, 4980, 33, 6, 20, 5, NO_DFS, PSCAN_FCC, 0},
979 {5035, 5040, 23, 0, 10, 5, NO_DFS, PSCAN_MKK2, 12},
980 {5040, 5080, 23, 0, 20, 20, NO_DFS, PSCAN_MKK2, 2},
981 {5055, 5055, 23, 0, 10, 5, NO_DFS, PSCAN_MKK2, 12},
982
983 {5120, 5240, 5, 6, 20, 20, NO_DFS, NO_PSCAN, 0},
984
985 {5170, 5230, 23, 0, 20, 20, NO_DFS, PSCAN_MKK1 | PSCAN_MKK2, 1},
986 {5170, 5230, 20, 0, 20, 20, NO_DFS, PSCAN_MKK1 | PSCAN_MKK2, 1},
987
988 {5180, 5240, 15, 0, 20, 20, NO_DFS, PSCAN_FCC | PSCAN_ETSI, 0},
989 {5180, 5240, 17, 6, 20, 20, NO_DFS, NO_PSCAN, 1},
990 {5180, 5240, 18, 0, 20, 20, NO_DFS, PSCAN_FCC | PSCAN_ETSI, 0},
991 {5180, 5240, 20, 0, 20, 20, NO_DFS, PSCAN_FCC | PSCAN_ETSI, 0},
992 {5180, 5240, 23, 0, 20, 20, NO_DFS, PSCAN_FCC | PSCAN_ETSI, 0},
993 {5180, 5240, 23, 6, 20, 20, NO_DFS, PSCAN_FCC, 0},
994 {5180, 5240, 20, 0, 20, 20, NO_DFS, PSCAN_MKK1 | PSCAN_MKK3, 0},
995 {5180, 5240, 23, 6, 20, 20, NO_DFS, NO_PSCAN, 0},
996
997 {5180, 5320, 20, 6, 20, 20, NO_DFS, PSCAN_ETSI, 0},
998
999 {5240, 5280, 23, 0, 20, 20, DFS_FCC3, PSCAN_FCC | PSCAN_ETSI, 0},
1000
1001 {5260, 5280, 23, 0, 20, 20, DFS_FCC3 | DFS_ETSI,
1002 PSCAN_FCC | PSCAN_ETSI, 0},
1003
1004 {5260, 5320, 18, 0, 20, 20, DFS_FCC3 | DFS_ETSI,
1005 PSCAN_FCC | PSCAN_ETSI, 0},
1006
1007 {5260, 5320, 20, 0, 20, 20, DFS_FCC3 | DFS_ETSI | DFS_MKK4,
1008 PSCAN_FCC | PSCAN_ETSI | PSCAN_MKK3, 0},
1009
1010
1011 {5260, 5320, 20, 6, 20, 20, DFS_FCC3 | DFS_ETSI,
1012 PSCAN_FCC | PSCAN_ETSI, 2},
1013 {5260, 5320, 23, 6, 20, 20, DFS_FCC3 | DFS_ETSI, PSCAN_FCC, 2},
1014 {5260, 5320, 23, 6, 20, 20, DFS_FCC3 | DFS_ETSI, PSCAN_FCC, 0},
1015 {5260, 5320, 30, 0, 20, 20, NO_DFS, NO_PSCAN, 0},
1016
1017 {5260, 5700, 5, 6, 20, 20, DFS_FCC3 | DFS_ETSI, NO_PSCAN, 0},
1018
1019 {5280, 5320, 17, 6, 20, 20, DFS_FCC3 | DFS_ETSI, PSCAN_FCC, 0},
1020
1021 {5500, 5580, 23, 6, 20, 20, DFS_FCC3, PSCAN_FCC, 0},
1022
1023 {5500, 5620, 30, 6, 20, 20, DFS_ETSI, PSCAN_ETSI, 0},
1024
1025 {5500, 5700, 20, 6, 20, 20, DFS_FCC3 | DFS_ETSI, PSCAN_FCC, 4},
1026 {5500, 5700, 27, 0, 20, 20, DFS_FCC3 | DFS_ETSI,
1027 PSCAN_FCC | PSCAN_ETSI, 0},
1028 {5500, 5700, 30, 0, 20, 20, DFS_FCC3 | DFS_ETSI,
1029 PSCAN_FCC | PSCAN_ETSI, 0},
1030 {5500, 5700, 23, 0, 20, 20, DFS_FCC3 | DFS_ETSI | DFS_MKK4,
1031 PSCAN_MKK3 | PSCAN_FCC, 0},
1032 {5500, 5700, 30, 6, 20, 20, DFS_ETSI, PSCAN_ETSI, 0},
1033
1034 {5660, 5700, 23, 6, 20, 20, DFS_FCC3, PSCAN_FCC, 0},
1035
1036 {5745, 5805, 23, 0, 20, 20, NO_DFS, NO_PSCAN, 0},
1037 {5745, 5805, 30, 6, 20, 20, NO_DFS, NO_PSCAN, 0},
1038 {5745, 5805, 30, 6, 20, 20, NO_DFS, PSCAN_ETSI, 0},
1039 {5745, 5825, 5, 6, 20, 20, NO_DFS, NO_PSCAN, 0},
1040 {5745, 5825, 17, 0, 20, 20, NO_DFS, NO_PSCAN, 0},
1041 {5745, 5825, 20, 0, 20, 20, NO_DFS, NO_PSCAN, 0},
1042 {5745, 5825, 30, 0, 20, 20, NO_DFS, NO_PSCAN, 0},
1043 {5745, 5825, 30, 6, 20, 20, NO_DFS, NO_PSCAN, 3},
1044 {5745, 5825, 30, 6, 20, 20, NO_DFS, NO_PSCAN, 0},
1045
1046
1047 {4920, 4980, 30, 0, 20, 20, NO_DFS, PSCAN_WWR, 0},
1048 {5040, 5080, 30, 0, 20, 20, NO_DFS, PSCAN_WWR, 0},
1049 {5170, 5230, 30, 0, 20, 20, NO_DFS, PSCAN_WWR, 0},
1050 {5180, 5240, 30, 0, 20, 20, NO_DFS, PSCAN_WWR, 0},
1051 {5260, 5320, 30, 0, 20, 20, DFS_FCC3 | DFS_ETSI, PSCAN_WWR, 0},
1052 {5745, 5825, 30, 0, 20, 20, NO_DFS, PSCAN_WWR, 0},
1053 {5500, 5700, 30, 0, 20, 20, DFS_FCC3 | DFS_ETSI, PSCAN_WWR, 0},
1054 {4920, 6100, 30, 6, 20, 20, NO_DFS, NO_PSCAN, 0},
1055};
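/*
 * regDmn5GhzFreq[] rows pair up one-to-one with the F*/W* band enum
 * above, in RegDmnFreqBand field order; the W1_* and demo rows at the
 * end back the world-roaming and debug domains.
 */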
1056
1057enum {
1058 T1_5130_5650,
1059 T1_5150_5670,
1060
1061 T1_5200_5200,
1062 T2_5200_5200,
1063 T3_5200_5200,
1064 T4_5200_5200,
1065 T5_5200_5200,
1066 T6_5200_5200,
1067 T7_5200_5200,
1068 T8_5200_5200,
1069
1070 T1_5200_5280,
1071 T2_5200_5280,
1072 T3_5200_5280,
1073 T4_5200_5280,
1074 T5_5200_5280,
1075 T6_5200_5280,
1076
1077 T1_5200_5240,
1078 T1_5210_5210,
1079 T2_5210_5210,
1080 T3_5210_5210,
1081 T4_5210_5210,
1082 T5_5210_5210,
1083 T6_5210_5210,
1084 T7_5210_5210,
1085 T8_5210_5210,
1086 T9_5210_5210,
1087 T10_5210_5210,
1088 T1_5240_5240,
1089
1090 T1_5210_5250,
1091 T1_5210_5290,
1092 T2_5210_5290,
1093 T3_5210_5290,
1094
1095 T1_5280_5280,
1096 T2_5280_5280,
1097 T1_5290_5290,
1098 T2_5290_5290,
1099 T3_5290_5290,
1100 T1_5250_5290,
1101 T2_5250_5290,
1102 T3_5250_5290,
1103 T4_5250_5290,
1104
1105 T1_5540_5660,
1106 T2_5540_5660,
1107 T3_5540_5660,
1108 T1_5760_5800,
1109 T2_5760_5800,
1110 T3_5760_5800,
1111 T4_5760_5800,
1112 T5_5760_5800,
1113 T6_5760_5800,
1114 T7_5760_5800,
1115
1116 T1_5765_5805,
1117 T2_5765_5805,
1118 T3_5765_5805,
1119 T4_5765_5805,
1120 T5_5765_5805,
1121 T6_5765_5805,
1122 T7_5765_5805,
1123 T8_5765_5805,
1124 T9_5765_5805,
1125
1126 WT1_5210_5250,
1127 WT1_5290_5290,
1128 WT1_5540_5660,
1129 WT1_5760_5800,
1130};
1131
1132enum {
1133 F1_2312_2372,
1134 F2_2312_2372,
1135
1136 F1_2412_2472,
1137 F2_2412_2472,
1138 F3_2412_2472,
1139
1140 F1_2412_2462,
1141 F2_2412_2462,
1142
1143 F1_2432_2442,
1144
1145 F1_2457_2472,
1146
1147 F1_2467_2472,
1148
1149 F1_2484_2484,
1150 F2_2484_2484,
1151
1152 F1_2512_2732,
1153
1154 W1_2312_2372,
1155 W1_2412_2412,
1156 W1_2417_2432,
1157 W1_2437_2442,
1158 W1_2447_2457,
1159 W1_2462_2462,
1160 W1_2467_2467,
1161 W2_2467_2467,
1162 W1_2472_2472,
1163 W2_2472_2472,
1164 W1_2484_2484,
1165 W2_2484_2484,
1166};
1167
1168static struct RegDmnFreqBand regDmn2GhzFreq[] = {
1169 {2312, 2372, 5, 6, 20, 5, NO_DFS, NO_PSCAN, 0},
1170 {2312, 2372, 20, 0, 20, 5, NO_DFS, NO_PSCAN, 0},
1171
1172 {2412, 2472, 5, 6, 20, 5, NO_DFS, NO_PSCAN, 0},
1173 {2412, 2472, 20, 0, 20, 5, NO_DFS, PSCAN_MKKA, 0},
1174 {2412, 2472, 30, 0, 20, 5, NO_DFS, NO_PSCAN, 0},
1175
1176 {2412, 2462, 27, 6, 20, 5, NO_DFS, NO_PSCAN, 0},
1177 {2412, 2462, 20, 0, 20, 5, NO_DFS, PSCAN_MKKA, 0},
1178
1179 {2432, 2442, 20, 0, 20, 5, NO_DFS, NO_PSCAN, 0},
1180
1181 {2457, 2472, 20, 0, 20, 5, NO_DFS, NO_PSCAN, 0},
1182
1183 {2467, 2472, 20, 0, 20, 5, NO_DFS, PSCAN_MKKA2 | PSCAN_MKKA, 0},
1184
1185 {2484, 2484, 5, 6, 20, 5, NO_DFS, NO_PSCAN, 0},
1186 {2484, 2484, 20, 0, 20, 5, NO_DFS,
1187 PSCAN_MKKA | PSCAN_MKKA1 | PSCAN_MKKA2, 0},
1188
1189 {2512, 2732, 5, 6, 20, 5, NO_DFS, NO_PSCAN, 0},
1190
1191 {2312, 2372, 20, 0, 20, 5, NO_DFS, NO_PSCAN, 0},
1192 {2412, 2412, 20, 0, 20, 5, NO_DFS, NO_PSCAN, 0},
1193 {2417, 2432, 20, 0, 20, 5, NO_DFS, NO_PSCAN, 0},
1194 {2437, 2442, 20, 0, 20, 5, NO_DFS, NO_PSCAN, 0},
1195 {2447, 2457, 20, 0, 20, 5, NO_DFS, NO_PSCAN, 0},
1196 {2462, 2462, 20, 0, 20, 5, NO_DFS, NO_PSCAN, 0},
1197 {2467, 2467, 20, 0, 20, 5, NO_DFS, PSCAN_WWR | IS_ECM_CHAN, 0},
1198 {2467, 2467, 20, 0, 20, 5, NO_DFS, NO_PSCAN | IS_ECM_CHAN, 0},
1199 {2472, 2472, 20, 0, 20, 5, NO_DFS, PSCAN_WWR | IS_ECM_CHAN, 0},
1200 {2472, 2472, 20, 0, 20, 5, NO_DFS, NO_PSCAN | IS_ECM_CHAN, 0},
1201 {2484, 2484, 20, 0, 20, 5, NO_DFS, PSCAN_WWR | IS_ECM_CHAN, 0},
1202 {2484, 2484, 20, 0, 20, 5, NO_DFS, NO_PSCAN | IS_ECM_CHAN, 0},
1203};
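/*
 * regDmn2GhzFreq[] follows the same convention for the 2.4 GHz 11b
 * band enum above, and the 11g table below repeats it for the G*/WG*
 * entries.
 */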
1204
1205enum {
1206 G1_2312_2372,
1207 G2_2312_2372,
1208
1209 G1_2412_2472,
1210 G2_2412_2472,
1211 G3_2412_2472,
1212
1213 G1_2412_2462,
1214 G2_2412_2462,
1215
1216 G1_2432_2442,
1217
1218 G1_2457_2472,
1219
1220 G1_2512_2732,
1221
1222 G1_2467_2472,
1223
1224 WG1_2312_2372,
1225 WG1_2412_2462,
1226 WG1_2467_2472,
1227 WG2_2467_2472,
1228 G_DEMO_ALL_CHANNELS
1229};
1230
1231static struct RegDmnFreqBand regDmn2Ghz11gFreq[] = {
1232 {2312, 2372, 5, 6, 20, 5, NO_DFS, NO_PSCAN, 0},
1233 {2312, 2372, 20, 0, 20, 5, NO_DFS, NO_PSCAN, 0},
1234
1235 {2412, 2472, 5, 6, 20, 5, NO_DFS, NO_PSCAN, 0},
1236 {2412, 2472, 20, 0, 20, 5, NO_DFS, PSCAN_MKKA_G, 0},
1237 {2412, 2472, 30, 0, 20, 5, NO_DFS, NO_PSCAN, 0},
1238
1239 {2412, 2462, 27, 6, 20, 5, NO_DFS, NO_PSCAN, 0},
1240 {2412, 2462, 20, 0, 20, 5, NO_DFS, PSCAN_MKKA_G, 0},
1241
1242 {2432, 2442, 20, 0, 20, 5, NO_DFS, NO_PSCAN, 0},
1243
1244 {2457, 2472, 20, 0, 20, 5, NO_DFS, NO_PSCAN, 0},
1245
1246 {2512, 2732, 5, 6, 20, 5, NO_DFS, NO_PSCAN, 0},
1247
1248 {2467, 2472, 20, 0, 20, 5, NO_DFS, PSCAN_MKKA2 | PSCAN_MKKA, 0},
1249
1250 {2312, 2372, 20, 0, 20, 5, NO_DFS, NO_PSCAN, 0},
1251 {2412, 2462, 20, 0, 20, 5, NO_DFS, NO_PSCAN, 0},
1252 {2467, 2472, 20, 0, 20, 5, NO_DFS, PSCAN_WWR | IS_ECM_CHAN, 0},
1253 {2467, 2472, 20, 0, 20, 5, NO_DFS, NO_PSCAN | IS_ECM_CHAN, 0},
1254 {2312, 2732, 27, 6, 20, 5, NO_DFS, NO_PSCAN, 0},
1255};
1256
1257enum {
1258 T1_2312_2372,
1259 T1_2437_2437,
1260 T2_2437_2437,
1261 T3_2437_2437,
1262 T1_2512_2732
1263};
1264
1265static struct regDomain regDomains[] = {
1266
1267 {DEBUG_REG_DMN, FCC, DFS_FCC3, NO_PSCAN, NO_REQ,
1268 BM(A_DEMO_ALL_CHANNELS, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
1269 -1),
1270 BM(T1_5130_5650, T1_5150_5670, -1, -1, -1, -1, -1, -1, -1, -1, -1,
1271 -1),
1272 BM(T1_5200_5240, T1_5280_5280, T1_5540_5660, T1_5765_5805, -1, -1,
1273 -1, -1, -1, -1, -1, -1),
1274 BM(F1_2312_2372, F1_2412_2472, F1_2484_2484, F1_2512_2732, -1, -1,
1275 -1, -1, -1, -1, -1, -1),
1276 BM(G_DEMO_ALL_CHANNELS, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
1277 -1),
1278 BM(T1_2312_2372, T1_2437_2437, T1_2512_2732, -1, -1, -1, -1, -1,
1279 -1, -1, -1, -1)},
1280
1281 {APL1, FCC, NO_DFS, NO_PSCAN, NO_REQ,
1282 BM(F4_5745_5825, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1),
1283 BM(T2_5760_5800, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1),
1284 BM(T1_5765_5805, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1),
1285 BMZERO,
1286 BMZERO,
1287 BMZERO},
1288
1289 {APL2, FCC, NO_DFS, NO_PSCAN, NO_REQ,
1290 BM(F1_5745_5805, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1),
1291 BM(T1_5760_5800, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1),
1292 BM(T2_5765_5805, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1),
1293 BMZERO,
1294 BMZERO,
1295 BMZERO},
1296
1297 {APL3, FCC, NO_DFS, NO_PSCAN, NO_REQ,
1298 BM(F1_5280_5320, F2_5745_5805, -1, -1, -1, -1, -1, -1, -1, -1, -1,
1299 -1),
1300 BM(T1_5290_5290, T1_5760_5800, -1, -1, -1, -1, -1, -1, -1, -1, -1,
1301 -1),
1302 BM(T1_5765_5805, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1),
1303 BMZERO,
1304 BMZERO,
1305 BMZERO},
1306
1307 {APL4, FCC, NO_DFS, NO_PSCAN, NO_REQ,
1308 BM(F4_5180_5240, F3_5745_5825, -1, -1, -1, -1, -1, -1, -1, -1, -1,
1309 -1),
1310 BM(T1_5210_5210, T3_5760_5800, -1, -1, -1, -1, -1, -1, -1, -1, -1,
1311 -1),
1312 BM(T1_5200_5200, T3_5765_5805, -1, -1, -1, -1, -1, -1, -1, -1, -1,
1313 -1),
1314 BMZERO,
1315 BMZERO,
1316 BMZERO},
1317
1318 {APL5, FCC, NO_DFS, NO_PSCAN, NO_REQ,
1319 BM(F2_5745_5825, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1),
1320 BM(T4_5760_5800, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1),
1321 BM(T4_5765_5805, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1),
1322 BMZERO,
1323 BMZERO,
1324 BMZERO},
1325
1326 {APL6, ETSI, DFS_ETSI, PSCAN_FCC_T | PSCAN_FCC, NO_REQ,
1327 BM(F4_5180_5240, F2_5260_5320, F3_5745_5825, -1, -1, -1, -1, -1,
1328 -1, -1, -1, -1),
1329 BM(T2_5210_5210, T1_5250_5290, T1_5760_5800, -1, -1, -1, -1, -1,
1330 -1, -1, -1, -1),
1331 BM(T1_5200_5280, T5_5765_5805, -1, -1, -1, -1, -1, -1, -1, -1, -1,
1332 -1),
1333 BMZERO,
1334 BMZERO,
1335 BMZERO},
1336
1337 {APL7, ETSI, DFS_ETSI, PSCAN_ETSI, NO_REQ,
1338 BM(F1_5280_5320, F5_5500_5700, F3_5745_5805, -1, -1, -1, -1, -1,
1339 -1, -1, -1, -1),
1340 BM(T3_5290_5290, T5_5760_5800, -1, -1, -1, -1, -1, -1, -1, -1, -1,
1341 -1),
1342 BM(T1_5540_5660, T6_5765_5805, -1, -1, -1, -1, -1, -1, -1, -1, -1,
1343 -1),
1344 BMZERO,
1345 BMZERO,
1346 BMZERO},
1347
1348 {APL8, ETSI, NO_DFS, NO_PSCAN,
1349 DISALLOW_ADHOC_11A | DISALLOW_ADHOC_11A_TURB,
1350 BM(F6_5260_5320, F4_5745_5825, -1, -1, -1, -1, -1, -1, -1, -1, -1,
1351 -1),
1352 BM(T2_5290_5290, T2_5760_5800, -1, -1, -1, -1, -1, -1, -1, -1, -1,
1353 -1),
1354 BM(T1_5280_5280, T1_5765_5805, -1, -1, -1, -1, -1, -1, -1, -1, -1,
1355 -1),
1356 BMZERO,
1357 BMZERO,
1358 BMZERO},
1359
1360 {APL9, ETSI, DFS_ETSI, PSCAN_ETSI,
1361 DISALLOW_ADHOC_11A | DISALLOW_ADHOC_11A_TURB,
1362 BM(F1_5180_5320, F1_5500_5620, F3_5745_5805, -1, -1, -1, -1, -1,
1363 -1, -1, -1, -1),
1364 BM(T3_5290_5290, T5_5760_5800, -1, -1, -1, -1, -1, -1, -1, -1, -1,
1365 -1),
1366 BM(T1_5540_5660, T6_5765_5805, -1, -1, -1, -1, -1, -1, -1, -1, -1,
1367 -1),
1368 BMZERO,
1369 BMZERO,
1370 BMZERO},
1371
1372 {APL10, ETSI, DFS_ETSI, PSCAN_ETSI,
1373 DISALLOW_ADHOC_11A | DISALLOW_ADHOC_11A_TURB,
1374 BM(F1_5180_5320, F5_5500_5700, F3_5745_5805, -1, -1, -1, -1, -1,
1375 -1, -1, -1, -1),
1376 BM(T3_5290_5290, T5_5760_5800, -1, -1, -1, -1, -1, -1, -1, -1, -1,
1377 -1),
1378 BM(T1_5540_5660, T6_5765_5805, -1, -1, -1, -1, -1, -1, -1, -1, -1,
1379 -1),
1380 BMZERO,
1381 BMZERO,
1382 BMZERO},
1383
1384 {ETSI1, ETSI, DFS_ETSI, PSCAN_ETSI,
1385 DISALLOW_ADHOC_11A | DISALLOW_ADHOC_11A_TURB,
1386 BM(F4_5180_5240, F2_5260_5320, F2_5500_5700, -1, -1, -1, -1, -1,
1387 -1, -1, -1, -1),
1388 BM(T1_5210_5290, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1),
1389 BM(T2_5200_5280, T2_5540_5660, -1, -1, -1, -1, -1, -1, -1, -1, -1,
1390 -1),
1391 BMZERO,
1392 BMZERO,
1393 BMZERO},
1394
1395 {ETSI2, ETSI, DFS_ETSI, PSCAN_ETSI,
1396 DISALLOW_ADHOC_11A | DISALLOW_ADHOC_11A_TURB,
1397 BM(F3_5180_5240, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1),
1398 BM(T3_5210_5210, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1),
1399 BM(T2_5200_5200, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1),
1400 BMZERO,
1401 BMZERO,
1402 BMZERO},
1403
1404 {ETSI3, ETSI, DFS_ETSI, PSCAN_ETSI,
1405 DISALLOW_ADHOC_11A | DISALLOW_ADHOC_11A_TURB,
1406 BM(F4_5180_5240, F2_5260_5320, -1, -1, -1, -1, -1, -1, -1, -1, -1,
1407 -1),
1408 BM(T1_5210_5290, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1),
1409 BM(T2_5200_5280, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1),
1410 BMZERO,
1411 BMZERO,
1412 BMZERO},
1413
1414 {ETSI4, ETSI, DFS_ETSI, PSCAN_ETSI,
1415 DISALLOW_ADHOC_11A | DISALLOW_ADHOC_11A_TURB,
1416 BM(F3_5180_5240, F1_5260_5320, -1, -1, -1, -1, -1, -1, -1, -1, -1,
1417 -1),
1418 BM(T2_5210_5290, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1),
1419 BM(T3_5200_5280, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1),
1420 BMZERO,
1421 BMZERO,
1422 BMZERO},
1423
1424 {ETSI5, ETSI, DFS_ETSI, PSCAN_ETSI,
1425 DISALLOW_ADHOC_11A | DISALLOW_ADHOC_11A_TURB,
1426 BM(F1_5180_5240, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1),
1427 BM(T4_5210_5210, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1),
1428 BM(T3_5200_5200, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1),
1429 BMZERO,
1430 BMZERO,
1431 BMZERO},
1432
1433 {ETSI6, ETSI, DFS_ETSI, PSCAN_ETSI,
1434 DISALLOW_ADHOC_11A | DISALLOW_ADHOC_11A_TURB,
1435 BM(F5_5180_5240, F1_5260_5280, F3_5500_5700, -1, -1, -1, -1, -1,
1436 -1, -1, -1, -1),
1437 BM(T1_5210_5250, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1),
1438 BM(T4_5200_5280, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1),
1439 BMZERO,
1440 BMZERO,
1441 BMZERO},
1442
1443 {FCC1, FCC, NO_DFS, NO_PSCAN, NO_REQ,
1444 BM(F2_5180_5240, F4_5260_5320, F5_5745_5825, -1, -1, -1, -1, -1,
1445 -1, -1, -1, -1),
1446 BM(T6_5210_5210, T2_5250_5290, T6_5760_5800, -1, -1, -1, -1, -1,
1447 -1, -1, -1, -1),
1448 BM(T1_5200_5240, T2_5280_5280, T7_5765_5805, -1, -1, -1, -1, -1,
1449 -1, -1, -1, -1),
1450 BMZERO,
1451 BMZERO,
1452 BMZERO},
1453
1454 {FCC2, FCC, NO_DFS, NO_PSCAN, NO_REQ,
1455 BM(F6_5180_5240, F5_5260_5320, F6_5745_5825, -1, -1, -1, -1, -1,
1456 -1, -1, -1, -1),
1457 BM(T7_5210_5210, T3_5250_5290, T2_5760_5800, -1, -1, -1, -1, -1,
1458 -1, -1, -1, -1),
1459 BM(T7_5200_5200, T1_5240_5240, T2_5280_5280, T1_5765_5805, -1, -1,
1460 -1, -1, -1, -1, -1, -1),
1461 BMZERO,
1462 BMZERO,
1463 BMZERO},
1464
1465 {FCC3, FCC, DFS_FCC3, PSCAN_FCC | PSCAN_FCC_T, NO_REQ,
1466 BM(F2_5180_5240, F3_5260_5320, F1_5500_5700, F5_5745_5825, -1, -1,
1467 -1, -1, -1, -1, -1, -1),
1468 BM(T6_5210_5210, T2_5760_5800, -1, -1, -1, -1, -1, -1, -1, -1, -1,
1469 -1),
1470 BM(T4_5200_5200, T8_5765_5805, -1, -1, -1, -1, -1, -1, -1, -1, -1,
1471 -1),
1472 BMZERO,
1473 BMZERO,
1474 BMZERO},
1475
1476 {FCC4, FCC, DFS_FCC3, PSCAN_FCC | PSCAN_FCC_T, NO_REQ,
1477 BM(F1_4942_4987, F1_4945_4985, F1_4950_4980, -1, -1, -1, -1, -1,
1478 -1, -1, -1, -1),
1479 BM(T8_5210_5210, T4_5250_5290, T7_5760_5800, -1, -1, -1, -1, -1,
1480 -1, -1, -1, -1),
1481 BM(T1_5200_5240, T1_5280_5280, T9_5765_5805, -1, -1, -1, -1, -1,
1482 -1, -1, -1, -1),
1483 BMZERO,
1484 BMZERO,
1485 BMZERO},
1486
1487 {FCC5, FCC, NO_DFS, NO_PSCAN, NO_REQ,
1488 BM(F2_5180_5240, F6_5745_5825, -1, -1, -1, -1, -1, -1, -1, -1, -1,
1489 -1),
1490 BM(T6_5210_5210, T2_5760_5800, -1, -1, -1, -1, -1, -1, -1, -1, -1,
1491 -1),
1492 BM(T8_5200_5200, T7_5765_5805, -1, -1, -1, -1, -1, -1, -1, -1, -1,
1493 -1),
1494 BMZERO,
1495 BMZERO,
1496 BMZERO},
1497
1498 {FCC6, FCC, DFS_FCC3, PSCAN_FCC, NO_REQ,
1499 BM(F8_5180_5240, F5_5260_5320, F1_5500_5580, F1_5660_5700,
1500 F6_5745_5825, -1, -1, -1, -1, -1, -1, -1),
1501 BM(T7_5210_5210, T3_5250_5290, T2_5760_5800, -1, -1, -1, -1, -1,
1502 -1, -1, -1, -1),
1503 BM(T7_5200_5200, T1_5240_5240, T2_5280_5280, T1_5765_5805, -1, -1,
1504 -1, -1, -1, -1, -1, -1),
1505 BMZERO,
1506 BMZERO,
1507 BMZERO},
1508
1509 {MKK1, MKK, NO_DFS, PSCAN_MKK1, DISALLOW_ADHOC_11A_TURB,
1510 BM(F1_5170_5230, F4_5180_5240, F2_5260_5320, F4_5500_5700, -1, -1,
1511 -1, -1, -1, -1, -1, -1),
1512 BM(T7_5210_5210, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1),
1513 BM(T5_5200_5200, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1),
1514 BMZERO,
1515 BMZERO,
1516 BMZERO},
1517
1518 {MKK2, MKK, NO_DFS, PSCAN_MKK2, DISALLOW_ADHOC_11A_TURB,
1519 BM(F1_4915_4925, F1_4935_4945, F1_4920_4980, F1_5035_5040,
1520 F1_5055_5055, F1_5040_5080, F1_5170_5230, F4_5180_5240,
1521 F2_5260_5320, F4_5500_5700, -1, -1),
1522 BM(T7_5210_5210, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1),
1523 BM(T5_5200_5200, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1),
1524 BMZERO,
1525 BMZERO,
1526 BMZERO},
1527
1528
1529 {MKK3, MKK, NO_DFS, PSCAN_MKK3, DISALLOW_ADHOC_11A_TURB,
1530 BM(F4_5180_5240, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1),
1531 BM(T9_5210_5210, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1),
1532 BM(T1_5200_5200, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1),
1533 BMZERO,
1534 BMZERO,
1535 BMZERO},
1536
1537
1538 {MKK4, MKK, DFS_MKK4, PSCAN_MKK3, DISALLOW_ADHOC_11A_TURB,
1539 BM(F4_5180_5240, F2_5260_5320, -1, -1, -1, -1, -1, -1, -1, -1, -1,
1540 -1),
1541 BM(T10_5210_5210, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1),
1542 BM(T6_5200_5200, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1),
1543 BMZERO,
1544 BMZERO,
1545 BMZERO},
1546
1547
1548 {MKK5, MKK, DFS_MKK4, PSCAN_MKK3, DISALLOW_ADHOC_11A_TURB,
1549 BM(F4_5180_5240, F2_5260_5320, F4_5500_5700, -1, -1, -1, -1, -1,
1550 -1, -1, -1, -1),
1551 BM(T3_5210_5290, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1),
1552 BM(T5_5200_5280, T3_5540_5660, -1, -1, -1, -1, -1, -1, -1, -1, -1,
1553 -1),
1554 BMZERO,
1555 BMZERO,
1556 BMZERO},
1557
1558
1559 {MKK6, MKK, NO_DFS, PSCAN_MKK1, DISALLOW_ADHOC_11A_TURB,
1560 BM(F2_5170_5230, F4_5180_5240, -1, -1, -1, -1, -1, -1, -1, -1, -1,
1561 -1),
1562 BM(T3_5210_5210, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1),
1563 BM(T6_5200_5200, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1),
1564 BMZERO,
1565 BMZERO,
1566 BMZERO},
1567
1568
1569 {MKK7, MKK, DFS_MKK4, PSCAN_MKK1 | PSCAN_MKK3,
1570 DISALLOW_ADHOC_11A_TURB,
1571 BM(F1_5170_5230, F4_5180_5240, F2_5260_5320, -1, -1, -1, -1, -1,
1572 -1, -1, -1, -1),
1573 BM(T3_5210_5290, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1),
1574 BM(T5_5200_5280, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1),
1575 BMZERO,
1576 BMZERO,
1577 BMZERO},
1578
1579
1580 {MKK8, MKK, DFS_MKK4, PSCAN_MKK1 | PSCAN_MKK3,
1581 DISALLOW_ADHOC_11A_TURB,
1582 BM(F1_5170_5230, F4_5180_5240, F2_5260_5320, F4_5500_5700, -1, -1,
1583 -1, -1, -1, -1, -1, -1),
1584 BM(T3_5210_5290, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1),
1585 BM(T5_5200_5280, T3_5540_5660, -1, -1, -1, -1, -1, -1, -1, -1, -1,
1586 -1),
1587 BMZERO,
1588 BMZERO,
1589 BMZERO},
1590
1591
1592 {MKK9, MKK, NO_DFS, PSCAN_MKK2 | PSCAN_MKK3,
1593 DISALLOW_ADHOC_11A_TURB,
1594 BM(F1_4915_4925, F1_4935_4945, F1_4920_4980, F1_5035_5040,
1595 F1_5055_5055, F1_5040_5080, F4_5180_5240, -1, -1, -1, -1, -1),
1596 BM(T9_5210_5210, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1),
1597 BM(T1_5200_5200, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1),
1598 BMZERO,
1599 BMZERO,
1600 BMZERO},
1601
1602
1603 {MKK10, MKK, DFS_MKK4, PSCAN_MKK2 | PSCAN_MKK3,
1604 DISALLOW_ADHOC_11A_TURB,
1605 BM(F1_4915_4925, F1_4935_4945, F1_4920_4980, F1_5035_5040,
1606 F1_5055_5055, F1_5040_5080, F4_5180_5240, F2_5260_5320, -1, -1,
1607 -1, -1),
1608 BM(T3_5210_5290, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1),
1609 BM(T1_5200_5280, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1),
1610 BMZERO,
1611 BMZERO,
1612 BMZERO},
1613
1614
1615 {MKK11, MKK, DFS_MKK4, PSCAN_MKK3, DISALLOW_ADHOC_11A_TURB,
1616 BM(F1_4915_4925, F1_4935_4945, F1_4920_4980, F1_5035_5040,
1617 F1_5055_5055, F1_5040_5080, F4_5180_5240, F2_5260_5320,
1618 F4_5500_5700, -1, -1, -1),
1619 BM(T3_5210_5290, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1),
1620 BM(T1_5200_5280, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1),
1621 BMZERO,
1622 BMZERO,
1623 BMZERO},
1624
1625
1626 {MKK12, MKK, DFS_MKK4, PSCAN_MKK1 | PSCAN_MKK3,
1627 DISALLOW_ADHOC_11A_TURB,
1628 BM(F1_4915_4925, F1_4935_4945, F1_4920_4980, F1_5035_5040,
1629 F1_5055_5055, F1_5040_5080, F1_5170_5230, F4_5180_5240,
1630 F2_5260_5320, F4_5500_5700, -1, -1),
1631 BM(T3_5210_5290, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1),
1632 BM(T1_5200_5280, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1),
1633 BMZERO,
1634 BMZERO,
1635 BMZERO},
1636
1637
1638 {MKK13, MKK, DFS_MKK4, PSCAN_MKK1 | PSCAN_MKK3,
1639 DISALLOW_ADHOC_11A | DISALLOW_ADHOC_11A_TURB,
1640 BM(F1_5170_5230, F7_5180_5240, F2_5260_5320, F4_5500_5700, -1, -1,
1641 -1, -1, -1, -1, -1, -1),
1642 BMZERO,
1643 BMZERO,
1644 BMZERO,
1645 BMZERO,
1646 BMZERO},
1647
1648
1649 {MKK14, MKK, DFS_MKK4, PSCAN_MKK1, DISALLOW_ADHOC_11A_TURB,
1650 BM(F1_4915_4925, F1_4935_4945, F1_4920_4980, F1_5035_5040,
1651 F1_5040_5080, F1_5055_5055, F1_5170_5230, F4_5180_5240, -1, -1,
1652 -1, -1),
1653 BMZERO,
1654 BMZERO,
1655 BMZERO,
1656 BMZERO,
1657 BMZERO},
1658
1659
1660 {MKK15, MKK, DFS_MKK4, PSCAN_MKK1, DISALLOW_ADHOC_11A_TURB,
1661 BM(F1_4915_4925, F1_4935_4945, F1_4920_4980, F1_5035_5040,
1662 F1_5040_5080, F1_5055_5055, F1_5170_5230, F4_5180_5240,
1663 F2_5260_5320, -1, -1, -1),
1664 BMZERO,
1665 BMZERO,
1666 BMZERO,
1667 BMZERO,
1668 BMZERO},
1669
1670
1671 {APLD, NO_CTL, NO_DFS, NO_PSCAN, NO_REQ,
1672 BMZERO,
1673 BMZERO,
1674 BMZERO,
1675 BM(F2_2312_2372, F2_2412_2472, -1, -1, -1, -1, -1, -1, -1, -1, -1,
1676 -1),
1677 BM(G2_2312_2372, G2_2412_2472, -1, -1, -1, -1, -1, -1, -1, -1, -1,
1678 -1),
1679 BMZERO},
1680
1681 {ETSIA, NO_CTL, NO_DFS, PSCAN_ETSIA,
1682 DISALLOW_ADHOC_11A | DISALLOW_ADHOC_11A_TURB,
1683 BMZERO,
1684 BMZERO,
1685 BMZERO,
1686 BM(F1_2457_2472, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1),
1687 BM(G1_2457_2472, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1),
1688 BM(T2_2437_2437, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1)},
1689
1690 {ETSIB, ETSI, NO_DFS, PSCAN_ETSIB,
1691 DISALLOW_ADHOC_11A | DISALLOW_ADHOC_11A_TURB,
1692 BMZERO,
1693 BMZERO,
1694 BMZERO,
1695 BM(F1_2432_2442, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1),
1696 BM(G1_2432_2442, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1),
1697 BM(T2_2437_2437, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1)},
1698
1699 {ETSIC, ETSI, NO_DFS, PSCAN_ETSIC,
1700 DISALLOW_ADHOC_11A | DISALLOW_ADHOC_11A_TURB,
1701 BMZERO,
1702 BMZERO,
1703 BMZERO,
1704 BM(F3_2412_2472, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1),
1705 BM(G3_2412_2472, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1),
1706 BM(T2_2437_2437, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1)},
1707
1708 {FCCA, FCC, NO_DFS, NO_PSCAN, NO_REQ,
1709 BMZERO,
1710 BMZERO,
1711 BMZERO,
1712 BM(F1_2412_2462, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1),
1713 BM(G1_2412_2462, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1),
1714 BM(T2_2437_2437, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1)},
1715
1716 {MKKA, MKK, NO_DFS,
1717 PSCAN_MKKA | PSCAN_MKKA_G | PSCAN_MKKA1 | PSCAN_MKKA1_G |
1718 PSCAN_MKKA2 | PSCAN_MKKA2_G, DISALLOW_ADHOC_11A_TURB,
1719 BMZERO,
1720 BMZERO,
1721 BMZERO,
1722 BM(F2_2412_2462, F1_2467_2472, F2_2484_2484, -1, -1, -1, -1, -1,
1723 -1, -1, -1, -1),
1724 BM(G2_2412_2462, G1_2467_2472, -1, -1, -1, -1, -1, -1, -1, -1, -1,
1725 -1),
1726 BM(T2_2437_2437, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1)},
1727
1728 {MKKC, MKK, NO_DFS, NO_PSCAN, NO_REQ,
1729 BMZERO,
1730 BMZERO,
1731 BMZERO,
1732 BM(F2_2412_2472, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1),
1733 BM(G2_2412_2472, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1),
1734 BM(T2_2437_2437, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1)},
1735
1736 {WORLD, ETSI, NO_DFS, NO_PSCAN, NO_REQ,
1737 BMZERO,
1738 BMZERO,
1739 BMZERO,
1740 BM(F2_2412_2472, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1),
1741 BM(G2_2412_2472, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1),
1742 BM(T2_2437_2437, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1)},
1743
1744 {WOR0_WORLD, NO_CTL, DFS_FCC3 | DFS_ETSI, PSCAN_WWR, ADHOC_PER_11D,
1745 BM(W1_5260_5320, W1_5180_5240, W1_5170_5230, W1_5745_5825,
1746 W1_5500_5700, -1, -1, -1, -1, -1, -1, -1),
1747 BM(WT1_5210_5250, WT1_5290_5290, WT1_5760_5800, -1, -1, -1, -1,
1748 -1, -1, -1, -1, -1),
1749 BMZERO,
1750 BM(W1_2412_2412, W1_2437_2442, W1_2462_2462, W1_2472_2472,
1751 W1_2417_2432, W1_2447_2457, W1_2467_2467, W1_2484_2484, -1, -1,
1752 -1, -1),
1753 BM(WG1_2412_2462, WG1_2467_2472, -1, -1, -1, -1, -1, -1, -1, -1,
1754 -1, -1),
1755 BM(T3_2437_2437, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1)},
1756
1757 {WOR01_WORLD, NO_CTL, DFS_FCC3 | DFS_ETSI, PSCAN_WWR,
1758 ADHOC_PER_11D,
1759 BM(W1_5260_5320, W1_5180_5240, W1_5170_5230, W1_5745_5825,
1760 W1_5500_5700, -1, -1, -1, -1, -1, -1, -1),
1761 BM(WT1_5210_5250, WT1_5290_5290, WT1_5760_5800, -1, -1, -1, -1,
1762 -1, -1, -1, -1, -1),
1763 BMZERO,
1764 BM(W1_2412_2412, W1_2437_2442, W1_2462_2462, W1_2417_2432,
1765 W1_2447_2457, -1, -1, -1, -1, -1, -1, -1),
1766 BM(WG1_2412_2462, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1),
1767 BM(T3_2437_2437, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1)},
1768
1769 {WOR02_WORLD, NO_CTL, DFS_FCC3 | DFS_ETSI, PSCAN_WWR,
1770 ADHOC_PER_11D,
1771 BM(W1_5260_5320, W1_5180_5240, W1_5170_5230, W1_5745_5825,
1772 W1_5500_5700, -1, -1, -1, -1, -1, -1, -1),
1773 BM(WT1_5210_5250, WT1_5290_5290, WT1_5760_5800, -1, -1, -1, -1,
1774 -1, -1, -1, -1, -1),
1775 BMZERO,
1776 BM(W1_2412_2412, W1_2437_2442, W1_2462_2462, W1_2472_2472,
1777 W1_2417_2432, W1_2447_2457, W1_2467_2467, -1, -1, -1, -1, -1),
1778 BM(WG1_2412_2462, WG1_2467_2472, -1, -1, -1, -1, -1, -1, -1, -1,
1779 -1, -1),
1780 BM(T3_2437_2437, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1)},
1781
1782 {EU1_WORLD, NO_CTL, DFS_FCC3 | DFS_ETSI, PSCAN_WWR, ADHOC_PER_11D,
1783 BM(W1_5260_5320, W1_5180_5240, W1_5170_5230, W1_5745_5825,
1784 W1_5500_5700, -1, -1, -1, -1, -1, -1, -1),
1785 BM(WT1_5210_5250, WT1_5290_5290, WT1_5760_5800, -1, -1, -1, -1,
1786 -1, -1, -1, -1, -1),
1787 BMZERO,
1788 BM(W1_2412_2412, W1_2437_2442, W1_2462_2462, W2_2472_2472,
1789 W1_2417_2432, W1_2447_2457, W2_2467_2467, -1, -1, -1, -1, -1),
1790 BM(WG1_2412_2462, WG2_2467_2472, -1, -1, -1, -1, -1, -1, -1, -1,
1791 -1, -1),
1792 BM(T3_2437_2437, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1)},
1793
1794 {WOR1_WORLD, NO_CTL, DFS_FCC3 | DFS_ETSI, PSCAN_WWR, ADHOC_NO_11A,
1795 BM(W1_5260_5320, W1_5180_5240, W1_5170_5230, W1_5745_5825,
1796 W1_5500_5700, -1, -1, -1, -1, -1, -1, -1),
1797 BMZERO,
1798 BMZERO,
1799 BM(W1_2412_2412, W1_2437_2442, W1_2462_2462, W1_2472_2472,
1800 W1_2417_2432, W1_2447_2457, W1_2467_2467, W1_2484_2484, -1, -1,
1801 -1, -1),
1802 BM(WG1_2412_2462, WG1_2467_2472, -1, -1, -1, -1, -1, -1, -1, -1,
1803 -1, -1),
1804 BM(T3_2437_2437, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1)},
1805
1806 {WOR2_WORLD, NO_CTL, DFS_FCC3 | DFS_ETSI, PSCAN_WWR, ADHOC_NO_11A,
1807 BM(W1_5260_5320, W1_5180_5240, W1_5170_5230, W1_5745_5825,
1808 W1_5500_5700, -1, -1, -1, -1, -1, -1, -1),
1809 BM(WT1_5210_5250, WT1_5290_5290, WT1_5760_5800, -1, -1, -1, -1,
1810 -1, -1, -1, -1, -1),
1811 BMZERO,
1812 BM(W1_2412_2412, W1_2437_2442, W1_2462_2462, W1_2472_2472,
1813 W1_2417_2432, W1_2447_2457, W1_2467_2467, W1_2484_2484, -1, -1,
1814 -1, -1),
1815 BM(WG1_2412_2462, WG1_2467_2472, -1, -1, -1, -1, -1, -1, -1, -1,
1816 -1, -1),
1817 BM(T3_2437_2437, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1)},
1818
1819 {WOR3_WORLD, NO_CTL, DFS_FCC3 | DFS_ETSI, PSCAN_WWR, ADHOC_PER_11D,
1820 BM(W1_5260_5320, W1_5180_5240, W1_5170_5230, W1_5745_5825, -1, -1,
1821 -1, -1, -1, -1, -1, -1),
1822 BM(WT1_5210_5250, WT1_5290_5290, WT1_5760_5800, -1, -1, -1, -1,
1823 -1, -1, -1, -1, -1),
1824 BMZERO,
1825 BM(W1_2412_2412, W1_2437_2442, W1_2462_2462, W1_2472_2472,
1826 W1_2417_2432, W1_2447_2457, W1_2467_2467, -1, -1, -1, -1, -1),
1827 BM(WG1_2412_2462, WG2_2467_2472, -1, -1, -1, -1, -1, -1, -1, -1,
1828 -1, -1),
1829 BM(T3_2437_2437, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1)},
1830
1831 {WOR4_WORLD, NO_CTL, DFS_FCC3 | DFS_ETSI, PSCAN_WWR, ADHOC_NO_11A,
1832 BM(W1_5260_5320, W1_5180_5240, W1_5745_5825, -1, -1, -1, -1, -1,
1833 -1, -1, -1, -1),
1834 BM(WT1_5210_5250, WT1_5290_5290, WT1_5760_5800, -1, -1, -1, -1,
1835 -1, -1, -1, -1, -1),
1836 BMZERO,
1837 BM(W1_2412_2412, W1_2437_2442, W1_2462_2462, W1_2417_2432,
1838 W1_2447_2457, -1, -1, -1, -1, -1, -1, -1),
1839 BM(WG1_2412_2462, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1),
1840 BM(T3_2437_2437, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1)},
1841
1842 {WOR5_ETSIC, NO_CTL, DFS_FCC3 | DFS_ETSI, PSCAN_WWR, ADHOC_NO_11A,
1843 BM(W1_5260_5320, W1_5180_5240, W1_5745_5825, -1, -1, -1, -1, -1,
1844 -1, -1, -1, -1),
1845 BMZERO,
1846 BMZERO,
1847 BM(W1_2412_2412, W1_2437_2442, W1_2462_2462, W1_2472_2472,
1848 W1_2417_2432, W1_2447_2457, W1_2467_2467, -1, -1, -1, -1, -1),
1849 BM(WG1_2412_2462, WG1_2467_2472, -1, -1, -1, -1, -1, -1, -1, -1,
1850 -1, -1),
1851 BM(T3_2437_2437, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1)},
1852
1853 {WOR9_WORLD, NO_CTL, DFS_FCC3 | DFS_ETSI, PSCAN_WWR, ADHOC_NO_11A,
1854 BM(W1_5260_5320, W1_5180_5240, W1_5745_5825, W1_5500_5700, -1, -1,
1855 -1, -1, -1, -1, -1, -1),
1856 BM(WT1_5210_5250, WT1_5290_5290, WT1_5760_5800, -1, -1, -1, -1,
1857 -1, -1, -1, -1, -1),
1858 BMZERO,
1859 BM(W1_2412_2412, W1_2437_2442, W1_2462_2462, W1_2417_2432,
1860 W1_2447_2457, -1, -1, -1, -1, -1, -1, -1),
1861 BM(WG1_2412_2462, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1),
1862 BM(T3_2437_2437, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1)},
1863
1864 {WORA_WORLD, NO_CTL, DFS_FCC3 | DFS_ETSI, PSCAN_WWR, ADHOC_NO_11A,
1865 BM(W1_5260_5320, W1_5180_5240, W1_5745_5825, W1_5500_5700, -1, -1,
1866 -1, -1, -1, -1, -1, -1),
1867 BMZERO,
1868 BMZERO,
1869 BM(W1_2412_2412, W1_2437_2442, W1_2462_2462, W1_2472_2472,
1870 W1_2417_2432, W1_2447_2457, W1_2467_2467, -1, -1, -1, -1, -1),
1871 BM(WG1_2412_2462, WG1_2467_2472, -1, -1, -1, -1, -1, -1, -1, -1,
1872 -1, -1),
1873 BM(T3_2437_2437, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1)},
1874
1875 {WORB_WORLD, NO_CTL, DFS_FCC3 | DFS_ETSI, PSCAN_WWR, ADHOC_NO_11A,
1876 BM(W1_5260_5320, W1_5180_5240, W1_5500_5700, -1, -1, -1, -1, -1,
1877 -1, -1, -1, -1),
1878 BMZERO,
1879 BMZERO,
1880 BM(W1_2412_2412, W1_2437_2442, W1_2462_2462, W1_2472_2472,
1881 W1_2417_2432, W1_2447_2457, W1_2467_2467, -1, -1, -1, -1, -1),
1882 BM(WG1_2412_2462, WG1_2467_2472, -1, -1, -1, -1, -1, -1, -1, -1,
1883 -1, -1),
1884 BM(T3_2437_2437, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1)},
1885
1886 {NULL1, NO_CTL, NO_DFS, NO_PSCAN, NO_REQ,
1887 BMZERO,
1888 BMZERO,
1889 BMZERO,
1890 BMZERO,
1891 BMZERO,
1892 BMZERO}
1893};
1894
1895static const struct cmode modes[] = {
1896 {ATH9K_MODE_11A, CHANNEL_A},
1897 {ATH9K_MODE_11B, CHANNEL_B},
1898 {ATH9K_MODE_11G, CHANNEL_G},
1899 {ATH9K_MODE_11NG_HT20, CHANNEL_G_HT20},
1900 {ATH9K_MODE_11NG_HT40PLUS, CHANNEL_G_HT40PLUS},
1901 {ATH9K_MODE_11NG_HT40MINUS, CHANNEL_G_HT40MINUS},
1902 {ATH9K_MODE_11NA_HT20, CHANNEL_A_HT20},
1903 {ATH9K_MODE_11NA_HT40PLUS, CHANNEL_A_HT40PLUS},
1904 {ATH9K_MODE_11NA_HT40MINUS, CHANNEL_A_HT40MINUS},
1905};
1906
1907static struct japan_bandcheck j_bandcheck[] = {
1908 {F1_5170_5230, AR_EEPROM_EEREGCAP_EN_KK_U1_ODD},
1909 {F4_5180_5240, AR_EEPROM_EEREGCAP_EN_KK_U1_EVEN},
1910 {F2_5260_5320, AR_EEPROM_EEREGCAP_EN_KK_U2},
1911 {F4_5500_5700, AR_EEPROM_EEREGCAP_EN_KK_MIDBAND}
1912};
1913
1914
1915#endif
diff --git a/drivers/net/wireless/ath9k/xmit.c b/drivers/net/wireless/ath9k/xmit.c
new file mode 100644
index 000000000000..157f830ee6b8
--- /dev/null
+++ b/drivers/net/wireless/ath9k/xmit.c
@@ -0,0 +1,2871 @@
1/*
2 * Copyright (c) 2008 Atheros Communications Inc.
3 *
4 * Permission to use, copy, modify, and/or distribute this software for any
5 * purpose with or without fee is hereby granted, provided that the above
6 * copyright notice and this permission notice appear in all copies.
7 *
8 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
9 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
10 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
11 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
12 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
13 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
14 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
15 */
16
17/*
18 * Implementation of transmit path.
19 */
20
21#include "core.h"
22
23#define BITS_PER_BYTE 8
24#define OFDM_PLCP_BITS 22
25#define HT_RC_2_MCS(_rc) ((_rc) & 0x0f)
26#define HT_RC_2_STREAMS(_rc) ((((_rc) & 0x78) >> 3) + 1)
27#define L_STF 8
28#define L_LTF 8
29#define L_SIG 4
30#define HT_SIG 8
31#define HT_STF 4
32#define HT_LTF(_ns) (4 * (_ns))
33#define SYMBOL_TIME(_ns) ((_ns) << 2) /* ns * 4 us */
34#define SYMBOL_TIME_HALFGI(_ns) (((_ns) * 18 + 4) / 5) /* ns * 3.6 us */
35#define NUM_SYMBOLS_PER_USEC(_usec) (_usec >> 2)
36#define NUM_SYMBOLS_PER_USEC_HALFGI(_usec) (((_usec*5)-4)/18)
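/*
 * A rough illustration of the half-GI arithmetic above: with the standard
 * 0.8 us guard interval an HT symbol lasts 4 us, hence SYMBOL_TIME(_ns) is
 * _ns * 4; with the 0.4 us short guard interval a symbol lasts 3.6 us, and
 * SYMBOL_TIME_HALFGI computes _ns * 3.6 in integer arithmetic as
 * (_ns * 18 + 4) / 5, e.g. 47 symbols -> (846 + 4) / 5 = 170 us.
 */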
37
38#define OFDM_SIFS_TIME 16
39
40static u32 bits_per_symbol[][2] = {
41 /* 20MHz 40MHz */
42 { 26, 54 }, /* 0: BPSK */
43 { 52, 108 }, /* 1: QPSK 1/2 */
44 { 78, 162 }, /* 2: QPSK 3/4 */
45 { 104, 216 }, /* 3: 16-QAM 1/2 */
46 { 156, 324 }, /* 4: 16-QAM 3/4 */
47 { 208, 432 }, /* 5: 64-QAM 2/3 */
48 { 234, 486 }, /* 6: 64-QAM 3/4 */
49 { 260, 540 }, /* 7: 64-QAM 5/6 */
50 { 52, 108 }, /* 8: BPSK */
51 { 104, 216 }, /* 9: QPSK 1/2 */
52 { 156, 324 }, /* 10: QPSK 3/4 */
53 { 208, 432 }, /* 11: 16-QAM 1/2 */
54 { 312, 648 }, /* 12: 16-QAM 3/4 */
55 { 416, 864 }, /* 13: 64-QAM 2/3 */
56 { 468, 972 }, /* 14: 64-QAM 3/4 */
57 { 520, 1080 }, /* 15: 64-QAM 5/6 */
58};
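/*
 * Note on the table above: entries 0-7 are the single-stream HT MCS rates
 * and entries 8-15 their two-stream counterparts (each carrying twice the
 * bits per OFDM symbol), with the two columns giving the 20 MHz and 40 MHz
 * values indexed via HT_RC_2_MCS().
 */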
59
60#define IS_HT_RATE(_rate) ((_rate) & 0x80)
61
62/*
63 * Insert a chain of ath_buf (descriptors) on a multicast txq
64 * but do NOT start tx DMA on this queue.
65 * NB: must be called with txq lock held
66 */
67
68static void ath_tx_mcastqaddbuf(struct ath_softc *sc,
69 struct ath_txq *txq,
70 struct list_head *head)
71{
72 struct ath_hal *ah = sc->sc_ah;
73 struct ath_buf *bf;
74
75 if (list_empty(head))
76 return;
77
78 /*
79 * Insert the frame on the outbound list and
80 * pass it on to the hardware.
81 */
82 bf = list_first_entry(head, struct ath_buf, list);
83
84 /*
85 * The CAB queue is started from the SWBA handler since
86 * frames only go out on DTIM and to avoid possible races.
87 */
88 ath9k_hw_set_interrupts(ah, 0);
89
90 /*
91 * If there is anything in the mcastq, we want to set
92 * the "more data" bit in the last item in the queue to
93 * indicate that there is "more data". It makes sense to add
94 * it here since you are *always* going to have
95 * more data when adding to this queue, no matter where
96 * you call from.
97 */
98
99 if (txq->axq_depth) {
100 struct ath_buf *lbf;
101 struct ieee80211_hdr *hdr;
102
103 /*
104 * Add the "more data flag" to the last frame
105 */
106
107 lbf = list_entry(txq->axq_q.prev, struct ath_buf, list);
108 hdr = (struct ieee80211_hdr *)
109 ((struct sk_buff *)(lbf->bf_mpdu))->data;
110 hdr->frame_control |= cpu_to_le16(IEEE80211_FCTL_MOREDATA);
111 }
112
113 /*
114 * Now, concat the frame onto the queue
115 */
116 list_splice_tail_init(head, &txq->axq_q);
117 txq->axq_depth++;
118 txq->axq_totalqueued++;
119 txq->axq_linkbuf = list_entry(txq->axq_q.prev, struct ath_buf, list);
120
121 DPRINTF(sc, ATH_DBG_QUEUE,
122 "%s: txq depth = %d\n", __func__, txq->axq_depth);
123 if (txq->axq_link != NULL) {
124 *txq->axq_link = bf->bf_daddr;
125 DPRINTF(sc, ATH_DBG_XMIT,
126 "%s: link[%u](%p)=%llx (%p)\n",
127 __func__,
128 txq->axq_qnum, txq->axq_link,
129 ito64(bf->bf_daddr), bf->bf_desc);
130 }
131 txq->axq_link = &(bf->bf_lastbf->bf_desc->ds_link);
132 ath9k_hw_set_interrupts(ah, sc->sc_imask);
133}
134
135/*
136 * Insert a chain of ath_buf (descriptors) on a txq and
137 * assume the descriptors are already chained together by caller.
138 * NB: must be called with txq lock held
139 */
140
141static void ath_tx_txqaddbuf(struct ath_softc *sc,
142 struct ath_txq *txq, struct list_head *head)
143{
144 struct ath_hal *ah = sc->sc_ah;
145 struct ath_buf *bf;
146 /*
147 * Insert the frame on the outbound list and
148 * pass it on to the hardware.
149 */
150
151 if (list_empty(head))
152 return;
153
154 bf = list_first_entry(head, struct ath_buf, list);
155
156 list_splice_tail_init(head, &txq->axq_q);
157 txq->axq_depth++;
158 txq->axq_totalqueued++;
159 txq->axq_linkbuf = list_entry(txq->axq_q.prev, struct ath_buf, list);
160
161 DPRINTF(sc, ATH_DBG_QUEUE,
162 "%s: txq depth = %d\n", __func__, txq->axq_depth);
163
164 if (txq->axq_link == NULL) {
165 ath9k_hw_puttxbuf(ah, txq->axq_qnum, bf->bf_daddr);
166 DPRINTF(sc, ATH_DBG_XMIT,
167 "%s: TXDP[%u] = %llx (%p)\n",
168 __func__, txq->axq_qnum,
169 ito64(bf->bf_daddr), bf->bf_desc);
170 } else {
171 *txq->axq_link = bf->bf_daddr;
172 DPRINTF(sc, ATH_DBG_XMIT, "%s: link[%u] (%p)=%llx (%p)\n",
173 __func__,
174 txq->axq_qnum, txq->axq_link,
175 ito64(bf->bf_daddr), bf->bf_desc);
176 }
177 txq->axq_link = &(bf->bf_lastbf->bf_desc->ds_link);
178 ath9k_hw_txstart(ah, txq->axq_qnum);
179}
180
181/* Get transmit rate index using rate in Kbps */
182
183static int ath_tx_findindex(const struct ath9k_rate_table *rt, int rate)
184{
185 int i;
186 int ndx = 0;
187
188 for (i = 0; i < rt->rateCount; i++) {
189 if (rt->info[i].rateKbps == rate) {
190 ndx = i;
191 break;
192 }
193 }
194
195 return ndx;
196}
197
198/* Check if it's okay to send out aggregates */
199
200static int ath_aggr_query(struct ath_softc *sc,
201 struct ath_node *an, u8 tidno)
202{
203 struct ath_atx_tid *tid;
204 tid = ATH_AN_2_TID(an, tidno);
205
206 if (tid->addba_exchangecomplete || tid->addba_exchangeinprogress)
207 return 1;
208 else
209 return 0;
210}
211
212static enum ath9k_pkt_type get_hal_packet_type(struct ieee80211_hdr *hdr)
213{
214 enum ath9k_pkt_type htype;
215 __le16 fc;
216
217 fc = hdr->frame_control;
218
219 /* Calculate Atheros packet type from IEEE80211 packet header */
220
221 if (ieee80211_is_beacon(fc))
222 htype = ATH9K_PKT_TYPE_BEACON;
223 else if (ieee80211_is_probe_resp(fc))
224 htype = ATH9K_PKT_TYPE_PROBE_RESP;
225 else if (ieee80211_is_atim(fc))
226 htype = ATH9K_PKT_TYPE_ATIM;
227 else if (ieee80211_is_pspoll(fc))
228 htype = ATH9K_PKT_TYPE_PSPOLL;
229 else
230 htype = ATH9K_PKT_TYPE_NORMAL;
231
232 return htype;
233}
234
235static void fill_min_rates(struct sk_buff *skb, struct ath_tx_control *txctl)
236{
237 struct ieee80211_hdr *hdr;
238 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
239 struct ath_tx_info_priv *tx_info_priv;
240 __le16 fc;
241
242 hdr = (struct ieee80211_hdr *)skb->data;
243 fc = hdr->frame_control;
244 tx_info_priv = (struct ath_tx_info_priv *)tx_info->driver_data[0];
245
246 if (ieee80211_is_mgmt(fc) || ieee80211_is_ctl(fc)) {
247 txctl->use_minrate = 1;
248 txctl->min_rate = tx_info_priv->min_rate;
249 } else if (ieee80211_is_data(fc)) {
250 if (ieee80211_is_nullfunc(fc) ||
251 /* Port Access Entity (IEEE 802.1X) */
252 (skb->protocol == cpu_to_be16(0x888E))) {
253 txctl->use_minrate = 1;
254 txctl->min_rate = tx_info_priv->min_rate;
255 }
256 if (is_multicast_ether_addr(hdr->addr1))
257 txctl->mcast_rate = tx_info_priv->min_rate;
258 }
259
260}
261
262/* This function will setup additional txctl information, mostly rate stuff */
263/* FIXME: seqno, ps */
264static int ath_tx_prepare(struct ath_softc *sc,
265 struct sk_buff *skb,
266 struct ath_tx_control *txctl)
267{
268 struct ieee80211_hw *hw = sc->hw;
269 struct ieee80211_hdr *hdr;
270 struct ath_rc_series *rcs;
271 struct ath_txq *txq = NULL;
272 const struct ath9k_rate_table *rt;
273 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
274 struct ath_tx_info_priv *tx_info_priv;
275 int hdrlen;
276 u8 rix, antenna;
277 __le16 fc;
278 u8 *qc;
279
280 memset(txctl, 0, sizeof(struct ath_tx_control));
281
282 txctl->dev = sc;
283 hdr = (struct ieee80211_hdr *)skb->data;
284 hdrlen = ieee80211_get_hdrlen_from_skb(skb);
285 fc = hdr->frame_control;
286
287 rt = sc->sc_currates;
288 BUG_ON(!rt);
289
290 /* Fill misc fields */
291
292 spin_lock_bh(&sc->node_lock);
293 txctl->an = ath_node_get(sc, hdr->addr1);
294 /* create a temp node, if the node is not there already */
295 if (!txctl->an)
296 txctl->an = ath_node_attach(sc, hdr->addr1, 0);
297 spin_unlock_bh(&sc->node_lock);
298
299 if (ieee80211_is_data_qos(fc)) {
300 qc = ieee80211_get_qos_ctl(hdr);
301 txctl->tidno = qc[0] & 0xf;
302 }
303
304 txctl->if_id = 0;
305 txctl->nextfraglen = 0;
306 txctl->frmlen = skb->len + FCS_LEN - (hdrlen & 3);
307 txctl->txpower = MAX_RATE_POWER; /* FIXME */
308
309 /* Fill Key related fields */
310
311 txctl->keytype = ATH9K_KEY_TYPE_CLEAR;
312 txctl->keyix = ATH9K_TXKEYIX_INVALID;
313
314 if (tx_info->control.hw_key) {
315 txctl->keyix = tx_info->control.hw_key->hw_key_idx;
316 txctl->frmlen += tx_info->control.icv_len;
317
318 if (sc->sc_keytype == ATH9K_CIPHER_WEP)
319 txctl->keytype = ATH9K_KEY_TYPE_WEP;
320 else if (sc->sc_keytype == ATH9K_CIPHER_TKIP)
321 txctl->keytype = ATH9K_KEY_TYPE_TKIP;
322 else if (sc->sc_keytype == ATH9K_CIPHER_AES_CCM)
323 txctl->keytype = ATH9K_KEY_TYPE_AES;
324 }
325
326 /* Fill packet type */
327
328 txctl->atype = get_hal_packet_type(hdr);
329
330 /* Fill qnum */
331
332 txctl->qnum = ath_get_hal_qnum(skb_get_queue_mapping(skb), sc);
333 txq = &sc->sc_txq[txctl->qnum];
334 spin_lock_bh(&txq->axq_lock);
335
336 /* Try to avoid running out of descriptors */
337 if (txq->axq_depth >= (ATH_TXBUF - 20)) {
338 DPRINTF(sc, ATH_DBG_FATAL,
339 "%s: TX queue: %d is full, depth: %d\n",
340 __func__,
341 txctl->qnum,
342 txq->axq_depth);
343 ieee80211_stop_queue(hw, skb_get_queue_mapping(skb));
344 txq->stopped = 1;
345 spin_unlock_bh(&txq->axq_lock);
346 return -1;
347 }
348
349 spin_unlock_bh(&txq->axq_lock);
350
351 /* Fill rate */
352
353 fill_min_rates(skb, txctl);
354
355 /* Fill flags */
356
357 txctl->flags = ATH9K_TXDESC_CLRDMASK; /* needed for crypto errors */
358
359 if (tx_info->flags & IEEE80211_TX_CTL_NO_ACK)
360 txctl->flags |= ATH9K_TXDESC_NOACK;
361 if (tx_info->flags & IEEE80211_TX_CTL_USE_RTS_CTS)
362 txctl->flags |= ATH9K_TXDESC_RTSENA;
363
364 /*
365 * Setup for rate calculations.
366 */
367 tx_info_priv = (struct ath_tx_info_priv *)tx_info->driver_data[0];
368 rcs = tx_info_priv->rcs;
369
370 if (ieee80211_is_data(fc) && !txctl->use_minrate) {
371
372 /* Enable HT only for DATA frames and not for EAPOL */
373 txctl->ht = (hw->conf.ht_conf.ht_supported &&
374 (tx_info->flags & IEEE80211_TX_CTL_AMPDU));
375
376 if (is_multicast_ether_addr(hdr->addr1)) {
377 rcs[0].rix = (u8)
378 ath_tx_findindex(rt, txctl->mcast_rate);
379
380 /*
381 * mcast packets are not re-tried.
382 */
383 rcs[0].tries = 1;
384 }
385 /* For HT capable stations, we save tidno for later use.
386 * We also override seqno set by upper layer with the one
387 * in tx aggregation state.
388 *
389 * First, the fragmentation state is determined.
390 * If fragmentation is on, the sequence number is
391 * not overridden, since it has been
392 * incremented by the fragmentation routine.
393 */
394 if (likely(!(txctl->flags & ATH9K_TXDESC_FRAG_IS_ON)) &&
395 txctl->ht && sc->sc_txaggr) {
396 struct ath_atx_tid *tid;
397
398 tid = ATH_AN_2_TID(txctl->an, txctl->tidno);
399
400 hdr->seq_ctrl = cpu_to_le16(tid->seq_next <<
401 IEEE80211_SEQ_SEQ_SHIFT);
402 txctl->seqno = tid->seq_next;
403 INCR(tid->seq_next, IEEE80211_SEQ_MAX);
404 }
405 } else {
406 /* for management and control frames,
407 * or for NULL and EAPOL frames */
408 if (txctl->min_rate)
409 rcs[0].rix = ath_rate_findrateix(sc, txctl->min_rate);
410 else
411 rcs[0].rix = 0;
412 rcs[0].tries = ATH_MGT_TXMAXTRY;
413 }
414 rix = rcs[0].rix;
415
416 /*
417 * Calculate duration. This logically belongs in the 802.11
418 * layer but it lacks sufficient information to calculate it.
419 */
420 if ((txctl->flags & ATH9K_TXDESC_NOACK) == 0 && !ieee80211_is_ctl(fc)) {
421 u16 dur;
422 /*
423 * XXX not right with fragmentation.
424 */
425 if (sc->sc_flags & ATH_PREAMBLE_SHORT)
426 dur = rt->info[rix].spAckDuration;
427 else
428 dur = rt->info[rix].lpAckDuration;
429
430 if (le16_to_cpu(hdr->frame_control) &
431 IEEE80211_FCTL_MOREFRAGS) {
432 dur += dur; /* Add additional 'SIFS + ACK' */
433
434 /*
435 ** Compute size of next fragment in order to compute
436 ** durations needed to update NAV.
437 ** The last fragment uses the ACK duration only.
438 ** Add time for next fragment.
439 */
440 dur += ath9k_hw_computetxtime(sc->sc_ah, rt,
441 txctl->nextfraglen,
442 rix, sc->sc_flags & ATH_PREAMBLE_SHORT);
443 }
444
445 if (ieee80211_has_morefrags(fc) ||
446 (le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_FRAG)) {
447 /*
448 ** Force hardware to use computed duration for next
449 ** fragment by disabling multi-rate retry, which
450 ** updates duration based on the multi-rate
451 ** duration table.
452 */
453 rcs[1].tries = rcs[2].tries = rcs[3].tries = 0;
454 rcs[1].rix = rcs[2].rix = rcs[3].rix = 0;
455 /* reset tries but keep rate index */
456 rcs[0].tries = ATH_TXMAXTRY;
457 }
458
459 hdr->duration_id = cpu_to_le16(dur);
460 }
461
462 /*
463 * Determine if a tx interrupt should be generated for
464 * this descriptor. We take a tx interrupt to reap
465 * descriptors when the h/w hits an EOL condition or
466 * when the descriptor is specifically marked to generate
467 * an interrupt. We periodically mark descriptors in this
468 * way to ensure timely replenishing of the supply needed
469 * for sending frames. Deferring interrupts reduces system
470 * load and potentially allows more concurrent work to be
471 * done, but if done too aggressively it can cause senders to
472 * back up.
473 *
474 * NB: use >= to deal with sc_txintrperiod changing
475 * dynamically through sysctl.
476 */
477 spin_lock_bh(&txq->axq_lock);
478 if ((++txq->axq_intrcnt >= sc->sc_txintrperiod)) {
479 txctl->flags |= ATH9K_TXDESC_INTREQ;
480 txq->axq_intrcnt = 0;
481 }
482 spin_unlock_bh(&txq->axq_lock);
483
484 if (is_multicast_ether_addr(hdr->addr1)) {
485 antenna = sc->sc_mcastantenna + 1;
486 sc->sc_mcastantenna = (sc->sc_mcastantenna + 1) & 0x1;
487 } else
488 antenna = sc->sc_txantenna;
489
490#ifdef USE_LEGACY_HAL
491 txctl->antenna = antenna;
492#endif
493 return 0;
494}
495
496/* To complete a chain of buffers associated with a frame */
497
498static void ath_tx_complete_buf(struct ath_softc *sc,
499 struct ath_buf *bf,
500 struct list_head *bf_q,
501 int txok, int sendbar)
502{
503 struct sk_buff *skb = bf->bf_mpdu;
504 struct ath_xmit_status tx_status;
505 dma_addr_t *pa;
506
507 /*
508 * Set retry information.
509 * NB: Don't use the information in the descriptor, because the frame
510 * could be software retried.
511 */
512 tx_status.retries = bf->bf_retries;
513 tx_status.flags = 0;
514
515 if (sendbar)
516 tx_status.flags = ATH_TX_BAR;
517
518 if (!txok) {
519 tx_status.flags |= ATH_TX_ERROR;
520
521 if (bf->bf_isxretried)
522 tx_status.flags |= ATH_TX_XRETRY;
523 }
524 /* Unmap this frame */
525 pa = get_dma_mem_context(bf, bf_dmacontext);
526 pci_unmap_single(sc->pdev,
527 *pa,
528 skb->len,
529 PCI_DMA_TODEVICE);
530 /* complete this frame */
531 ath_tx_complete(sc, skb, &tx_status, bf->bf_node);
532
533 /*
534 * Return the list of ath_buf of this mpdu to free queue
535 */
536 spin_lock_bh(&sc->sc_txbuflock);
537 list_splice_tail_init(bf_q, &sc->sc_txbuf);
538 spin_unlock_bh(&sc->sc_txbuflock);
539}
540
541/*
542 * queue up a dest/ac pair for tx scheduling
543 * NB: must be called with txq lock held
544 */
545
546static void ath_tx_queue_tid(struct ath_txq *txq, struct ath_atx_tid *tid)
547{
548 struct ath_atx_ac *ac = tid->ac;
549
550 /*
551 * if tid is paused, hold off
552 */
553 if (tid->paused)
554 return;
555
556 /*
557 * add tid to ac at most once
558 */
559 if (tid->sched)
560 return;
561
562 tid->sched = true;
563 list_add_tail(&tid->list, &ac->tid_q);
564
565 /*
566 * add node ac to txq at most once
567 */
568 if (ac->sched)
569 return;
570
571 ac->sched = true;
572 list_add_tail(&ac->list, &txq->axq_acq);
573}
574
575/* pause a tid */
576
577static void ath_tx_pause_tid(struct ath_softc *sc, struct ath_atx_tid *tid)
578{
579 struct ath_txq *txq = &sc->sc_txq[tid->ac->qnum];
580
581 spin_lock_bh(&txq->axq_lock);
582
583 tid->paused++;
584
585 spin_unlock_bh(&txq->axq_lock);
586}
587
588/* resume a tid and schedule aggregate */
589
590void ath_tx_resume_tid(struct ath_softc *sc, struct ath_atx_tid *tid)
591{
592 struct ath_txq *txq = &sc->sc_txq[tid->ac->qnum];
593
594 ASSERT(tid->paused > 0);
595 spin_lock_bh(&txq->axq_lock);
596
597 tid->paused--;
598
599 if (tid->paused > 0)
600 goto unlock;
601
602 if (list_empty(&tid->buf_q))
603 goto unlock;
604
605 /*
606 * Add this TID to scheduler and try to send out aggregates
607 */
608 ath_tx_queue_tid(txq, tid);
609 ath_txq_schedule(sc, txq);
610unlock:
611 spin_unlock_bh(&txq->axq_lock);
612}
613
614/* Compute the number of bad frames */
615
616static int ath_tx_num_badfrms(struct ath_softc *sc,
617 struct ath_buf *bf, int txok)
618{
619 struct ath_node *an = bf->bf_node;
620 int isnodegone = (an->an_flags & ATH_NODE_CLEAN);
621 struct ath_buf *bf_last = bf->bf_lastbf;
622 struct ath_desc *ds = bf_last->bf_desc;
623 u16 seq_st = 0;
624 u32 ba[WME_BA_BMP_SIZE >> 5];
625 int ba_index;
626 int nbad = 0;
627 int isaggr = 0;
628
629 if (isnodegone || ds->ds_txstat.ts_flags == ATH9K_TX_SW_ABORTED)
630 return 0;
631
632 isaggr = bf->bf_isaggr;
633 if (isaggr) {
634 seq_st = ATH_DS_BA_SEQ(ds);
635 memcpy(ba, ATH_DS_BA_BITMAP(ds), WME_BA_BMP_SIZE >> 3);
636 }
637
638 while (bf) {
639 ba_index = ATH_BA_INDEX(seq_st, bf->bf_seqno);
640 if (!txok || (isaggr && !ATH_BA_ISSET(ba, ba_index)))
641 nbad++;
642
643 bf = bf->bf_next;
644 }
645
646 return nbad;
647}
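/*
 * In rough terms: for an aggregate, every subframe whose sequence number is
 * not set in the block-ack bitmap counts as bad. For example, if an A-MPDU
 * carried 8 subframes and the BA acknowledged 6 of them, nbad is 2; the
 * count is later reported to rate control via tx_info_priv->n_bad_frames
 * in ath_tx_processq().
 */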
648
649static void ath_tx_set_retry(struct ath_softc *sc, struct ath_buf *bf)
650{
651 struct sk_buff *skb;
652 struct ieee80211_hdr *hdr;
653
654 bf->bf_isretried = 1;
655 bf->bf_retries++;
656
657 skb = bf->bf_mpdu;
658 hdr = (struct ieee80211_hdr *)skb->data;
659 hdr->frame_control |= cpu_to_le16(IEEE80211_FCTL_RETRY);
660}
661
662/* Update block ack window */
663
664static void ath_tx_update_baw(struct ath_softc *sc,
665 struct ath_atx_tid *tid, int seqno)
666{
667 int index, cindex;
668
669 index = ATH_BA_INDEX(tid->seq_start, seqno);
670 cindex = (tid->baw_head + index) & (ATH_TID_MAX_BUFS - 1);
671
672 tid->tx_buf[cindex] = NULL;
673
674 while (tid->baw_head != tid->baw_tail && !tid->tx_buf[tid->baw_head]) {
675 INCR(tid->seq_start, IEEE80211_SEQ_MAX);
676 INCR(tid->baw_head, ATH_TID_MAX_BUFS);
677 }
678}
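/*
 * A small example of the sliding window above, assuming seq_start = 100:
 * completing seqno 102 only clears its slot, and the window start does not
 * move until seqno 100 completes, at which point the while loop advances
 * seq_start and baw_head past every leading slot that is already NULL, so
 * the window start catches up to the oldest still-outstanding frame.
 */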
679
680/*
681 * ath_pkt_duration - compute packet duration (NB: not NAV)
682 *
683 * rix - rate index
684 * pktlen - total bytes (delims + data + fcs + pads + pad delims)
685 * width - 0 for 20 MHz, 1 for 40 MHz
686 * half_gi - use the 3.6 us half-GI symbol time instead of 4 us
687 */
688
689static u32 ath_pkt_duration(struct ath_softc *sc,
690 u8 rix,
691 struct ath_buf *bf,
692 int width,
693 int half_gi,
694 bool shortPreamble)
695{
696 const struct ath9k_rate_table *rt = sc->sc_currates;
697 u32 nbits, nsymbits, duration, nsymbols;
698 u8 rc;
699 int streams, pktlen;
700
701 pktlen = bf->bf_isaggr ? bf->bf_al : bf->bf_frmlen;
702 rc = rt->info[rix].rateCode;
703
704 /*
705 * for legacy rates, use old function to compute packet duration
706 */
707 if (!IS_HT_RATE(rc))
708 return ath9k_hw_computetxtime(sc->sc_ah,
709 rt,
710 pktlen,
711 rix,
712 shortPreamble);
713 /*
714 * find number of symbols: PLCP + data
715 */
716 nbits = (pktlen << 3) + OFDM_PLCP_BITS;
717 nsymbits = bits_per_symbol[HT_RC_2_MCS(rc)][width];
718 nsymbols = (nbits + nsymbits - 1) / nsymbits;
719
720 if (!half_gi)
721 duration = SYMBOL_TIME(nsymbols);
722 else
723 duration = SYMBOL_TIME_HALFGI(nsymbols);
724
725 /*
726 * add up the duration for legacy/HT training and signal fields
727 */
728 streams = HT_RC_2_STREAMS(rc);
729 duration += L_STF + L_LTF + L_SIG + HT_SIG + HT_STF + HT_LTF(streams);
730 return duration;
731}
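/*
 * Worked example for the HT path above (long guard interval, one spatial
 * stream): a 1500 byte MPDU at MCS 7 in a 20 MHz channel gives
 * nbits = 1500 * 8 + OFDM_PLCP_BITS = 12022 and nsymbits = 260, so
 * nsymbols = 47 and the data portion lasts SYMBOL_TIME(47) = 188 us;
 * adding L_STF + L_LTF + L_SIG + HT_SIG + HT_STF + HT_LTF(1) = 36 us of
 * preamble/training fields yields a duration of roughly 224 us.
 */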
732
733/* Rate module function to set rate related fields in tx descriptor */
734
735static void ath_buf_set_rate(struct ath_softc *sc, struct ath_buf *bf)
736{
737 struct ath_hal *ah = sc->sc_ah;
738 const struct ath9k_rate_table *rt;
739 struct ath_desc *ds = bf->bf_desc;
740 struct ath_desc *lastds = bf->bf_lastbf->bf_desc;
741 struct ath9k_11n_rate_series series[4];
742 int i, flags, rtsctsena = 0, dynamic_mimops = 0;
743 u32 ctsduration = 0;
744 u8 rix = 0, cix, ctsrate = 0;
745 u32 aggr_limit_with_rts = sc->sc_rtsaggrlimit;
746 struct ath_node *an = (struct ath_node *) bf->bf_node;
747
748 /*
749 * get the cix for the lowest valid rix.
750 */
751 rt = sc->sc_currates;
752 for (i = 4; i--;) {
753 if (bf->bf_rcs[i].tries) {
754 rix = bf->bf_rcs[i].rix;
755 break;
756 }
757 }
758 flags = (bf->bf_flags & (ATH9K_TXDESC_RTSENA | ATH9K_TXDESC_CTSENA));
759 cix = rt->info[rix].controlRate;
760
761 /*
762 * If 802.11g protection is enabled, determine whether
763 * to use RTS/CTS or just CTS. Note that this is only
764 * done for OFDM/HT unicast frames.
765 */
766 if (sc->sc_protmode != PROT_M_NONE &&
767 (rt->info[rix].phy == PHY_OFDM ||
768 rt->info[rix].phy == PHY_HT) &&
769 (bf->bf_flags & ATH9K_TXDESC_NOACK) == 0) {
770 if (sc->sc_protmode == PROT_M_RTSCTS)
771 flags = ATH9K_TXDESC_RTSENA;
772 else if (sc->sc_protmode == PROT_M_CTSONLY)
773 flags = ATH9K_TXDESC_CTSENA;
774
775 cix = rt->info[sc->sc_protrix].controlRate;
776 rtsctsena = 1;
777 }
778
779 /* For 11n, the default behavior is to enable RTS for
780 * hw retried frames. We enable the global flag here and
781 * let rate series flags determine which rates will actually
782 * use RTS.
783 */
784 if ((ah->ah_caps.hw_caps & ATH9K_HW_CAP_HT) && bf->bf_isdata) {
785 BUG_ON(!an);
786 /*
787 * 802.11g protection not needed, use our default behavior
788 */
789 if (!rtsctsena)
790 flags = ATH9K_TXDESC_RTSENA;
791 /*
792 * For dynamic MIMO PS, RTS needs to precede the first aggregate
793 * and the second aggregate should not have any protection at all.
794 */
795 if (an->an_smmode == ATH_SM_PWRSAV_DYNAMIC) {
796 if (!bf->bf_aggrburst) {
797 flags = ATH9K_TXDESC_RTSENA;
798 dynamic_mimops = 1;
799 } else {
800 flags = 0;
801 }
802 }
803 }
804
805 /*
806 * Set protection if aggregate protection on
807 */
808 if (sc->sc_config.ath_aggr_prot &&
809 (!bf->bf_isaggr || (bf->bf_isaggr && bf->bf_al < 8192))) {
810 flags = ATH9K_TXDESC_RTSENA;
811 cix = rt->info[sc->sc_protrix].controlRate;
812 rtsctsena = 1;
813 }
814
815 /*
816 * For AR5416 - RTS cannot be followed by a frame larger than 8K.
817 */
818 if (bf->bf_isaggr && (bf->bf_al > aggr_limit_with_rts)) {
819 /*
820 * Ensure that in the case of SM Dynamic power save
821 * while we are bursting the second aggregate the
822 * RTS is cleared.
823 */
824 flags &= ~(ATH9K_TXDESC_RTSENA);
825 }
826
827 /*
828 * CTS transmit rate is derived from the transmit rate
829 * by looking in the h/w rate table. We must also factor
830 * in whether or not a short preamble is to be used.
831 */
832 /* NB: cix is set above where RTS/CTS is enabled */
833 BUG_ON(cix == 0xff);
834 ctsrate = rt->info[cix].rateCode |
835 (bf->bf_shpreamble ? rt->info[cix].shortPreamble : 0);
836
837 /*
838 * Setup HAL rate series
839 */
840 memzero(series, sizeof(struct ath9k_11n_rate_series) * 4);
841
842 for (i = 0; i < 4; i++) {
843 if (!bf->bf_rcs[i].tries)
844 continue;
845
846 rix = bf->bf_rcs[i].rix;
847
848 series[i].Rate = rt->info[rix].rateCode |
849 (bf->bf_shpreamble ? rt->info[rix].shortPreamble : 0);
850
851 series[i].Tries = bf->bf_rcs[i].tries;
852
853 series[i].RateFlags = (
854 (bf->bf_rcs[i].flags & ATH_RC_RTSCTS_FLAG) ?
855 ATH9K_RATESERIES_RTS_CTS : 0) |
856 ((bf->bf_rcs[i].flags & ATH_RC_CW40_FLAG) ?
857 ATH9K_RATESERIES_2040 : 0) |
858 ((bf->bf_rcs[i].flags & ATH_RC_SGI_FLAG) ?
859 ATH9K_RATESERIES_HALFGI : 0);
860
861 series[i].PktDuration = ath_pkt_duration(
862 sc, rix, bf,
863 (bf->bf_rcs[i].flags & ATH_RC_CW40_FLAG) != 0,
864 (bf->bf_rcs[i].flags & ATH_RC_SGI_FLAG),
865 bf->bf_shpreamble);
866
867 if ((an->an_smmode == ATH_SM_PWRSAV_STATIC) &&
868 (bf->bf_rcs[i].flags & ATH_RC_DS_FLAG) == 0) {
869 /*
870 * When sending to an HT node that has enabled static
871 * SM/MIMO power save, send at single stream rates but
872 * use maximum allowed transmit chains per user,
873 * hardware, regulatory, or country limits for
874 * better range.
875 */
876 series[i].ChSel = sc->sc_tx_chainmask;
877 } else {
878 if (bf->bf_ht)
879 series[i].ChSel =
880 ath_chainmask_sel_logic(sc, an);
881 else
882 series[i].ChSel = sc->sc_tx_chainmask;
883 }
884
885 if (rtsctsena)
886 series[i].RateFlags |= ATH9K_RATESERIES_RTS_CTS;
887
888 /*
889 * Set RTS for all rates if node is in dynamic powersave
890 * mode and we are using dual stream rates.
891 */
892 if (dynamic_mimops && (bf->bf_rcs[i].flags & ATH_RC_DS_FLAG))
893 series[i].RateFlags |= ATH9K_RATESERIES_RTS_CTS;
894 }
895
896 /*
897 * For non-HT devices, calculate RTS/CTS duration in software
898 * and disable multi-rate retry.
899 */
900 if (flags && !(ah->ah_caps.hw_caps & ATH9K_HW_CAP_HT)) {
901 /*
902 * Compute the transmit duration based on the frame
903 * size and the size of an ACK frame. We call into the
904 * HAL to do the computation since it depends on the
905 * characteristics of the actual PHY being used.
906 *
907 * NB: CTS is assumed the same size as an ACK so we can
908 * use the precalculated ACK durations.
909 */
910 if (flags & ATH9K_TXDESC_RTSENA) { /* SIFS + CTS */
911 ctsduration += bf->bf_shpreamble ?
912 rt->info[cix].spAckDuration :
913 rt->info[cix].lpAckDuration;
914 }
915
916 ctsduration += series[0].PktDuration;
917
918 if ((bf->bf_flags & ATH9K_TXDESC_NOACK) == 0) { /* SIFS + ACK */
919 ctsduration += bf->bf_shpreamble ?
920 rt->info[rix].spAckDuration :
921 rt->info[rix].lpAckDuration;
922 }
923
924 /*
925 * Disable multi-rate retry when using RTS/CTS by clearing
926 * series 1, 2 and 3.
927 */
928 memzero(&series[1], sizeof(struct ath9k_11n_rate_series) * 3);
929 }
930
931 /*
932 * set dur_update_en for l-sig computation except for PS-Poll frames
933 */
934 ath9k_hw_set11n_ratescenario(ah, ds, lastds,
935 !bf->bf_ispspoll,
936 ctsrate,
937 ctsduration,
938 series, 4, flags);
939 if (sc->sc_config.ath_aggr_prot && flags)
940 ath9k_hw_set11n_burstduration(ah, ds, 8192);
941}
942
943/*
944 * Function to send a normal HT (non-AMPDU) frame
945 * NB: must be called with txq lock held
946 */
947
948static int ath_tx_send_normal(struct ath_softc *sc,
949 struct ath_txq *txq,
950 struct ath_atx_tid *tid,
951 struct list_head *bf_head)
952{
953 struct ath_buf *bf;
954 struct sk_buff *skb;
955 struct ieee80211_tx_info *tx_info;
956 struct ath_tx_info_priv *tx_info_priv;
957
958 BUG_ON(list_empty(bf_head));
959
960 bf = list_first_entry(bf_head, struct ath_buf, list);
961 bf->bf_isampdu = 0; /* regular HT frame */
962
963 skb = (struct sk_buff *)bf->bf_mpdu;
964 tx_info = IEEE80211_SKB_CB(skb);
965 tx_info_priv = (struct ath_tx_info_priv *)tx_info->driver_data[0];
966 memcpy(bf->bf_rcs, tx_info_priv->rcs, 4 * sizeof(tx_info_priv->rcs[0]));
967
968 /* update starting sequence number for subsequent ADDBA request */
969 INCR(tid->seq_start, IEEE80211_SEQ_MAX);
970
971 /* Queue to h/w without aggregation */
972 bf->bf_nframes = 1;
973 bf->bf_lastbf = bf->bf_lastfrm; /* one single frame */
974 ath_buf_set_rate(sc, bf);
975 ath_tx_txqaddbuf(sc, txq, bf_head);
976
977 return 0;
978}
979
980/* flush tid's software queue and send frames as non-ampdu's */
981
982static void ath_tx_flush_tid(struct ath_softc *sc, struct ath_atx_tid *tid)
983{
984 struct ath_txq *txq = &sc->sc_txq[tid->ac->qnum];
985 struct ath_buf *bf;
986 struct list_head bf_head;
987 INIT_LIST_HEAD(&bf_head);
988
989 ASSERT(tid->paused > 0);
990 spin_lock_bh(&txq->axq_lock);
991
992 tid->paused--;
993
994 if (tid->paused > 0) {
995 spin_unlock_bh(&txq->axq_lock);
996 return;
997 }
998
999 while (!list_empty(&tid->buf_q)) {
1000 bf = list_first_entry(&tid->buf_q, struct ath_buf, list);
1001 ASSERT(!bf->bf_isretried);
1002 list_cut_position(&bf_head, &tid->buf_q, &bf->bf_lastfrm->list);
1003 ath_tx_send_normal(sc, txq, tid, &bf_head);
1004 }
1005
1006 spin_unlock_bh(&txq->axq_lock);
1007}
1008
1009/* Completion routine of an aggregate */
1010
1011static void ath_tx_complete_aggr_rifs(struct ath_softc *sc,
1012 struct ath_txq *txq,
1013 struct ath_buf *bf,
1014 struct list_head *bf_q,
1015 int txok)
1016{
1017 struct ath_node *an = bf->bf_node;
1018 struct ath_atx_tid *tid = ATH_AN_2_TID(an, bf->bf_tidno);
1019 struct ath_buf *bf_last = bf->bf_lastbf;
1020 struct ath_desc *ds = bf_last->bf_desc;
1021 struct ath_buf *bf_next, *bf_lastq = NULL;
1022 struct list_head bf_head, bf_pending;
1023 u16 seq_st = 0;
1024 u32 ba[WME_BA_BMP_SIZE >> 5];
1025 int isaggr, txfail, txpending, sendbar = 0, needreset = 0;
1026 int isnodegone = (an->an_flags & ATH_NODE_CLEAN);
1027
1028 isaggr = bf->bf_isaggr;
1029 if (isaggr) {
1030 if (txok) {
1031 if (ATH_DS_TX_BA(ds)) {
1032 /*
1033 * extract starting sequence and
1034 * block-ack bitmap
1035 */
1036 seq_st = ATH_DS_BA_SEQ(ds);
1037 memcpy(ba,
1038 ATH_DS_BA_BITMAP(ds),
1039 WME_BA_BMP_SIZE >> 3);
1040 } else {
1041 memzero(ba, WME_BA_BMP_SIZE >> 3);
1042
1043 /*
1044 * AR5416 can become deaf/mute when a BA
1045 * issue occurs. The chip needs to be reset,
1046 * but the AP code may have synchronization issues
1047 * when performing an internal reset in this routine,
1048 * so only enable the reset in STA mode for now.
1049 */
1050 if (sc->sc_opmode == ATH9K_M_STA)
1051 needreset = 1;
1052 }
1053 } else {
1054 memzero(ba, WME_BA_BMP_SIZE >> 3);
1055 }
1056 }
1057
1058 INIT_LIST_HEAD(&bf_pending);
1059 INIT_LIST_HEAD(&bf_head);
1060
1061 while (bf) {
1062 txfail = txpending = 0;
1063 bf_next = bf->bf_next;
1064
1065 if (ATH_BA_ISSET(ba, ATH_BA_INDEX(seq_st, bf->bf_seqno))) {
1066 /* transmit completion, subframe is
1067 * acked by block ack */
1068 } else if (!isaggr && txok) {
1069 /* transmit completion */
1070 } else {
1071
1072 if (!tid->cleanup_inprogress && !isnodegone &&
1073 ds->ds_txstat.ts_flags != ATH9K_TX_SW_ABORTED) {
1074 if (bf->bf_retries < ATH_MAX_SW_RETRIES) {
1075 ath_tx_set_retry(sc, bf);
1076 txpending = 1;
1077 } else {
1078 bf->bf_isxretried = 1;
1079 txfail = 1;
1080 sendbar = 1;
1081 }
1082 } else {
1083 /*
1084 * cleanup in progress, just fail
1085 * the un-acked sub-frames
1086 */
1087 txfail = 1;
1088 }
1089 }
1090 /*
1091 * Remove ath_buf's of this sub-frame from aggregate queue.
1092 */
1093 if (bf_next == NULL) { /* last subframe in the aggregate */
1094 ASSERT(bf->bf_lastfrm == bf_last);
1095
1096 /*
1097 * The last descriptor of the last sub frame could be
1098 * a holding descriptor for h/w. If that's the case,
1099 * bf->bf_lastfrm won't be in the bf_q.
1100 * Make sure we handle bf_q properly here.
1101 */
1102
1103 if (!list_empty(bf_q)) {
1104 bf_lastq = list_entry(bf_q->prev,
1105 struct ath_buf, list);
1106 list_cut_position(&bf_head,
1107 bf_q, &bf_lastq->list);
1108 } else {
1109 /*
1110 * XXX: if the last subframe has only one
1111 * descriptor, which is also being used as
1112 * the holding descriptor, then the ath_buf
1113 * is not in the bf_q at all.
1114 */
1115 INIT_LIST_HEAD(&bf_head);
1116 }
1117 } else {
1118 ASSERT(!list_empty(bf_q));
1119 list_cut_position(&bf_head,
1120 bf_q, &bf->bf_lastfrm->list);
1121 }
1122
1123 if (!txpending) {
1124 /*
1125 * complete the acked-ones/xretried ones; update
1126 * block-ack window
1127 */
1128 spin_lock_bh(&txq->axq_lock);
1129 ath_tx_update_baw(sc, tid, bf->bf_seqno);
1130 spin_unlock_bh(&txq->axq_lock);
1131
1132 /* complete this sub-frame */
1133 ath_tx_complete_buf(sc, bf, &bf_head, !txfail, sendbar);
1134 } else {
1135 /*
1136 * retry the un-acked ones
1137 */
1138 /*
1139 * XXX: if the last descriptor is a holding descriptor,
1140 * then in order to requeue the frame to the software queue
1141 * we need to allocate a new descriptor and
1142 * copy the content of the holding descriptor to it.
1143 */
1144 if (bf->bf_next == NULL &&
1145 bf_last->bf_status & ATH_BUFSTATUS_STALE) {
1146 struct ath_buf *tbf;
1147
1148 /* allocate new descriptor */
1149 spin_lock_bh(&sc->sc_txbuflock);
1150 ASSERT(!list_empty((&sc->sc_txbuf)));
1151 tbf = list_first_entry(&sc->sc_txbuf,
1152 struct ath_buf, list);
1153 list_del(&tbf->list);
1154 spin_unlock_bh(&sc->sc_txbuflock);
1155
1156 ATH_TXBUF_RESET(tbf);
1157
1158 /* copy descriptor content */
1159 tbf->bf_mpdu = bf_last->bf_mpdu;
1160 tbf->bf_node = bf_last->bf_node;
1161 tbf->bf_buf_addr = bf_last->bf_buf_addr;
1162 *(tbf->bf_desc) = *(bf_last->bf_desc);
1163
1164 /* link it to the frame */
1165 if (bf_lastq) {
1166 bf_lastq->bf_desc->ds_link =
1167 tbf->bf_daddr;
1168 bf->bf_lastfrm = tbf;
1169 ath9k_hw_cleartxdesc(sc->sc_ah,
1170 bf->bf_lastfrm->bf_desc);
1171 } else {
1172 tbf->bf_state = bf_last->bf_state;
1173 tbf->bf_lastfrm = tbf;
1174 ath9k_hw_cleartxdesc(sc->sc_ah,
1175 tbf->bf_lastfrm->bf_desc);
1176
1177 /* copy the DMA context */
1178 copy_dma_mem_context(
1179 get_dma_mem_context(tbf,
1180 bf_dmacontext),
1181 get_dma_mem_context(bf_last,
1182 bf_dmacontext));
1183 }
1184 list_add_tail(&tbf->list, &bf_head);
1185 } else {
1186 /*
1187 * Clear descriptor status words for
1188 * software retry
1189 */
1190 ath9k_hw_cleartxdesc(sc->sc_ah,
1191 bf->bf_lastfrm->bf_desc);
1192 }
1193
1194 /*
1195 * Put this buffer to the temporary pending
1196 * queue to retain ordering
1197 */
1198 list_splice_tail_init(&bf_head, &bf_pending);
1199 }
1200
1201 bf = bf_next;
1202 }
1203
1204 /*
1205 * The node is already gone: there is no longer any association
1206 * with it, and it might have been freed, so any access to the
1207 * node can result in a panic. Note that the tid is part of
1208 * the node.
1209 */
1210 if (isnodegone)
1211 return;
1212
1213 if (tid->cleanup_inprogress) {
1214 /* check to see if we're done with cleaning the h/w queue */
1215 spin_lock_bh(&txq->axq_lock);
1216
1217 if (tid->baw_head == tid->baw_tail) {
1218 tid->addba_exchangecomplete = 0;
1219 tid->addba_exchangeattempts = 0;
1220 spin_unlock_bh(&txq->axq_lock);
1221
1222 tid->cleanup_inprogress = false;
1223
1224 /* send buffered frames as singles */
1225 ath_tx_flush_tid(sc, tid);
1226 } else
1227 spin_unlock_bh(&txq->axq_lock);
1228
1229 return;
1230 }
1231
1232 /*
1233 * prepend un-acked frames to the beginning of the pending frame queue
1234 */
1235 if (!list_empty(&bf_pending)) {
1236 spin_lock_bh(&txq->axq_lock);
1237 /* Note: we _prepend_, we _do_not_ add to
1238 * the end of the queue! */
1239 list_splice(&bf_pending, &tid->buf_q);
1240 ath_tx_queue_tid(txq, tid);
1241 spin_unlock_bh(&txq->axq_lock);
1242 }
1243
1244 if (needreset)
1245 ath_internal_reset(sc);
1246
1247 return;
1248}
1249
1250/* Process completed xmit descriptors from the specified queue */
1251
1252static int ath_tx_processq(struct ath_softc *sc, struct ath_txq *txq)
1253{
1254 struct ath_hal *ah = sc->sc_ah;
1255 struct ath_buf *bf, *lastbf, *bf_held = NULL;
1256 struct list_head bf_head;
1257 struct ath_desc *ds, *tmp_ds;
1258 struct sk_buff *skb;
1259 struct ieee80211_tx_info *tx_info;
1260 struct ath_tx_info_priv *tx_info_priv;
1261 int nacked, txok, nbad = 0, isrifs = 0;
1262 int status;
1263
1264 DPRINTF(sc, ATH_DBG_QUEUE,
1265 "%s: tx queue %d (%x), link %p\n", __func__,
1266 txq->axq_qnum, ath9k_hw_gettxbuf(sc->sc_ah, txq->axq_qnum),
1267 txq->axq_link);
1268
1269 nacked = 0;
1270 for (;;) {
1271 spin_lock_bh(&txq->axq_lock);
1272 txq->axq_intrcnt = 0; /* reset periodic desc intr count */
1273 if (list_empty(&txq->axq_q)) {
1274 txq->axq_link = NULL;
1275 txq->axq_linkbuf = NULL;
1276 spin_unlock_bh(&txq->axq_lock);
1277 break;
1278 }
1279 bf = list_first_entry(&txq->axq_q, struct ath_buf, list);
1280
1281 /*
1282 * There is a race condition that a BH gets scheduled
1283 * after sw writes TxE and before hw reloads the last
1284 * descriptor to get the newly chained one.
1285 * Software must keep the last DONE descriptor as a
1286 * holding descriptor - software does so by marking
1287 * it with the STALE flag.
1288 */
1289 bf_held = NULL;
1290 if (bf->bf_status & ATH_BUFSTATUS_STALE) {
1291 bf_held = bf;
1292 if (list_is_last(&bf_held->list, &txq->axq_q)) {
1293 /* FIXME:
1294 * The holding descriptor is the last
1295 * descriptor in queue. It's safe to remove
1296 * the last holding descriptor in BH context.
1297 */
1298 spin_unlock_bh(&txq->axq_lock);
1299 break;
1300 } else {
1301 /* Lets work with the next buffer now */
1302 bf = list_entry(bf_held->list.next,
1303 struct ath_buf, list);
1304 }
1305 }
1306
1307 lastbf = bf->bf_lastbf;
 1308		ds = lastbf->bf_desc;    /* NB: last descriptor */
1309
1310 status = ath9k_hw_txprocdesc(ah, ds);
1311 if (status == -EINPROGRESS) {
1312 spin_unlock_bh(&txq->axq_lock);
1313 break;
1314 }
1315 if (bf->bf_desc == txq->axq_lastdsWithCTS)
1316 txq->axq_lastdsWithCTS = NULL;
1317 if (ds == txq->axq_gatingds)
1318 txq->axq_gatingds = NULL;
1319
1320 /*
1321 * Remove ath_buf's of the same transmit unit from txq,
1322 * however leave the last descriptor back as the holding
1323 * descriptor for hw.
1324 */
1325 lastbf->bf_status |= ATH_BUFSTATUS_STALE;
1326 INIT_LIST_HEAD(&bf_head);
1327
1328 if (!list_is_singular(&lastbf->list))
1329 list_cut_position(&bf_head,
1330 &txq->axq_q, lastbf->list.prev);
1331
1332 txq->axq_depth--;
1333
1334 if (bf->bf_isaggr)
1335 txq->axq_aggr_depth--;
1336
1337 txok = (ds->ds_txstat.ts_status == 0);
1338
1339 spin_unlock_bh(&txq->axq_lock);
1340
1341 if (bf_held) {
1342 list_del(&bf_held->list);
1343 spin_lock_bh(&sc->sc_txbuflock);
1344 list_add_tail(&bf_held->list, &sc->sc_txbuf);
1345 spin_unlock_bh(&sc->sc_txbuflock);
1346 }
1347
1348 if (!bf->bf_isampdu) {
1349 /*
1350 * This frame is sent out as a single frame.
1351 * Use hardware retry status for this frame.
1352 */
1353 bf->bf_retries = ds->ds_txstat.ts_longretry;
1354 if (ds->ds_txstat.ts_status & ATH9K_TXERR_XRETRY)
1355 bf->bf_isxretried = 1;
1356 nbad = 0;
1357 } else {
1358 nbad = ath_tx_num_badfrms(sc, bf, txok);
1359 }
1360 skb = bf->bf_mpdu;
1361 tx_info = IEEE80211_SKB_CB(skb);
1362 tx_info_priv = (struct ath_tx_info_priv *)
1363 tx_info->driver_data[0];
1364 if (ds->ds_txstat.ts_status & ATH9K_TXERR_FILT)
1365 tx_info->flags |= IEEE80211_TX_STAT_TX_FILTERED;
1366 if ((ds->ds_txstat.ts_status & ATH9K_TXERR_FILT) == 0 &&
1367 (bf->bf_flags & ATH9K_TXDESC_NOACK) == 0) {
1368 if (ds->ds_txstat.ts_status == 0)
1369 nacked++;
1370
1371 if (bf->bf_isdata) {
1372 if (isrifs)
1373 tmp_ds = bf->bf_rifslast->bf_desc;
1374 else
1375 tmp_ds = ds;
1376 memcpy(&tx_info_priv->tx,
1377 &tmp_ds->ds_txstat,
1378 sizeof(tx_info_priv->tx));
1379 tx_info_priv->n_frames = bf->bf_nframes;
1380 tx_info_priv->n_bad_frames = nbad;
1381 }
1382 }
1383
1384 /*
1385 * Complete this transmit unit
1386 */
1387 if (bf->bf_isampdu)
1388 ath_tx_complete_aggr_rifs(sc, txq, bf, &bf_head, txok);
1389 else
1390 ath_tx_complete_buf(sc, bf, &bf_head, txok, 0);
1391
1392 /* Wake up mac80211 queue */
1393
1394 spin_lock_bh(&txq->axq_lock);
1395 if (txq->stopped && ath_txq_depth(sc, txq->axq_qnum) <=
1396 (ATH_TXBUF - 20)) {
1397 int qnum;
1398 qnum = ath_get_mac80211_qnum(txq->axq_qnum, sc);
1399 if (qnum != -1) {
1400 ieee80211_wake_queue(sc->hw, qnum);
1401 txq->stopped = 0;
1402 }
1403
1404 }
1405
1406 /*
1407 * schedule any pending packets if aggregation is enabled
1408 */
1409 if (sc->sc_txaggr)
1410 ath_txq_schedule(sc, txq);
1411 spin_unlock_bh(&txq->axq_lock);
1412 }
1413 return nacked;
1414}
1415
1416static void ath_tx_stopdma(struct ath_softc *sc, struct ath_txq *txq)
1417{
1418 struct ath_hal *ah = sc->sc_ah;
1419
1420 (void) ath9k_hw_stoptxdma(ah, txq->axq_qnum);
1421 DPRINTF(sc, ATH_DBG_XMIT, "%s: tx queue [%u] %x, link %p\n",
1422 __func__, txq->axq_qnum,
1423 ath9k_hw_gettxbuf(ah, txq->axq_qnum), txq->axq_link);
1424}
1425
1426/* Drain only the data queues */
1427
1428static void ath_drain_txdataq(struct ath_softc *sc, bool retry_tx)
1429{
1430 struct ath_hal *ah = sc->sc_ah;
1431 int i;
1432 int npend = 0;
1433 enum ath9k_ht_macmode ht_macmode = ath_cwm_macmode(sc);
1434
1435 /* XXX return value */
1436 if (!sc->sc_invalid) {
1437 for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
1438 if (ATH_TXQ_SETUP(sc, i)) {
1439 ath_tx_stopdma(sc, &sc->sc_txq[i]);
1440
1441 /* The TxDMA may not really be stopped.
1442 * Double check the hal tx pending count */
1443 npend += ath9k_hw_numtxpending(ah,
1444 sc->sc_txq[i].axq_qnum);
1445 }
1446 }
1447 }
1448
1449 if (npend) {
1450 int status;
1451
1452 /* TxDMA not stopped, reset the hal */
1453 DPRINTF(sc, ATH_DBG_XMIT,
1454 "%s: Unable to stop TxDMA. Reset HAL!\n", __func__);
1455
1456 spin_lock_bh(&sc->sc_resetlock);
1457 if (!ath9k_hw_reset(ah, sc->sc_opmode,
1458 &sc->sc_curchan, ht_macmode,
1459 sc->sc_tx_chainmask, sc->sc_rx_chainmask,
1460 sc->sc_ht_extprotspacing, true, &status)) {
1461
1462 DPRINTF(sc, ATH_DBG_FATAL,
1463 "%s: unable to reset hardware; hal status %u\n",
1464 __func__,
1465 status);
1466 }
1467 spin_unlock_bh(&sc->sc_resetlock);
1468 }
1469
1470 for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
1471 if (ATH_TXQ_SETUP(sc, i))
1472 ath_tx_draintxq(sc, &sc->sc_txq[i], retry_tx);
1473 }
1474}
1475
1476/* Add a sub-frame to block ack window */
1477
1478static void ath_tx_addto_baw(struct ath_softc *sc,
1479 struct ath_atx_tid *tid,
1480 struct ath_buf *bf)
1481{
1482 int index, cindex;
1483
1484 if (bf->bf_isretried)
1485 return;
1486
1487 index = ATH_BA_INDEX(tid->seq_start, bf->bf_seqno);
1488 cindex = (tid->baw_head + index) & (ATH_TID_MAX_BUFS - 1);
1489
1490 ASSERT(tid->tx_buf[cindex] == NULL);
1491 tid->tx_buf[cindex] = bf;
1492
1493 if (index >= ((tid->baw_tail - tid->baw_head) &
1494 (ATH_TID_MAX_BUFS - 1))) {
1495 tid->baw_tail = cindex;
1496 INCR(tid->baw_tail, ATH_TID_MAX_BUFS);
1497 }
1498}
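
For reference, a minimal stand-alone sketch of the block-ack window index arithmetic performed by ath_tx_addto_baw() above. The ILLUSTRATIVE_* macros and the sample numbers are assumptions made up for this note; the driver's own ATH_BA_INDEX() and ATH_TID_MAX_BUFS definitions are authoritative.

#include <stdio.h>

/* assumed power-of-two window size, standing in for ATH_TID_MAX_BUFS */
#define ILLUSTRATIVE_TID_MAX_BUFS	64
/* same shape as the driver's ATH_BA_INDEX(): distance of a sequence number
 * from the window start, wrapped at the 12-bit 802.11 sequence space */
#define ILLUSTRATIVE_BA_INDEX(first, seqno)	(((seqno) - (first)) & 4095)

int main(void)
{
	int seq_start = 4094;	/* window starts just before the seqno wrap */
	int baw_head = 5;	/* current head slot of tid->tx_buf[] */
	int seqno = 2;		/* subframe seqno that wrapped past 4095 */

	int index = ILLUSTRATIVE_BA_INDEX(seq_start, seqno);
	int cindex = (baw_head + index) & (ILLUSTRATIVE_TID_MAX_BUFS - 1);

	/* prints index=4 cindex=9: the wrapped seqno still maps in-window */
	printf("index=%d cindex=%d\n", index, cindex);
	return 0;
}
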
1499
1500/*
1501 * Function to send an A-MPDU
1502 * NB: must be called with txq lock held
1503 */
1504
1505static int ath_tx_send_ampdu(struct ath_softc *sc,
1506 struct ath_txq *txq,
1507 struct ath_atx_tid *tid,
1508 struct list_head *bf_head,
1509 struct ath_tx_control *txctl)
1510{
1511 struct ath_buf *bf;
1512 struct sk_buff *skb;
1513 struct ieee80211_tx_info *tx_info;
1514 struct ath_tx_info_priv *tx_info_priv;
1515
1516 BUG_ON(list_empty(bf_head));
1517
1518 bf = list_first_entry(bf_head, struct ath_buf, list);
1519 bf->bf_isampdu = 1;
1520 bf->bf_seqno = txctl->seqno; /* save seqno and tidno in buffer */
1521 bf->bf_tidno = txctl->tidno;
1522
1523 /*
1524 * Do not queue to h/w when any of the following conditions is true:
1525 * - there are pending frames in software queue
1526 * - the TID is currently paused for ADDBA/BAR request
1527 * - seqno is not within block-ack window
1528 * - h/w queue depth exceeds low water mark
1529 */
1530 if (!list_empty(&tid->buf_q) || tid->paused ||
1531 !BAW_WITHIN(tid->seq_start, tid->baw_size, bf->bf_seqno) ||
1532 txq->axq_depth >= ATH_AGGR_MIN_QDEPTH) {
1533 /*
1534 * Add this frame to software queue for scheduling later
1535 * for aggregation.
1536 */
1537 list_splice_tail_init(bf_head, &tid->buf_q);
1538 ath_tx_queue_tid(txq, tid);
1539 return 0;
1540 }
1541
1542 skb = (struct sk_buff *)bf->bf_mpdu;
1543 tx_info = IEEE80211_SKB_CB(skb);
1544 tx_info_priv = (struct ath_tx_info_priv *)tx_info->driver_data[0];
1545 memcpy(bf->bf_rcs, tx_info_priv->rcs, 4 * sizeof(tx_info_priv->rcs[0]));
1546
1547 /* Add sub-frame to BAW */
1548 ath_tx_addto_baw(sc, tid, bf);
1549
1550 /* Queue to h/w without aggregation */
1551 bf->bf_nframes = 1;
1552 bf->bf_lastbf = bf->bf_lastfrm; /* one single frame */
1553 ath_buf_set_rate(sc, bf);
1554 ath_tx_txqaddbuf(sc, txq, bf_head);
1555 return 0;
1556}
1557
1558/*
 1559 * Looks up the rate and returns the aggregation limit
 1560 * based on the lowest of the rates.
1561 */
1562
1563static u32 ath_lookup_rate(struct ath_softc *sc,
1564 struct ath_buf *bf)
1565{
1566 const struct ath9k_rate_table *rt = sc->sc_currates;
1567 struct sk_buff *skb;
1568 struct ieee80211_tx_info *tx_info;
1569 struct ath_tx_info_priv *tx_info_priv;
1570 u32 max_4ms_framelen, frame_length;
1571 u16 aggr_limit, legacy = 0, maxampdu;
1572 int i;
1573
1574
1575 skb = (struct sk_buff *)bf->bf_mpdu;
1576 tx_info = IEEE80211_SKB_CB(skb);
1577 tx_info_priv = (struct ath_tx_info_priv *)
1578 tx_info->driver_data[0];
1579 memcpy(bf->bf_rcs,
1580 tx_info_priv->rcs, 4 * sizeof(tx_info_priv->rcs[0]));
1581
1582 /*
1583 * Find the lowest frame length among the rate series that will have a
1584 * 4ms transmit duration.
1585 * TODO - TXOP limit needs to be considered.
1586 */
1587 max_4ms_framelen = ATH_AMPDU_LIMIT_MAX;
1588
1589 for (i = 0; i < 4; i++) {
1590 if (bf->bf_rcs[i].tries) {
1591 frame_length = bf->bf_rcs[i].max_4ms_framelen;
1592
1593 if (rt->info[bf->bf_rcs[i].rix].phy != PHY_HT) {
1594 legacy = 1;
1595 break;
1596 }
1597
1598 max_4ms_framelen = min(max_4ms_framelen, frame_length);
1599 }
1600 }
1601
1602 /*
 1603	 * Limit the aggregate size by the minimum rate if the selected rate
 1604	 * is not a probe rate; if the selected rate is a probe rate,
 1605	 * avoid aggregating this packet.
1606 */
1607 if (tx_info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE || legacy)
1608 return 0;
1609
1610 aggr_limit = min(max_4ms_framelen,
1611 (u32)ATH_AMPDU_LIMIT_DEFAULT);
1612
1613 /*
 1614	 * h/w can accept aggregates up to 16-bit lengths (65535).
 1615	 * The IE, however, can hold up to 65536, which shows up here
 1616	 * as zero. Ignore 65536 since we are constrained by hw.
1617 */
1618 maxampdu = sc->sc_ht_info.maxampdu;
1619 if (maxampdu)
1620 aggr_limit = min(aggr_limit, maxampdu);
1621
1622 return aggr_limit;
1623}
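
To make the clamping order in ath_lookup_rate() easier to follow, here is a stand-alone sketch. The EXAMPLE_* constants, the per-rate 4 ms caps and the peer A-MPDU cap are assumed illustrative values, not the driver's real numbers.

#include <stdio.h>

#define EXAMPLE_AMPDU_LIMIT_MAX		65535	/* assumed upper bound */
#define EXAMPLE_AMPDU_LIMIT_DEFAULT	32768	/* assumed default cap */

int main(void)
{
	/* 4 ms frame-length caps for the four rate-series entries (0 = unused) */
	unsigned int per_rate_cap[4] = { 50000, 24000, 0, 0 };
	unsigned int max_4ms_framelen = EXAMPLE_AMPDU_LIMIT_MAX;
	unsigned int maxampdu = 8192;	/* peer A-MPDU cap from the HT IE */
	unsigned int aggr_limit;
	int i;

	/* take the smallest cap among the rates that are actually tried */
	for (i = 0; i < 4; i++)
		if (per_rate_cap[i] && per_rate_cap[i] < max_4ms_framelen)
			max_4ms_framelen = per_rate_cap[i];

	aggr_limit = max_4ms_framelen < EXAMPLE_AMPDU_LIMIT_DEFAULT ?
		     max_4ms_framelen : EXAMPLE_AMPDU_LIMIT_DEFAULT;
	if (maxampdu && maxampdu < aggr_limit)	/* 0 means "65536", i.e. no cap */
		aggr_limit = maxampdu;

	printf("aggr_limit=%u\n", aggr_limit);	/* prints 8192 */
	return 0;
}
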
1624
1625/*
 1626 * Returns the number of delimiters to be added to
 1627 * meet the minimum required MPDU density.
 1628 * The caller should make sure that the rate is an HT rate.
1629 */
1630
1631static int ath_compute_num_delims(struct ath_softc *sc,
1632 struct ath_buf *bf,
1633 u16 frmlen)
1634{
1635 const struct ath9k_rate_table *rt = sc->sc_currates;
1636 u32 nsymbits, nsymbols, mpdudensity;
1637 u16 minlen;
1638 u8 rc, flags, rix;
1639 int width, half_gi, ndelim, mindelim;
1640
1641 /* Select standard number of delimiters based on frame length alone */
1642 ndelim = ATH_AGGR_GET_NDELIM(frmlen);
1643
1644 /*
1645 * If encryption enabled, hardware requires some more padding between
1646 * subframes.
1647 * TODO - this could be improved to be dependent on the rate.
1648 * The hardware can keep up at lower rates, but not higher rates
1649 */
1650 if (bf->bf_keytype != ATH9K_KEY_TYPE_CLEAR)
1651 ndelim += ATH_AGGR_ENCRYPTDELIM;
1652
1653 /*
 1654	 * Convert the desired MPDU density from microseconds to bytes based
1655 * on highest rate in rate series (i.e. first rate) to determine
1656 * required minimum length for subframe. Take into account
 1657	 * whether the high rate is 20 or 40 MHz and half or full GI.
1658 */
1659 mpdudensity = sc->sc_ht_info.mpdudensity;
1660
1661 /*
1662 * If there is no mpdu density restriction, no further calculation
1663 * is needed.
1664 */
1665 if (mpdudensity == 0)
1666 return ndelim;
1667
1668 rix = bf->bf_rcs[0].rix;
1669 flags = bf->bf_rcs[0].flags;
1670 rc = rt->info[rix].rateCode;
1671 width = (flags & ATH_RC_CW40_FLAG) ? 1 : 0;
1672 half_gi = (flags & ATH_RC_SGI_FLAG) ? 1 : 0;
1673
1674 if (half_gi)
1675 nsymbols = NUM_SYMBOLS_PER_USEC_HALFGI(mpdudensity);
1676 else
1677 nsymbols = NUM_SYMBOLS_PER_USEC(mpdudensity);
1678
1679 if (nsymbols == 0)
1680 nsymbols = 1;
1681
1682 nsymbits = bits_per_symbol[HT_RC_2_MCS(rc)][width];
1683 minlen = (nsymbols * nsymbits) / BITS_PER_BYTE;
1684
1685 /* Is frame shorter than required minimum length? */
1686 if (frmlen < minlen) {
1687 /* Get the minimum number of delimiters required. */
1688 mindelim = (minlen - frmlen) / ATH_AGGR_DELIM_SZ;
1689 ndelim = max(mindelim, ndelim);
1690 }
1691
1692 return ndelim;
1693}
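
To make the delimiter computation above concrete, a self-contained sketch with assumed timing numbers: a 4 us OFDM symbol and 260 data bits per symbol (roughly MCS7 at 20 MHz, long GI). The driver's bits_per_symbol[] table and NUM_SYMBOLS_PER_USEC* macros are the authoritative source for these values.

#include <stdio.h>

int main(void)
{
	unsigned int mpdudensity_us = 8;	/* peer-required MPDU spacing */
	unsigned int nsymbols = mpdudensity_us / 4;	/* 4 us per symbol -> 2 */
	unsigned int nsymbits = 260;		/* assumed bits per symbol */
	unsigned int delim_sz = 4;		/* one delimiter is 4 bytes */
	unsigned int frmlen = 40;		/* a short subframe */
	unsigned int ndelim = 0;		/* base delimiter count */

	unsigned int minlen = (nsymbols * nsymbits) / 8;	/* 65 bytes */
	if (frmlen < minlen) {
		unsigned int mindelim = (minlen - frmlen) / delim_sz;	/* 6 */
		if (mindelim > ndelim)
			ndelim = mindelim;
	}
	/* prints minlen=65 ndelim=6: six extra delimiters pad the subframe */
	printf("minlen=%u ndelim=%u\n", minlen, ndelim);
	return 0;
}
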
1694
1695/*
1696 * For aggregation from software buffer queue.
1697 * NB: must be called with txq lock held
1698 */
1699
1700static enum ATH_AGGR_STATUS ath_tx_form_aggr(struct ath_softc *sc,
1701 struct ath_atx_tid *tid,
1702 struct list_head *bf_q,
1703 struct ath_buf **bf_last,
1704 struct aggr_rifs_param *param,
1705 int *prev_frames)
1706{
1707#define PADBYTES(_len) ((4 - ((_len) % 4)) % 4)
1708 struct ath_buf *bf, *tbf, *bf_first, *bf_prev = NULL;
1709 struct list_head bf_head;
1710 int rl = 0, nframes = 0, ndelim;
1711 u16 aggr_limit = 0, al = 0, bpad = 0,
1712 al_delta, h_baw = tid->baw_size / 2;
1713 enum ATH_AGGR_STATUS status = ATH_AGGR_DONE;
1714 int prev_al = 0, is_ds_rate = 0;
1715 INIT_LIST_HEAD(&bf_head);
1716
1717 BUG_ON(list_empty(&tid->buf_q));
1718
1719 bf_first = list_first_entry(&tid->buf_q, struct ath_buf, list);
1720
1721 do {
1722 bf = list_first_entry(&tid->buf_q, struct ath_buf, list);
1723
1724 /*
1725 * do not step over block-ack window
1726 */
1727 if (!BAW_WITHIN(tid->seq_start, tid->baw_size, bf->bf_seqno)) {
1728 status = ATH_AGGR_BAW_CLOSED;
1729 break;
1730 }
1731
1732 if (!rl) {
1733 aggr_limit = ath_lookup_rate(sc, bf);
1734 rl = 1;
1735 /*
1736 * Is rate dual stream
1737 */
1738 is_ds_rate =
1739 (bf->bf_rcs[0].flags & ATH_RC_DS_FLAG) ? 1 : 0;
1740 }
1741
1742 /*
1743 * do not exceed aggregation limit
1744 */
1745 al_delta = ATH_AGGR_DELIM_SZ + bf->bf_frmlen;
1746
1747 if (nframes && (aggr_limit <
1748 (al + bpad + al_delta + prev_al))) {
1749 status = ATH_AGGR_LIMITED;
1750 break;
1751 }
1752
1753 /*
1754 * do not exceed subframe limit
1755 */
1756 if ((nframes + *prev_frames) >=
1757 min((int)h_baw, ATH_AMPDU_SUBFRAME_DEFAULT)) {
1758 status = ATH_AGGR_LIMITED;
1759 break;
1760 }
1761
1762 /*
1763 * add padding for previous frame to aggregation length
1764 */
1765 al += bpad + al_delta;
1766
1767 /*
1768 * Get the delimiters needed to meet the MPDU
1769 * density for this node.
1770 */
1771 ndelim = ath_compute_num_delims(sc, bf_first, bf->bf_frmlen);
1772
1773 bpad = PADBYTES(al_delta) + (ndelim << 2);
1774
1775 bf->bf_next = NULL;
1776 bf->bf_lastfrm->bf_desc->ds_link = 0;
1777
1778 /*
1779 * this packet is part of an aggregate
1780 * - remove all descriptors belonging to this frame from
1781 * software queue
1782 * - add it to block ack window
1783 * - set up descriptors for aggregation
1784 */
1785 list_cut_position(&bf_head, &tid->buf_q, &bf->bf_lastfrm->list);
1786 ath_tx_addto_baw(sc, tid, bf);
1787
1788 list_for_each_entry(tbf, &bf_head, list) {
1789 ath9k_hw_set11n_aggr_middle(sc->sc_ah,
1790 tbf->bf_desc, ndelim);
1791 }
1792
1793 /*
1794 * link buffers of this frame to the aggregate
1795 */
1796 list_splice_tail_init(&bf_head, bf_q);
1797 nframes++;
1798
1799 if (bf_prev) {
1800 bf_prev->bf_next = bf;
1801 bf_prev->bf_lastfrm->bf_desc->ds_link = bf->bf_daddr;
1802 }
1803 bf_prev = bf;
1804
1805#ifdef AGGR_NOSHORT
1806 /*
1807 * terminate aggregation on a small packet boundary
1808 */
1809 if (bf->bf_frmlen < ATH_AGGR_MINPLEN) {
1810 status = ATH_AGGR_SHORTPKT;
1811 break;
1812 }
1813#endif
1814 } while (!list_empty(&tid->buf_q));
1815
1816 bf_first->bf_al = al;
1817 bf_first->bf_nframes = nframes;
1818 *bf_last = bf_prev;
1819 return status;
1820#undef PADBYTES
1821}
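
The running-length bookkeeping in ath_tx_form_aggr() can be traced with a small stand-alone example. The two subframe lengths and the ndelim value are assumptions chosen only to show how PADBYTES() and the delimiter bytes of a frame are charged to the aggregate when the next frame is added.

#include <stdio.h>

#define PADBYTES(_len)	((4 - ((_len) % 4)) % 4)	/* pad to 4-byte boundary */

int main(void)
{
	unsigned int frmlen[2] = { 1534, 98 };	/* assumed subframe lengths */
	unsigned int delim_sz = 4;		/* mirrors ATH_AGGR_DELIM_SZ */
	unsigned int ndelim = 2;		/* delimiters after each frame */
	unsigned int al = 0, bpad = 0, al_delta;
	int i;

	for (i = 0; i < 2; i++) {
		al_delta = delim_sz + frmlen[i];	/* delimiter + MPDU */
		al += bpad + al_delta;	/* previous frame's padding counts now */
		bpad = PADBYTES(al_delta) + (ndelim << 2);
	}
	/* prints al=1650 bpad=10 for the two sample subframes */
	printf("al=%u bpad=%u\n", al, bpad);
	return 0;
}
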
1822
1823/*
1824 * process pending frames possibly doing a-mpdu aggregation
1825 * NB: must be called with txq lock held
1826 */
1827
1828static void ath_tx_sched_aggr(struct ath_softc *sc,
1829 struct ath_txq *txq, struct ath_atx_tid *tid)
1830{
1831 struct ath_buf *bf, *tbf, *bf_last, *bf_lastaggr = NULL;
1832 enum ATH_AGGR_STATUS status;
1833 struct list_head bf_q;
1834 struct aggr_rifs_param param = {0, 0, 0, 0, NULL};
1835 int prev_frames = 0;
1836
1837 do {
1838 if (list_empty(&tid->buf_q))
1839 return;
1840
1841 INIT_LIST_HEAD(&bf_q);
1842
1843 status = ath_tx_form_aggr(sc, tid, &bf_q, &bf_lastaggr, &param,
1844 &prev_frames);
1845
1846 /*
1847 * no frames picked up to be aggregated; block-ack
1848 * window is not open
1849 */
1850 if (list_empty(&bf_q))
1851 break;
1852
1853 bf = list_first_entry(&bf_q, struct ath_buf, list);
1854 bf_last = list_entry(bf_q.prev, struct ath_buf, list);
1855 bf->bf_lastbf = bf_last;
1856
1857 /*
1858 * if only one frame, send as non-aggregate
1859 */
1860 if (bf->bf_nframes == 1) {
1861 ASSERT(bf->bf_lastfrm == bf_last);
1862
1863 bf->bf_isaggr = 0;
1864 /*
1865 * clear aggr bits for every descriptor
1866 * XXX TODO: is there a way to optimize it?
1867 */
1868 list_for_each_entry(tbf, &bf_q, list) {
1869 ath9k_hw_clr11n_aggr(sc->sc_ah, tbf->bf_desc);
1870 }
1871
1872 ath_buf_set_rate(sc, bf);
1873 ath_tx_txqaddbuf(sc, txq, &bf_q);
1874 continue;
1875 }
1876
1877 /*
1878 * setup first desc with rate and aggr info
1879 */
1880 bf->bf_isaggr = 1;
1881 ath_buf_set_rate(sc, bf);
1882 ath9k_hw_set11n_aggr_first(sc->sc_ah, bf->bf_desc, bf->bf_al);
1883
1884 /*
1885 * anchor last frame of aggregate correctly
1886 */
1887 ASSERT(bf_lastaggr);
1888 ASSERT(bf_lastaggr->bf_lastfrm == bf_last);
1889 tbf = bf_lastaggr;
1890 ath9k_hw_set11n_aggr_last(sc->sc_ah, tbf->bf_desc);
1891
1892 /* XXX: We don't enter into this loop, consider removing this */
1893 while (!list_empty(&bf_q) && !list_is_last(&tbf->list, &bf_q)) {
1894 tbf = list_entry(tbf->list.next, struct ath_buf, list);
1895 ath9k_hw_set11n_aggr_last(sc->sc_ah, tbf->bf_desc);
1896 }
1897
1898 txq->axq_aggr_depth++;
1899
1900 /*
1901 * Normal aggregate, queue to hardware
1902 */
1903 ath_tx_txqaddbuf(sc, txq, &bf_q);
1904
1905 } while (txq->axq_depth < ATH_AGGR_MIN_QDEPTH &&
1906 status != ATH_AGGR_BAW_CLOSED);
1907}
1908
1909/* Called with txq lock held */
1910
1911static void ath_tid_drain(struct ath_softc *sc,
1912 struct ath_txq *txq,
1913 struct ath_atx_tid *tid,
1914 bool bh_flag)
1915{
1916 struct ath_buf *bf;
1917 struct list_head bf_head;
1918 INIT_LIST_HEAD(&bf_head);
1919
1920 for (;;) {
1921 if (list_empty(&tid->buf_q))
1922 break;
1923 bf = list_first_entry(&tid->buf_q, struct ath_buf, list);
1924
1925 list_cut_position(&bf_head, &tid->buf_q, &bf->bf_lastfrm->list);
1926
1927 /* update baw for software retried frame */
1928 if (bf->bf_isretried)
1929 ath_tx_update_baw(sc, tid, bf->bf_seqno);
1930
1931 /*
1932 * do not indicate packets while holding txq spinlock.
1933 * unlock is intentional here
1934 */
1935 if (likely(bh_flag))
1936 spin_unlock_bh(&txq->axq_lock);
1937 else
1938 spin_unlock(&txq->axq_lock);
1939
1940 /* complete this sub-frame */
1941 ath_tx_complete_buf(sc, bf, &bf_head, 0, 0);
1942
1943 if (likely(bh_flag))
1944 spin_lock_bh(&txq->axq_lock);
1945 else
1946 spin_lock(&txq->axq_lock);
1947 }
1948
1949 /*
1950 * TODO: For frame(s) that are in the retry state, we will reuse the
1951 * sequence number(s) without setting the retry bit. The
1952 * alternative is to give up on these and BAR the receiver's window
1953 * forward.
1954 */
1955 tid->seq_next = tid->seq_start;
1956 tid->baw_tail = tid->baw_head;
1957}
1958
1959/*
1960 * Drain all pending buffers
1961 * NB: must be called with txq lock held
1962 */
1963
1964static void ath_txq_drain_pending_buffers(struct ath_softc *sc,
1965 struct ath_txq *txq,
1966 bool bh_flag)
1967{
1968 struct ath_atx_ac *ac, *ac_tmp;
1969 struct ath_atx_tid *tid, *tid_tmp;
1970
1971 list_for_each_entry_safe(ac, ac_tmp, &txq->axq_acq, list) {
1972 list_del(&ac->list);
1973 ac->sched = false;
1974 list_for_each_entry_safe(tid, tid_tmp, &ac->tid_q, list) {
1975 list_del(&tid->list);
1976 tid->sched = false;
1977 ath_tid_drain(sc, txq, tid, bh_flag);
1978 }
1979 }
1980}
1981
1982static int ath_tx_start_dma(struct ath_softc *sc,
1983 struct sk_buff *skb,
1984 struct scatterlist *sg,
1985 u32 n_sg,
1986 struct ath_tx_control *txctl)
1987{
1988 struct ath_node *an = txctl->an;
1989 struct ath_buf *bf = NULL;
1990 struct list_head bf_head;
1991 struct ath_desc *ds;
1992 struct ath_hal *ah = sc->sc_ah;
1993 struct ath_txq *txq = &sc->sc_txq[txctl->qnum];
1994 struct ath_tx_info_priv *tx_info_priv;
1995 struct ath_rc_series *rcs;
1996 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
1997 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
1998 __le16 fc = hdr->frame_control;
1999
2000 /* For each sglist entry, allocate an ath_buf for DMA */
2001 INIT_LIST_HEAD(&bf_head);
2002 spin_lock_bh(&sc->sc_txbuflock);
2003 if (unlikely(list_empty(&sc->sc_txbuf))) {
2004 spin_unlock_bh(&sc->sc_txbuflock);
2005 return -ENOMEM;
2006 }
2007
2008 bf = list_first_entry(&sc->sc_txbuf, struct ath_buf, list);
2009 list_del(&bf->list);
2010 spin_unlock_bh(&sc->sc_txbuflock);
2011
2012 list_add_tail(&bf->list, &bf_head);
2013
2014 /* set up this buffer */
2015 ATH_TXBUF_RESET(bf);
2016 bf->bf_frmlen = txctl->frmlen;
2017 bf->bf_isdata = ieee80211_is_data(fc);
2018 bf->bf_isbar = ieee80211_is_back_req(fc);
2019 bf->bf_ispspoll = ieee80211_is_pspoll(fc);
2020 bf->bf_flags = txctl->flags;
2021 bf->bf_shpreamble = sc->sc_flags & ATH_PREAMBLE_SHORT;
2022 bf->bf_keytype = txctl->keytype;
2023 tx_info_priv = (struct ath_tx_info_priv *)tx_info->driver_data[0];
2024 rcs = tx_info_priv->rcs;
2025 bf->bf_rcs[0] = rcs[0];
2026 bf->bf_rcs[1] = rcs[1];
2027 bf->bf_rcs[2] = rcs[2];
2028 bf->bf_rcs[3] = rcs[3];
2029 bf->bf_node = an;
2030 bf->bf_mpdu = skb;
2031 bf->bf_buf_addr = sg_dma_address(sg);
2032
2033 /* setup descriptor */
2034 ds = bf->bf_desc;
2035 ds->ds_link = 0;
2036 ds->ds_data = bf->bf_buf_addr;
2037
2038 /*
2039 * Save the DMA context in the first ath_buf
2040 */
2041 copy_dma_mem_context(get_dma_mem_context(bf, bf_dmacontext),
2042 get_dma_mem_context(txctl, dmacontext));
2043
2044 /*
2045 * Formulate first tx descriptor with tx controls.
2046 */
2047 ath9k_hw_set11n_txdesc(ah,
2048 ds,
2049 bf->bf_frmlen, /* frame length */
2050 txctl->atype, /* Atheros packet type */
2051 min(txctl->txpower, (u16)60), /* txpower */
2052 txctl->keyix, /* key cache index */
2053 txctl->keytype, /* key type */
2054 txctl->flags); /* flags */
2055 ath9k_hw_filltxdesc(ah,
2056 ds,
2057 sg_dma_len(sg), /* segment length */
2058 true, /* first segment */
2059 (n_sg == 1) ? true : false, /* last segment */
2060 ds); /* first descriptor */
2061
2062 bf->bf_lastfrm = bf;
2063 bf->bf_ht = txctl->ht;
2064
2065 spin_lock_bh(&txq->axq_lock);
2066
2067 if (txctl->ht && sc->sc_txaggr) {
2068 struct ath_atx_tid *tid = ATH_AN_2_TID(an, txctl->tidno);
2069 if (ath_aggr_query(sc, an, txctl->tidno)) {
2070 /*
2071 * Try aggregation if it's a unicast data frame
2072 * and the destination is HT capable.
2073 */
2074 ath_tx_send_ampdu(sc, txq, tid, &bf_head, txctl);
2075 } else {
2076 /*
2077 * Send this frame as regular when ADDBA exchange
2078 * is neither complete nor pending.
2079 */
2080 ath_tx_send_normal(sc, txq, tid, &bf_head);
2081 }
2082 } else {
2083 bf->bf_lastbf = bf;
2084 bf->bf_nframes = 1;
2085 ath_buf_set_rate(sc, bf);
2086
2087 if (ieee80211_is_back_req(fc)) {
2088 /* This is required for resuming tid
2089 * during BAR completion */
2090 bf->bf_tidno = txctl->tidno;
2091 }
2092
2093 if (is_multicast_ether_addr(hdr->addr1)) {
2094 struct ath_vap *avp = sc->sc_vaps[txctl->if_id];
2095
2096 /*
 2097			 * When servicing one or more stations in power-save
 2098			 * mode, or if there is some mcast data waiting on the
 2099			 * mcast queue (to prevent out-of-order delivery of
 2100			 * mcast/bcast packets), multicast frames must be
 2101			 * buffered until after the beacon. We use the private
 2102			 * mcast queue for that.
2103 */
2104 /* XXX? more bit in 802.11 frame header */
2105 spin_lock_bh(&avp->av_mcastq.axq_lock);
2106 if (txctl->ps || avp->av_mcastq.axq_depth)
2107 ath_tx_mcastqaddbuf(sc,
2108 &avp->av_mcastq, &bf_head);
2109 else
2110 ath_tx_txqaddbuf(sc, txq, &bf_head);
2111 spin_unlock_bh(&avp->av_mcastq.axq_lock);
2112 } else
2113 ath_tx_txqaddbuf(sc, txq, &bf_head);
2114 }
2115 spin_unlock_bh(&txq->axq_lock);
2116 return 0;
2117}
2118
2119static void xmit_map_sg(struct ath_softc *sc,
2120 struct sk_buff *skb,
2121 dma_addr_t *pa,
2122 struct ath_tx_control *txctl)
2123{
2124 struct ath_xmit_status tx_status;
2125 struct ath_atx_tid *tid;
2126 struct scatterlist sg;
2127
2128 *pa = pci_map_single(sc->pdev, skb->data, skb->len, PCI_DMA_TODEVICE);
2129
2130 /* setup S/G list */
2131 memset(&sg, 0, sizeof(struct scatterlist));
2132 sg_dma_address(&sg) = *pa;
2133 sg_dma_len(&sg) = skb->len;
2134
2135 if (ath_tx_start_dma(sc, skb, &sg, 1, txctl) != 0) {
2136 /*
2137 * We have to do drop frame here.
2138 */
2139 pci_unmap_single(sc->pdev, *pa, skb->len, PCI_DMA_TODEVICE);
2140
2141 tx_status.retries = 0;
2142 tx_status.flags = ATH_TX_ERROR;
2143
2144 if (txctl->ht && sc->sc_txaggr) {
2145 /* Reclaim the seqno. */
2146 tid = ATH_AN_2_TID((struct ath_node *)
2147 txctl->an, txctl->tidno);
2148 DECR(tid->seq_next, IEEE80211_SEQ_MAX);
2149 }
2150 ath_tx_complete(sc, skb, &tx_status, txctl->an);
2151 }
2152}
2153
2154/* Initialize TX queue and h/w */
2155
2156int ath_tx_init(struct ath_softc *sc, int nbufs)
2157{
2158 int error = 0;
2159
2160 do {
2161 spin_lock_init(&sc->sc_txbuflock);
2162
2163 /* Setup tx descriptors */
2164 error = ath_descdma_setup(sc, &sc->sc_txdma, &sc->sc_txbuf,
2165 "tx", nbufs * ATH_FRAG_PER_MSDU, ATH_TXDESC);
2166 if (error != 0) {
2167 DPRINTF(sc, ATH_DBG_FATAL,
2168 "%s: failed to allocate tx descriptors: %d\n",
2169 __func__, error);
2170 break;
2171 }
2172
2173 /* XXX allocate beacon state together with vap */
2174 error = ath_descdma_setup(sc, &sc->sc_bdma, &sc->sc_bbuf,
2175 "beacon", ATH_BCBUF, 1);
2176 if (error != 0) {
2177 DPRINTF(sc, ATH_DBG_FATAL,
2178 "%s: failed to allocate "
 2179				"beacon descriptors: %d\n",
2180 __func__, error);
2181 break;
2182 }
2183
2184 } while (0);
2185
2186 if (error != 0)
2187 ath_tx_cleanup(sc);
2188
2189 return error;
2190}
2191
2192/* Reclaim all tx queue resources */
2193
2194int ath_tx_cleanup(struct ath_softc *sc)
2195{
2196 /* cleanup beacon descriptors */
2197 if (sc->sc_bdma.dd_desc_len != 0)
2198 ath_descdma_cleanup(sc, &sc->sc_bdma, &sc->sc_bbuf);
2199
2200 /* cleanup tx descriptors */
2201 if (sc->sc_txdma.dd_desc_len != 0)
2202 ath_descdma_cleanup(sc, &sc->sc_txdma, &sc->sc_txbuf);
2203
2204 return 0;
2205}
2206
2207/* Setup a h/w transmit queue */
2208
2209struct ath_txq *ath_txq_setup(struct ath_softc *sc, int qtype, int subtype)
2210{
2211 struct ath_hal *ah = sc->sc_ah;
2212 struct ath9k_tx_queue_info qi;
2213 int qnum;
2214
2215 memzero(&qi, sizeof(qi));
2216 qi.tqi_subtype = subtype;
2217 qi.tqi_aifs = ATH9K_TXQ_USEDEFAULT;
2218 qi.tqi_cwmin = ATH9K_TXQ_USEDEFAULT;
2219 qi.tqi_cwmax = ATH9K_TXQ_USEDEFAULT;
2220 qi.tqi_physCompBuf = 0;
2221
2222 /*
2223 * Enable interrupts only for EOL and DESC conditions.
2224 * We mark tx descriptors to receive a DESC interrupt
 2225	 * when a tx queue gets deep; otherwise we wait for the
 2226	 * EOL interrupt to reap descriptors. Note that this is done
 2227	 * to reduce interrupt load, and it only defers reaping
 2228	 * descriptors, never transmitting frames. Aside from
 2229	 * reducing interrupts this also permits more concurrency.
 2230	 * The only potential downside is if the tx queue backs
 2231	 * up, in which case the top half of the kernel may back up
2232 * due to a lack of tx descriptors.
2233 *
2234 * The UAPSD queue is an exception, since we take a desc-
2235 * based intr on the EOSP frames.
2236 */
2237 if (qtype == ATH9K_TX_QUEUE_UAPSD)
2238 qi.tqi_qflags = TXQ_FLAG_TXDESCINT_ENABLE;
2239 else
2240 qi.tqi_qflags = TXQ_FLAG_TXEOLINT_ENABLE |
2241 TXQ_FLAG_TXDESCINT_ENABLE;
2242 qnum = ath9k_hw_setuptxqueue(ah, qtype, &qi);
2243 if (qnum == -1) {
2244 /*
2245 * NB: don't print a message, this happens
2246 * normally on parts with too few tx queues
2247 */
2248 return NULL;
2249 }
2250 if (qnum >= ARRAY_SIZE(sc->sc_txq)) {
2251 DPRINTF(sc, ATH_DBG_FATAL,
2252 "%s: hal qnum %u out of range, max %u!\n",
2253 __func__, qnum, (unsigned int)ARRAY_SIZE(sc->sc_txq));
2254 ath9k_hw_releasetxqueue(ah, qnum);
2255 return NULL;
2256 }
2257 if (!ATH_TXQ_SETUP(sc, qnum)) {
2258 struct ath_txq *txq = &sc->sc_txq[qnum];
2259
2260 txq->axq_qnum = qnum;
2261 txq->axq_link = NULL;
2262 INIT_LIST_HEAD(&txq->axq_q);
2263 INIT_LIST_HEAD(&txq->axq_acq);
2264 spin_lock_init(&txq->axq_lock);
2265 txq->axq_depth = 0;
2266 txq->axq_aggr_depth = 0;
2267 txq->axq_totalqueued = 0;
2268 txq->axq_intrcnt = 0;
2269 txq->axq_linkbuf = NULL;
2270 sc->sc_txqsetup |= 1<<qnum;
2271 }
2272 return &sc->sc_txq[qnum];
2273}
2274
2275/* Reclaim resources for a setup queue */
2276
2277void ath_tx_cleanupq(struct ath_softc *sc, struct ath_txq *txq)
2278{
2279 ath9k_hw_releasetxqueue(sc->sc_ah, txq->axq_qnum);
2280 sc->sc_txqsetup &= ~(1<<txq->axq_qnum);
2281}
2282
2283/*
2284 * Setup a hardware data transmit queue for the specified
 2285 * access category. The hal may not support all requested
2286 * queues in which case it will return a reference to a
2287 * previously setup queue. We record the mapping from ac's
2288 * to h/w queues for use by ath_tx_start and also track
2289 * the set of h/w queues being used to optimize work in the
2290 * transmit interrupt handler and related routines.
2291 */
2292
2293int ath_tx_setup(struct ath_softc *sc, int haltype)
2294{
2295 struct ath_txq *txq;
2296
2297 if (haltype >= ARRAY_SIZE(sc->sc_haltype2q)) {
2298 DPRINTF(sc, ATH_DBG_FATAL,
2299 "%s: HAL AC %u out of range, max %zu!\n",
2300 __func__, haltype, ARRAY_SIZE(sc->sc_haltype2q));
2301 return 0;
2302 }
2303 txq = ath_txq_setup(sc, ATH9K_TX_QUEUE_DATA, haltype);
2304 if (txq != NULL) {
2305 sc->sc_haltype2q[haltype] = txq->axq_qnum;
2306 return 1;
2307 } else
2308 return 0;
2309}
2310
2311int ath_tx_get_qnum(struct ath_softc *sc, int qtype, int haltype)
2312{
2313 int qnum;
2314
2315 switch (qtype) {
2316 case ATH9K_TX_QUEUE_DATA:
2317 if (haltype >= ARRAY_SIZE(sc->sc_haltype2q)) {
2318 DPRINTF(sc, ATH_DBG_FATAL,
2319 "%s: HAL AC %u out of range, max %zu!\n",
2320 __func__,
2321 haltype, ARRAY_SIZE(sc->sc_haltype2q));
2322 return -1;
2323 }
2324 qnum = sc->sc_haltype2q[haltype];
2325 break;
2326 case ATH9K_TX_QUEUE_BEACON:
2327 qnum = sc->sc_bhalq;
2328 break;
2329 case ATH9K_TX_QUEUE_CAB:
2330 qnum = sc->sc_cabq->axq_qnum;
2331 break;
2332 default:
2333 qnum = -1;
2334 }
2335 return qnum;
2336}
2337
2338/* Update parameters for a transmit queue */
2339
2340int ath_txq_update(struct ath_softc *sc, int qnum,
2341 struct ath9k_tx_queue_info *qinfo)
2342{
2343 struct ath_hal *ah = sc->sc_ah;
2344 int error = 0;
2345 struct ath9k_tx_queue_info qi;
2346
2347 if (qnum == sc->sc_bhalq) {
2348 /*
2349 * XXX: for beacon queue, we just save the parameter.
2350 * It will be picked up by ath_beaconq_config when
2351 * it's necessary.
2352 */
2353 sc->sc_beacon_qi = *qinfo;
2354 return 0;
2355 }
2356
2357 ASSERT(sc->sc_txq[qnum].axq_qnum == qnum);
2358
2359 ath9k_hw_get_txq_props(ah, qnum, &qi);
2360 qi.tqi_aifs = qinfo->tqi_aifs;
2361 qi.tqi_cwmin = qinfo->tqi_cwmin;
2362 qi.tqi_cwmax = qinfo->tqi_cwmax;
2363 qi.tqi_burstTime = qinfo->tqi_burstTime;
2364 qi.tqi_readyTime = qinfo->tqi_readyTime;
2365
2366 if (!ath9k_hw_set_txq_props(ah, qnum, &qi)) {
2367 DPRINTF(sc, ATH_DBG_FATAL,
2368 "%s: unable to update hardware queue %u!\n",
2369 __func__, qnum);
2370 error = -EIO;
2371 } else {
2372 ath9k_hw_resettxqueue(ah, qnum); /* push to h/w */
2373 }
2374
2375 return error;
2376}
2377
2378int ath_cabq_update(struct ath_softc *sc)
2379{
2380 struct ath9k_tx_queue_info qi;
2381 int qnum = sc->sc_cabq->axq_qnum;
2382 struct ath_beacon_config conf;
2383
2384 ath9k_hw_get_txq_props(sc->sc_ah, qnum, &qi);
2385 /*
2386 * Ensure the readytime % is within the bounds.
2387 */
2388 if (sc->sc_config.cabqReadytime < ATH9K_READY_TIME_LO_BOUND)
2389 sc->sc_config.cabqReadytime = ATH9K_READY_TIME_LO_BOUND;
2390 else if (sc->sc_config.cabqReadytime > ATH9K_READY_TIME_HI_BOUND)
2391 sc->sc_config.cabqReadytime = ATH9K_READY_TIME_HI_BOUND;
2392
2393 ath_get_beaconconfig(sc, ATH_IF_ID_ANY, &conf);
2394 qi.tqi_readyTime =
2395 (conf.beacon_interval * sc->sc_config.cabqReadytime) / 100;
2396 ath_txq_update(sc, qnum, &qi);
2397
2398 return 0;
2399}
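
A minimal sketch of the CAB ready-time calculation above. The bound percentages and beacon interval are assumed illustrative values standing in for ATH9K_READY_TIME_LO_BOUND/HI_BOUND and the configured beacon period.

#include <stdio.h>

int main(void)
{
	unsigned int lo_bound = 10, hi_bound = 80;	/* assumed percent bounds */
	unsigned int cabq_readytime = 95;		/* requested percent */
	unsigned int beacon_interval = 100;		/* beacon period in TU */
	unsigned int ready_time;

	/* clamp the percentage, then take that share of the beacon interval */
	if (cabq_readytime < lo_bound)
		cabq_readytime = lo_bound;
	else if (cabq_readytime > hi_bound)
		cabq_readytime = hi_bound;

	ready_time = (beacon_interval * cabq_readytime) / 100;
	printf("tqi_readyTime=%u TU\n", ready_time);	/* prints 80 */
	return 0;
}
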
2400
2401int ath_tx_start(struct ath_softc *sc, struct sk_buff *skb)
2402{
2403 struct ath_tx_control txctl;
2404 int error = 0;
2405
2406 error = ath_tx_prepare(sc, skb, &txctl);
2407 if (error == 0)
2408 /*
2409 * Start DMA mapping.
2410 * ath_tx_start_dma() will be called either synchronously
 2411		 * or asynchronously once DMA is complete.
2412 */
2413 xmit_map_sg(sc, skb,
2414 get_dma_mem_context(&txctl, dmacontext),
2415 &txctl);
2416 else
2417 ath_node_put(sc, txctl.an, ATH9K_BH_STATUS_CHANGE);
2418
2419 /* failed packets will be dropped by the caller */
2420 return error;
2421}
2422
2423/* Deferred processing of transmit interrupt */
2424
2425void ath_tx_tasklet(struct ath_softc *sc)
2426{
2427 u64 tsf = ath9k_hw_gettsf64(sc->sc_ah);
2428 int i, nacked = 0;
2429 u32 qcumask = ((1 << ATH9K_NUM_TX_QUEUES) - 1);
2430
2431 ath9k_hw_gettxintrtxqs(sc->sc_ah, &qcumask);
2432
2433 /*
2434 * Process each active queue.
2435 */
2436 for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
2437 if (ATH_TXQ_SETUP(sc, i) && (qcumask & (1 << i)))
2438 nacked += ath_tx_processq(sc, &sc->sc_txq[i]);
2439 }
2440 if (nacked)
2441 sc->sc_lastrx = tsf;
2442}
2443
2444void ath_tx_draintxq(struct ath_softc *sc,
2445 struct ath_txq *txq, bool retry_tx)
2446{
2447 struct ath_buf *bf, *lastbf;
2448 struct list_head bf_head;
2449
2450 INIT_LIST_HEAD(&bf_head);
2451
2452 /*
2453 * NB: this assumes output has been stopped and
2454 * we do not need to block ath_tx_tasklet
2455 */
2456 for (;;) {
2457 spin_lock_bh(&txq->axq_lock);
2458
2459 if (list_empty(&txq->axq_q)) {
2460 txq->axq_link = NULL;
2461 txq->axq_linkbuf = NULL;
2462 spin_unlock_bh(&txq->axq_lock);
2463 break;
2464 }
2465
2466 bf = list_first_entry(&txq->axq_q, struct ath_buf, list);
2467
2468 if (bf->bf_status & ATH_BUFSTATUS_STALE) {
2469 list_del(&bf->list);
2470 spin_unlock_bh(&txq->axq_lock);
2471
2472 spin_lock_bh(&sc->sc_txbuflock);
2473 list_add_tail(&bf->list, &sc->sc_txbuf);
2474 spin_unlock_bh(&sc->sc_txbuflock);
2475 continue;
2476 }
2477
2478 lastbf = bf->bf_lastbf;
2479 if (!retry_tx)
2480 lastbf->bf_desc->ds_txstat.ts_flags =
2481 ATH9K_TX_SW_ABORTED;
2482
2483 /* remove ath_buf's of the same mpdu from txq */
2484 list_cut_position(&bf_head, &txq->axq_q, &lastbf->list);
2485 txq->axq_depth--;
2486
2487 spin_unlock_bh(&txq->axq_lock);
2488
2489 if (bf->bf_isampdu)
2490 ath_tx_complete_aggr_rifs(sc, txq, bf, &bf_head, 0);
2491 else
2492 ath_tx_complete_buf(sc, bf, &bf_head, 0, 0);
2493 }
2494
2495 /* flush any pending frames if aggregation is enabled */
2496 if (sc->sc_txaggr) {
2497 if (!retry_tx) {
2498 spin_lock_bh(&txq->axq_lock);
2499 ath_txq_drain_pending_buffers(sc, txq,
2500 ATH9K_BH_STATUS_CHANGE);
2501 spin_unlock_bh(&txq->axq_lock);
2502 }
2503 }
2504}
2505
2506/* Drain the transmit queues and reclaim resources */
2507
2508void ath_draintxq(struct ath_softc *sc, bool retry_tx)
2509{
2510 /* stop beacon queue. The beacon will be freed when
2511 * we go to INIT state */
2512 if (!sc->sc_invalid) {
2513 (void) ath9k_hw_stoptxdma(sc->sc_ah, sc->sc_bhalq);
2514 DPRINTF(sc, ATH_DBG_XMIT, "%s: beacon queue %x\n", __func__,
2515 ath9k_hw_gettxbuf(sc->sc_ah, sc->sc_bhalq));
2516 }
2517
2518 ath_drain_txdataq(sc, retry_tx);
2519}
2520
2521u32 ath_txq_depth(struct ath_softc *sc, int qnum)
2522{
2523 return sc->sc_txq[qnum].axq_depth;
2524}
2525
2526u32 ath_txq_aggr_depth(struct ath_softc *sc, int qnum)
2527{
2528 return sc->sc_txq[qnum].axq_aggr_depth;
2529}
2530
2531/* Check if an ADDBA is required. A valid node must be passed. */
2532enum ATH_AGGR_CHECK ath_tx_aggr_check(struct ath_softc *sc,
2533 struct ath_node *an,
2534 u8 tidno)
2535{
2536 struct ath_atx_tid *txtid;
2537 DECLARE_MAC_BUF(mac);
2538
2539 if (!sc->sc_txaggr)
2540 return AGGR_NOT_REQUIRED;
2541
2542 /* ADDBA exchange must be completed before sending aggregates */
2543 txtid = ATH_AN_2_TID(an, tidno);
2544
2545 if (txtid->addba_exchangecomplete)
2546 return AGGR_EXCHANGE_DONE;
2547
2548 if (txtid->cleanup_inprogress)
2549 return AGGR_CLEANUP_PROGRESS;
2550
2551 if (txtid->addba_exchangeinprogress)
2552 return AGGR_EXCHANGE_PROGRESS;
2553
2554 if (!txtid->addba_exchangecomplete) {
2555 if (!txtid->addba_exchangeinprogress &&
2556 (txtid->addba_exchangeattempts < ADDBA_EXCHANGE_ATTEMPTS)) {
2557 txtid->addba_exchangeattempts++;
2558 return AGGR_REQUIRED;
2559 }
2560 }
2561
2562 return AGGR_NOT_REQUIRED;
2563}
2564
2565/* Start TX aggregation */
2566
2567int ath_tx_aggr_start(struct ath_softc *sc,
2568 const u8 *addr,
2569 u16 tid,
2570 u16 *ssn)
2571{
2572 struct ath_atx_tid *txtid;
2573 struct ath_node *an;
2574
2575 spin_lock_bh(&sc->node_lock);
2576 an = ath_node_find(sc, (u8 *) addr);
2577 spin_unlock_bh(&sc->node_lock);
2578
2579 if (!an) {
2580 DPRINTF(sc, ATH_DBG_AGGR,
2581 "%s: Node not found to initialize "
2582 "TX aggregation\n", __func__);
2583 return -1;
2584 }
2585
2586 if (sc->sc_txaggr) {
2587 txtid = ATH_AN_2_TID(an, tid);
2588 txtid->addba_exchangeinprogress = 1;
2589 ath_tx_pause_tid(sc, txtid);
2590 }
2591
2592 return 0;
2593}
2594
2595/* Stop tx aggregation */
2596
2597int ath_tx_aggr_stop(struct ath_softc *sc,
2598 const u8 *addr,
2599 u16 tid)
2600{
2601 struct ath_node *an;
2602
2603 spin_lock_bh(&sc->node_lock);
2604 an = ath_node_find(sc, (u8 *) addr);
2605 spin_unlock_bh(&sc->node_lock);
2606
2607 if (!an) {
2608 DPRINTF(sc, ATH_DBG_AGGR,
2609 "%s: TX aggr stop for non-existent node\n", __func__);
2610 return -1;
2611 }
2612
2613 ath_tx_aggr_teardown(sc, an, tid);
2614 return 0;
2615}
2616
2617/*
2618 * Performs transmit side cleanup when TID changes from aggregated to
2619 * unaggregated.
2620 * - Pause the TID and mark cleanup in progress
2621 * - Discard all retry frames from the s/w queue.
2622 */
2623
2624void ath_tx_aggr_teardown(struct ath_softc *sc,
2625 struct ath_node *an, u8 tid)
2626{
2627 struct ath_atx_tid *txtid = ATH_AN_2_TID(an, tid);
2628 struct ath_txq *txq = &sc->sc_txq[txtid->ac->qnum];
2629 struct ath_buf *bf;
2630 struct list_head bf_head;
2631 INIT_LIST_HEAD(&bf_head);
2632
2633 DPRINTF(sc, ATH_DBG_AGGR, "%s: teardown TX aggregation\n", __func__);
2634
2635 if (txtid->cleanup_inprogress) /* cleanup is in progress */
2636 return;
2637
2638 if (!txtid->addba_exchangecomplete) {
2639 txtid->addba_exchangeattempts = 0;
2640 return;
2641 }
2642
2643 /* TID must be paused first */
2644 ath_tx_pause_tid(sc, txtid);
2645
2646 /* drop all software retried frames and mark this TID */
2647 spin_lock_bh(&txq->axq_lock);
2648 while (!list_empty(&txtid->buf_q)) {
2649 bf = list_first_entry(&txtid->buf_q, struct ath_buf, list);
2650 if (!bf->bf_isretried) {
2651 /*
 2652			 * NB: this is based on the assumption that a
 2653			 * software retried frame will always stay
 2654			 * at the head of the software queue.
2655 */
2656 break;
2657 }
2658 list_cut_position(&bf_head,
2659 &txtid->buf_q, &bf->bf_lastfrm->list);
2660 ath_tx_update_baw(sc, txtid, bf->bf_seqno);
2661
2662 /* complete this sub-frame */
2663 ath_tx_complete_buf(sc, bf, &bf_head, 0, 0);
2664 }
2665
2666 if (txtid->baw_head != txtid->baw_tail) {
2667 spin_unlock_bh(&txq->axq_lock);
2668 txtid->cleanup_inprogress = true;
2669 } else {
2670 txtid->addba_exchangecomplete = 0;
2671 txtid->addba_exchangeattempts = 0;
2672 spin_unlock_bh(&txq->axq_lock);
2673 ath_tx_flush_tid(sc, txtid);
2674 }
2675}
2676
2677/*
2678 * Tx scheduling logic
2679 * NB: must be called with txq lock held
2680 */
2681
2682void ath_txq_schedule(struct ath_softc *sc, struct ath_txq *txq)
2683{
2684 struct ath_atx_ac *ac;
2685 struct ath_atx_tid *tid;
2686
2687 /* nothing to schedule */
2688 if (list_empty(&txq->axq_acq))
2689 return;
2690 /*
2691 * get the first node/ac pair on the queue
2692 */
2693 ac = list_first_entry(&txq->axq_acq, struct ath_atx_ac, list);
2694 list_del(&ac->list);
2695 ac->sched = false;
2696
2697 /*
2698 * process a single tid per destination
2699 */
2700 do {
2701 /* nothing to schedule */
2702 if (list_empty(&ac->tid_q))
2703 return;
2704
2705 tid = list_first_entry(&ac->tid_q, struct ath_atx_tid, list);
2706 list_del(&tid->list);
2707 tid->sched = false;
2708
2709 if (tid->paused) /* check next tid to keep h/w busy */
2710 continue;
2711
2712 if (!(tid->an->an_smmode == ATH_SM_PWRSAV_DYNAMIC) ||
2713 ((txq->axq_depth % 2) == 0)) {
2714 ath_tx_sched_aggr(sc, txq, tid);
2715 }
2716
2717 /*
2718 * add tid to round-robin queue if more frames
2719 * are pending for the tid
2720 */
2721 if (!list_empty(&tid->buf_q))
2722 ath_tx_queue_tid(txq, tid);
2723
2724 /* only schedule one TID at a time */
2725 break;
2726 } while (!list_empty(&ac->tid_q));
2727
2728 /*
2729 * schedule AC if more TIDs need processing
2730 */
2731 if (!list_empty(&ac->tid_q)) {
2732 /*
2733 * add dest ac to txq if not already added
2734 */
2735 if (!ac->sched) {
2736 ac->sched = true;
2737 list_add_tail(&ac->list, &txq->axq_acq);
2738 }
2739 }
2740}
2741
2742/* Initialize per-node transmit state */
2743
2744void ath_tx_node_init(struct ath_softc *sc, struct ath_node *an)
2745{
2746 if (sc->sc_txaggr) {
2747 struct ath_atx_tid *tid;
2748 struct ath_atx_ac *ac;
2749 int tidno, acno;
2750
2751 sc->sc_ht_info.maxampdu = ATH_AMPDU_LIMIT_DEFAULT;
2752
2753 /*
2754 * Init per tid tx state
2755 */
2756 for (tidno = 0, tid = &an->an_aggr.tx.tid[tidno];
2757 tidno < WME_NUM_TID;
2758 tidno++, tid++) {
2759 tid->an = an;
2760 tid->tidno = tidno;
2761 tid->seq_start = tid->seq_next = 0;
2762 tid->baw_size = WME_MAX_BA;
2763 tid->baw_head = tid->baw_tail = 0;
2764 tid->sched = false;
2765 tid->paused = false;
2766 tid->cleanup_inprogress = false;
2767 INIT_LIST_HEAD(&tid->buf_q);
2768
2769 acno = TID_TO_WME_AC(tidno);
2770 tid->ac = &an->an_aggr.tx.ac[acno];
2771
2772 /* ADDBA state */
2773 tid->addba_exchangecomplete = 0;
2774 tid->addba_exchangeinprogress = 0;
2775 tid->addba_exchangeattempts = 0;
2776 }
2777
2778 /*
2779 * Init per ac tx state
2780 */
2781 for (acno = 0, ac = &an->an_aggr.tx.ac[acno];
2782 acno < WME_NUM_AC; acno++, ac++) {
2783 ac->sched = false;
2784 INIT_LIST_HEAD(&ac->tid_q);
2785
2786 switch (acno) {
2787 case WME_AC_BE:
2788 ac->qnum = ath_tx_get_qnum(sc,
2789 ATH9K_TX_QUEUE_DATA, ATH9K_WME_AC_BE);
2790 break;
2791 case WME_AC_BK:
2792 ac->qnum = ath_tx_get_qnum(sc,
2793 ATH9K_TX_QUEUE_DATA, ATH9K_WME_AC_BK);
2794 break;
2795 case WME_AC_VI:
2796 ac->qnum = ath_tx_get_qnum(sc,
2797 ATH9K_TX_QUEUE_DATA, ATH9K_WME_AC_VI);
2798 break;
2799 case WME_AC_VO:
2800 ac->qnum = ath_tx_get_qnum(sc,
2801 ATH9K_TX_QUEUE_DATA, ATH9K_WME_AC_VO);
2802 break;
2803 }
2804 }
2805 }
2806}
2807
 2808/* Clean up the pending buffers for the node. */
2809
2810void ath_tx_node_cleanup(struct ath_softc *sc,
2811 struct ath_node *an, bool bh_flag)
2812{
2813 int i;
2814 struct ath_atx_ac *ac, *ac_tmp;
2815 struct ath_atx_tid *tid, *tid_tmp;
2816 struct ath_txq *txq;
2817 for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
2818 if (ATH_TXQ_SETUP(sc, i)) {
2819 txq = &sc->sc_txq[i];
2820
2821 if (likely(bh_flag))
2822 spin_lock_bh(&txq->axq_lock);
2823 else
2824 spin_lock(&txq->axq_lock);
2825
2826 list_for_each_entry_safe(ac,
2827 ac_tmp, &txq->axq_acq, list) {
2828 tid = list_first_entry(&ac->tid_q,
2829 struct ath_atx_tid, list);
2830 if (tid && tid->an != an)
2831 continue;
2832 list_del(&ac->list);
2833 ac->sched = false;
2834
2835 list_for_each_entry_safe(tid,
2836 tid_tmp, &ac->tid_q, list) {
2837 list_del(&tid->list);
2838 tid->sched = false;
2839 ath_tid_drain(sc, txq, tid, bh_flag);
2840 tid->addba_exchangecomplete = 0;
2841 tid->addba_exchangeattempts = 0;
2842 tid->cleanup_inprogress = false;
2843 }
2844 }
2845
2846 if (likely(bh_flag))
2847 spin_unlock_bh(&txq->axq_lock);
2848 else
2849 spin_unlock(&txq->axq_lock);
2850 }
2851 }
2852}
2853
2854/* Cleanup per node transmit state */
2855
2856void ath_tx_node_free(struct ath_softc *sc, struct ath_node *an)
2857{
2858 if (sc->sc_txaggr) {
2859 struct ath_atx_tid *tid;
2860 int tidno, i;
2861
 2862		/* Check that all per-tid tx buffers have been freed */
2863 for (tidno = 0, tid = &an->an_aggr.tx.tid[tidno];
2864 tidno < WME_NUM_TID;
2865 tidno++, tid++) {
2866
2867 for (i = 0; i < ATH_TID_MAX_BUFS; i++)
2868 ASSERT(tid->tx_buf[i] == NULL);
2869 }
2870 }
2871}
diff --git a/drivers/net/wireless/b43/main.c b/drivers/net/wireless/b43/main.c
index e78319aa47c1..3bf3a869361f 100644
--- a/drivers/net/wireless/b43/main.c
+++ b/drivers/net/wireless/b43/main.c
@@ -4645,8 +4645,7 @@ static int b43_wireless_init(struct ssb_device *dev)
4645 } 4645 }
4646 4646
4647 /* fill hw info */ 4647 /* fill hw info */
4648 hw->flags = IEEE80211_HW_HOST_GEN_BEACON_TEMPLATE | 4648 hw->flags = IEEE80211_HW_RX_INCLUDES_FCS |
4649 IEEE80211_HW_RX_INCLUDES_FCS |
4650 IEEE80211_HW_SIGNAL_DBM | 4649 IEEE80211_HW_SIGNAL_DBM |
4651 IEEE80211_HW_NOISE_DBM; 4650 IEEE80211_HW_NOISE_DBM;
4652 4651
diff --git a/drivers/net/wireless/b43/xmit.c b/drivers/net/wireless/b43/xmit.c
index 8d54502222a6..9dda8169f7cc 100644
--- a/drivers/net/wireless/b43/xmit.c
+++ b/drivers/net/wireless/b43/xmit.c
@@ -192,7 +192,7 @@ int b43_generate_txhdr(struct b43_wldev *dev,
192 const struct b43_phy *phy = &dev->phy; 192 const struct b43_phy *phy = &dev->phy;
193 const struct ieee80211_hdr *wlhdr = 193 const struct ieee80211_hdr *wlhdr =
194 (const struct ieee80211_hdr *)fragment_data; 194 (const struct ieee80211_hdr *)fragment_data;
195 int use_encryption = (!(info->flags & IEEE80211_TX_CTL_DO_NOT_ENCRYPT)); 195 int use_encryption = !!info->control.hw_key;
196 __le16 fctl = wlhdr->frame_control; 196 __le16 fctl = wlhdr->frame_control;
197 struct ieee80211_rate *fbrate; 197 struct ieee80211_rate *fbrate;
198 u8 rate, rate_fb; 198 u8 rate, rate_fb;
diff --git a/drivers/net/wireless/b43legacy/main.c b/drivers/net/wireless/b43legacy/main.c
index a1b8bf3ee732..2541c81932f0 100644
--- a/drivers/net/wireless/b43legacy/main.c
+++ b/drivers/net/wireless/b43legacy/main.c
@@ -3702,8 +3702,7 @@ static int b43legacy_wireless_init(struct ssb_device *dev)
3702 } 3702 }
3703 3703
3704 /* fill hw info */ 3704 /* fill hw info */
3705 hw->flags = IEEE80211_HW_HOST_GEN_BEACON_TEMPLATE | 3705 hw->flags = IEEE80211_HW_RX_INCLUDES_FCS |
3706 IEEE80211_HW_RX_INCLUDES_FCS |
3707 IEEE80211_HW_SIGNAL_DBM | 3706 IEEE80211_HW_SIGNAL_DBM |
3708 IEEE80211_HW_NOISE_DBM; 3707 IEEE80211_HW_NOISE_DBM;
3709 hw->queues = 1; /* FIXME: hardware has more queues */ 3708 hw->queues = 1; /* FIXME: hardware has more queues */
@@ -3846,10 +3845,10 @@ static int b43legacy_resume(struct ssb_device *dev)
3846 goto out; 3845 goto out;
3847 } 3846 }
3848 } 3847 }
3849 mutex_unlock(&wl->mutex);
3850 3848
3851 b43legacydbg(wl, "Device resumed.\n"); 3849 b43legacydbg(wl, "Device resumed.\n");
3852out: 3850out:
3851 mutex_unlock(&wl->mutex);
3853 return err; 3852 return err;
3854} 3853}
3855 3854
diff --git a/drivers/net/wireless/b43legacy/xmit.c b/drivers/net/wireless/b43legacy/xmit.c
index e969ed8d412d..68e1f8c78727 100644
--- a/drivers/net/wireless/b43legacy/xmit.c
+++ b/drivers/net/wireless/b43legacy/xmit.c
@@ -192,7 +192,7 @@ static int generate_txhdr_fw3(struct b43legacy_wldev *dev,
192 u16 cookie) 192 u16 cookie)
193{ 193{
194 const struct ieee80211_hdr *wlhdr; 194 const struct ieee80211_hdr *wlhdr;
195 int use_encryption = (!(info->flags & IEEE80211_TX_CTL_DO_NOT_ENCRYPT)); 195 int use_encryption = !!info->control.hw_key;
196 u16 fctl; 196 u16 fctl;
197 u8 rate; 197 u8 rate;
198 struct ieee80211_rate *rate_fb; 198 struct ieee80211_rate *rate_fb;
diff --git a/drivers/net/wireless/ipw2100.c b/drivers/net/wireless/ipw2100.c
index 5bf9e00b070c..c6f886ec08a3 100644
--- a/drivers/net/wireless/ipw2100.c
+++ b/drivers/net/wireless/ipw2100.c
@@ -6442,6 +6442,7 @@ static int ipw2100_resume(struct pci_dev *pci_dev)
6442 if (err) { 6442 if (err) {
6443 printk(KERN_ERR "%s: pci_enable_device failed on resume\n", 6443 printk(KERN_ERR "%s: pci_enable_device failed on resume\n",
6444 dev->name); 6444 dev->name);
6445 mutex_unlock(&priv->action_mutex);
6445 return err; 6446 return err;
6446 } 6447 }
6447 pci_restore_state(pci_dev); 6448 pci_restore_state(pci_dev);
@@ -7146,7 +7147,7 @@ static int ipw2100_wx_get_rate(struct net_device *dev,
7146 err = ipw2100_get_ordinal(priv, IPW_ORD_CURRENT_TX_RATE, &val, &len); 7147 err = ipw2100_get_ordinal(priv, IPW_ORD_CURRENT_TX_RATE, &val, &len);
7147 if (err) { 7148 if (err) {
7148 IPW_DEBUG_WX("failed querying ordinals.\n"); 7149 IPW_DEBUG_WX("failed querying ordinals.\n");
7149 return err; 7150 goto done;
7150 } 7151 }
7151 7152
7152 switch (val & TX_RATE_MASK) { 7153 switch (val & TX_RATE_MASK) {
diff --git a/drivers/net/wireless/ipw2200.c b/drivers/net/wireless/ipw2200.c
index 1acfbcd3703c..36e8d2f6e7b4 100644
--- a/drivers/net/wireless/ipw2200.c
+++ b/drivers/net/wireless/ipw2200.c
@@ -305,9 +305,10 @@ static inline void ipw_write_reg32(struct ipw_priv *a, u32 b, u32 c)
305#define _ipw_write8(ipw, ofs, val) writeb((val), (ipw)->hw_base + (ofs)) 305#define _ipw_write8(ipw, ofs, val) writeb((val), (ipw)->hw_base + (ofs))
306 306
307/* 8-bit direct write (for low 4K of SRAM/regs), with debug wrapper */ 307/* 8-bit direct write (for low 4K of SRAM/regs), with debug wrapper */
308#define ipw_write8(ipw, ofs, val) \ 308#define ipw_write8(ipw, ofs, val) do { \
309 IPW_DEBUG_IO("%s %d: write_direct8(0x%08X, 0x%08X)\n", __FILE__, __LINE__, (u32)(ofs), (u32)(val)); \ 309 IPW_DEBUG_IO("%s %d: write_direct8(0x%08X, 0x%08X)\n", __FILE__, __LINE__, (u32)(ofs), (u32)(val)); \
310 _ipw_write8(ipw, ofs, val) 310 _ipw_write8(ipw, ofs, val); \
311 } while (0)
311 312
312/* 16-bit direct write (low 4K) */ 313/* 16-bit direct write (low 4K) */
313#define _ipw_write16(ipw, ofs, val) writew((val), (ipw)->hw_base + (ofs)) 314#define _ipw_write16(ipw, ofs, val) writew((val), (ipw)->hw_base + (ofs))
@@ -11946,7 +11947,7 @@ module_param(auto_create, int, 0444);
11946MODULE_PARM_DESC(auto_create, "auto create adhoc network (default on)"); 11947MODULE_PARM_DESC(auto_create, "auto create adhoc network (default on)");
11947 11948
11948module_param(led, int, 0444); 11949module_param(led, int, 0444);
11949MODULE_PARM_DESC(led, "enable led control on some systems (default 0 off)\n"); 11950MODULE_PARM_DESC(led, "enable led control on some systems (default 0 off)");
11950 11951
11951module_param(debug, int, 0444); 11952module_param(debug, int, 0444);
11952MODULE_PARM_DESC(debug, "debug output mask"); 11953MODULE_PARM_DESC(debug, "debug output mask");
diff --git a/drivers/net/wireless/iwlwifi/Kconfig b/drivers/net/wireless/iwlwifi/Kconfig
index 82b66a3d3a5d..b0ac0ce3fb9f 100644
--- a/drivers/net/wireless/iwlwifi/Kconfig
+++ b/drivers/net/wireless/iwlwifi/Kconfig
@@ -14,18 +14,49 @@ config IWLWIFI_LEDS
14 default n 14 default n
15 15
16config IWLWIFI_RFKILL 16config IWLWIFI_RFKILL
17 boolean "IWLWIFI RF kill support" 17 boolean "Iwlwifi RF kill support"
18 depends on IWLCORE 18 depends on IWLCORE
19 19
20config IWL4965 20config IWLWIFI_DEBUG
21 tristate "Intel Wireless WiFi 4965AGN" 21 bool "Enable full debugging output in iwlagn driver"
22 depends on IWLCORE
23 ---help---
24 This option will enable debug tracing output for the iwlwifi drivers
25
26 This will result in the kernel module being ~100k larger. You can
27 control which debug output is sent to the kernel log by setting the
28 value in
29
30 /sys/class/net/wlan0/device/debug_level
31
32 This entry will only exist if this option is enabled.
33
34 To set a value, simply echo an 8-byte hex value to the same file:
35
36 % echo 0x43fff > /sys/class/net/wlan0/device/debug_level
37
38 You can find the list of debug mask values in:
39 drivers/net/wireless/iwlwifi/iwl-debug.h
40
41 If this is your first time using this driver, you should say Y here
42 as the debug information can assist others in helping you resolve
43 any problems you may encounter.
44
45config IWLWIFI_DEBUGFS
46 bool "Iwlwifi debugfs support"
47 depends on IWLCORE && IWLWIFI_DEBUG && MAC80211_DEBUGFS
48 ---help---
49 Enable creation of debugfs files for the iwlwifi drivers.
50
51config IWLAGN
52 tristate "Intel Wireless WiFi Next Gen AGN"
22 depends on PCI && MAC80211 && WLAN_80211 && EXPERIMENTAL 53 depends on PCI && MAC80211 && WLAN_80211 && EXPERIMENTAL
23 select FW_LOADER 54 select FW_LOADER
24 select IWLCORE 55 select IWLCORE
25 ---help--- 56 ---help---
26 Select to build the driver supporting the: 57 Select to build the driver supporting the:
27 58
28 Intel Wireless WiFi Link 4965AGN 59 Intel Wireless WiFi Link Next-Gen AGN
29 60
30 This driver uses the kernel's mac80211 subsystem. 61 This driver uses the kernel's mac80211 subsystem.
31 62
@@ -42,60 +73,33 @@ config IWL4965
42 If you want to compile the driver as a module ( = code which can be 73 If you want to compile the driver as a module ( = code which can be
43 inserted in and removed from the running kernel whenever you want), 74 inserted in and removed from the running kernel whenever you want),
44 say M here and read <file:Documentation/kbuild/modules.txt>. The 75 say M here and read <file:Documentation/kbuild/modules.txt>. The
45 module will be called iwl4965.ko. 76 module will be called iwlagn.ko.
46
47config IWL4965_LEDS
48 bool "Enable LEDS features in iwl4965 driver"
49 depends on IWL4965
50 select IWLWIFI_LEDS
51 ---help---
52 This option enables LEDS for the iwlwifi drivers
53 77
54 78config IWLAGN_SPECTRUM_MEASUREMENT
55config IWL4965_SPECTRUM_MEASUREMENT 79 bool "Enable Spectrum Measurement in iwlagn driver"
56 bool "Enable Spectrum Measurement in iwl4965 driver" 80 depends on IWLAGN
57 depends on IWL4965
58 ---help--- 81 ---help---
59 This option will enable spectrum measurement for the iwl4965 driver. 82 This option will enable spectrum measurement for the iwlagn driver.
60 83
61config IWLWIFI_DEBUG 84config IWLAGN_LEDS
62 bool "Enable full debugging output in iwl4965 driver" 85 bool "Enable LEDS features in iwlagn driver"
63 depends on IWL4965 86 depends on IWLAGN
87 select IWLWIFI_LEDS
64 ---help--- 88 ---help---
65 This option will enable debug tracing output for the iwl4965 89 This option enables LEDS for the iwlagn drivers
66 driver.
67
68 This will result in the kernel module being ~100k larger. You can
69 control which debug output is sent to the kernel log by setting the
70 value in
71
72 /sys/class/net/wlan0/device/debug_level
73
74 This entry will only exist if this option is enabled.
75
76 To set a value, simply echo an 8-byte hex value to the same file:
77
78 % echo 0x43fff > /sys/class/net/wlan0/device/debug_level
79 90
80 You can find the list of debug mask values in:
81 drivers/net/wireless/iwlwifi/iwl-4965-debug.h
82 91
83 If this is your first time using this driver, you should say Y here 92config IWL4965
84 as the debug information can assist others in helping you resolve 93 bool "Intel Wireless WiFi 4965AGN"
85 any problems you may encounter. 94 depends on IWLAGN
95 ---help---
96 This option enables support for Intel Wireless WiFi Link 4965AGN
86 97
87config IWL5000 98config IWL5000
88 bool "Intel Wireless WiFi 5000AGN" 99 bool "Intel Wireless WiFi 5000AGN"
89 depends on IWL4965 100 depends on IWLAGN
90 ---help--- 101 ---help---
91 This option enables support for Intel Wireless WiFi Link 5000AGN Family 102 This option enables support for Intel Wireless WiFi Link 5000AGN Family
92 Dependency on 4965 is temporary
93
94config IWLWIFI_DEBUGFS
95 bool "Iwlwifi debugfs support"
96 depends on IWLCORE && IWLWIFI_DEBUG && MAC80211_DEBUGFS
97 ---help---
98 Enable creation of debugfs files for the iwlwifi drivers.
99 103
100config IWL3945 104config IWL3945
101 tristate "Intel PRO/Wireless 3945ABG/BG Network Connection" 105 tristate "Intel PRO/Wireless 3945ABG/BG Network Connection"
diff --git a/drivers/net/wireless/iwlwifi/Makefile b/drivers/net/wireless/iwlwifi/Makefile
index 1f52b92f08b5..47aa28f6a513 100644
--- a/drivers/net/wireless/iwlwifi/Makefile
+++ b/drivers/net/wireless/iwlwifi/Makefile
@@ -6,15 +6,14 @@ iwlcore-$(CONFIG_IWLWIFI_DEBUGFS) += iwl-debugfs.o
6iwlcore-$(CONFIG_IWLWIFI_LEDS) += iwl-led.o 6iwlcore-$(CONFIG_IWLWIFI_LEDS) += iwl-led.o
7iwlcore-$(CONFIG_IWLWIFI_RFKILL) += iwl-rfkill.o 7iwlcore-$(CONFIG_IWLWIFI_RFKILL) += iwl-rfkill.o
8 8
9obj-$(CONFIG_IWLAGN) += iwlagn.o
10iwlagn-objs := iwl-agn.o iwl-agn-rs.o
11
12iwlagn-$(CONFIG_IWL4965) += iwl-4965.o
13iwlagn-$(CONFIG_IWL5000) += iwl-5000.o
14
9obj-$(CONFIG_IWL3945) += iwl3945.o 15obj-$(CONFIG_IWL3945) += iwl3945.o
10iwl3945-objs := iwl3945-base.o iwl-3945.o iwl-3945-rs.o 16iwl3945-objs := iwl3945-base.o iwl-3945.o iwl-3945-rs.o
11iwl3945-$(CONFIG_IWL3945_LEDS) += iwl-3945-led.o 17iwl3945-$(CONFIG_IWL3945_LEDS) += iwl-3945-led.o
12 18
13obj-$(CONFIG_IWL4965) += iwl4965.o
14iwl4965-objs := iwl4965-base.o iwl-4965.o iwl-4965-rs.o
15
16ifeq ($(CONFIG_IWL5000),y)
17 iwl4965-objs += iwl-5000.o
18endif
19
20 19
diff --git a/drivers/net/wireless/iwlwifi/iwl-3945-led.c b/drivers/net/wireless/iwlwifi/iwl-3945-led.c
index 6be1fe13fa57..d3336966b6b5 100644
--- a/drivers/net/wireless/iwlwifi/iwl-3945-led.c
+++ b/drivers/net/wireless/iwlwifi/iwl-3945-led.c
@@ -206,12 +206,12 @@ static void iwl3945_led_brightness_set(struct led_classdev *led_cdev,
206static int iwl3945_led_register_led(struct iwl3945_priv *priv, 206static int iwl3945_led_register_led(struct iwl3945_priv *priv,
207 struct iwl3945_led *led, 207 struct iwl3945_led *led,
208 enum led_type type, u8 set_led, 208 enum led_type type, u8 set_led,
209 const char *name, char *trigger) 209 char *trigger)
210{ 210{
211 struct device *device = wiphy_dev(priv->hw->wiphy); 211 struct device *device = wiphy_dev(priv->hw->wiphy);
212 int ret; 212 int ret;
213 213
214 led->led_dev.name = name; 214 led->led_dev.name = led->name;
215 led->led_dev.brightness_set = iwl3945_led_brightness_set; 215 led->led_dev.brightness_set = iwl3945_led_brightness_set;
216 led->led_dev.default_trigger = trigger; 216 led->led_dev.default_trigger = trigger;
217 217
@@ -308,7 +308,6 @@ void iwl3945_led_background(struct iwl3945_priv *priv)
308int iwl3945_led_register(struct iwl3945_priv *priv) 308int iwl3945_led_register(struct iwl3945_priv *priv)
309{ 309{
310 char *trigger; 310 char *trigger;
311 char name[32];
312 int ret; 311 int ret;
313 312
314 priv->last_blink_rate = 0; 313 priv->last_blink_rate = 0;
@@ -318,7 +317,8 @@ int iwl3945_led_register(struct iwl3945_priv *priv)
318 priv->allow_blinking = 0; 317 priv->allow_blinking = 0;
319 318
320 trigger = ieee80211_get_radio_led_name(priv->hw); 319 trigger = ieee80211_get_radio_led_name(priv->hw);
321 snprintf(name, sizeof(name), "iwl-%s:radio", 320 snprintf(priv->led[IWL_LED_TRG_RADIO].name,
321 sizeof(priv->led[IWL_LED_TRG_RADIO].name), "iwl-%s:radio",
322 wiphy_name(priv->hw->wiphy)); 322 wiphy_name(priv->hw->wiphy));
323 323
324 priv->led[IWL_LED_TRG_RADIO].led_on = iwl3945_led_on; 324 priv->led[IWL_LED_TRG_RADIO].led_on = iwl3945_led_on;
@@ -327,19 +327,20 @@ int iwl3945_led_register(struct iwl3945_priv *priv)
327 327
328 ret = iwl3945_led_register_led(priv, 328 ret = iwl3945_led_register_led(priv,
329 &priv->led[IWL_LED_TRG_RADIO], 329 &priv->led[IWL_LED_TRG_RADIO],
330 IWL_LED_TRG_RADIO, 1, 330 IWL_LED_TRG_RADIO, 1, trigger);
331 name, trigger); 331
332 if (ret) 332 if (ret)
333 goto exit_fail; 333 goto exit_fail;
334 334
335 trigger = ieee80211_get_assoc_led_name(priv->hw); 335 trigger = ieee80211_get_assoc_led_name(priv->hw);
336 snprintf(name, sizeof(name), "iwl-%s:assoc", 336 snprintf(priv->led[IWL_LED_TRG_ASSOC].name,
337 sizeof(priv->led[IWL_LED_TRG_ASSOC].name), "iwl-%s:assoc",
337 wiphy_name(priv->hw->wiphy)); 338 wiphy_name(priv->hw->wiphy));
338 339
339 ret = iwl3945_led_register_led(priv, 340 ret = iwl3945_led_register_led(priv,
340 &priv->led[IWL_LED_TRG_ASSOC], 341 &priv->led[IWL_LED_TRG_ASSOC],
341 IWL_LED_TRG_ASSOC, 0, 342 IWL_LED_TRG_ASSOC, 0, trigger);
342 name, trigger); 343
343 /* for assoc always turn led on */ 344 /* for assoc always turn led on */
344 priv->led[IWL_LED_TRG_ASSOC].led_on = iwl3945_led_on; 345 priv->led[IWL_LED_TRG_ASSOC].led_on = iwl3945_led_on;
345 priv->led[IWL_LED_TRG_ASSOC].led_off = iwl3945_led_on; 346 priv->led[IWL_LED_TRG_ASSOC].led_off = iwl3945_led_on;
@@ -349,14 +350,13 @@ int iwl3945_led_register(struct iwl3945_priv *priv)
349 goto exit_fail; 350 goto exit_fail;
350 351
351 trigger = ieee80211_get_rx_led_name(priv->hw); 352 trigger = ieee80211_get_rx_led_name(priv->hw);
352 snprintf(name, sizeof(name), "iwl-%s:RX", 353 snprintf(priv->led[IWL_LED_TRG_RX].name,
354 sizeof(priv->led[IWL_LED_TRG_RX].name), "iwl-%s:RX",
353 wiphy_name(priv->hw->wiphy)); 355 wiphy_name(priv->hw->wiphy));
354 356
355
356 ret = iwl3945_led_register_led(priv, 357 ret = iwl3945_led_register_led(priv,
357 &priv->led[IWL_LED_TRG_RX], 358 &priv->led[IWL_LED_TRG_RX],
358 IWL_LED_TRG_RX, 0, 359 IWL_LED_TRG_RX, 0, trigger);
359 name, trigger);
360 360
361 priv->led[IWL_LED_TRG_RX].led_on = iwl3945_led_associated; 361 priv->led[IWL_LED_TRG_RX].led_on = iwl3945_led_associated;
362 priv->led[IWL_LED_TRG_RX].led_off = iwl3945_led_associated; 362 priv->led[IWL_LED_TRG_RX].led_off = iwl3945_led_associated;
@@ -366,13 +366,14 @@ int iwl3945_led_register(struct iwl3945_priv *priv)
366 goto exit_fail; 366 goto exit_fail;
367 367
368 trigger = ieee80211_get_tx_led_name(priv->hw); 368 trigger = ieee80211_get_tx_led_name(priv->hw);
369 snprintf(name, sizeof(name), "iwl-%s:TX", 369 snprintf(priv->led[IWL_LED_TRG_TX].name,
370 sizeof(priv->led[IWL_LED_TRG_TX].name), "iwl-%s:TX",
370 wiphy_name(priv->hw->wiphy)); 371 wiphy_name(priv->hw->wiphy));
371 372
372 ret = iwl3945_led_register_led(priv, 373 ret = iwl3945_led_register_led(priv,
373 &priv->led[IWL_LED_TRG_TX], 374 &priv->led[IWL_LED_TRG_TX],
374 IWL_LED_TRG_TX, 0, 375 IWL_LED_TRG_TX, 0, trigger);
375 name, trigger); 376
376 priv->led[IWL_LED_TRG_TX].led_on = iwl3945_led_associated; 377 priv->led[IWL_LED_TRG_TX].led_on = iwl3945_led_associated;
377 priv->led[IWL_LED_TRG_TX].led_off = iwl3945_led_associated; 378 priv->led[IWL_LED_TRG_TX].led_off = iwl3945_led_associated;
378 priv->led[IWL_LED_TRG_TX].led_pattern = iwl3945_led_pattern; 379 priv->led[IWL_LED_TRG_TX].led_pattern = iwl3945_led_pattern;
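
The iwl-3945-led.c hunks above stop formatting the LED name into a stack buffer and instead write it into per-LED storage (the name[32] field added to struct iwl3945_led in the header change that follows). A minimal sketch of the lifetime issue this avoids; the types below are simplified stand-ins, not the driver structures, and they assume a registration API that stores the name pointer rather than copying it:

#include <stdio.h>

/* stand-in for a LED class device that keeps the name pointer around */
struct led_dev_like {
	const char *name;
};

/* stand-in for the per-LED structure after the patch: name lives with the LED */
struct led_like {
	struct led_dev_like led_dev;
	char name[32];
};

/* before: the pointer handed to the LED core dangles once this returns */
void register_led_old(struct led_dev_like *dev, const char *wiphy)
{
	char name[32];                        /* automatic storage */

	snprintf(name, sizeof(name), "iwl-%s:radio", wiphy);
	dev->name = name;                     /* dangling after return */
}

/* after: the name is formatted into storage that lives as long as the LED */
void register_led_new(struct led_like *led, const char *wiphy)
{
	snprintf(led->name, sizeof(led->name), "iwl-%s:radio", wiphy);
	led->led_dev.name = led->name;        /* points into long-lived storage */
}
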
diff --git a/drivers/net/wireless/iwlwifi/iwl-3945-led.h b/drivers/net/wireless/iwlwifi/iwl-3945-led.h
index 47b7e0bac802..2fbd126c1347 100644
--- a/drivers/net/wireless/iwlwifi/iwl-3945-led.h
+++ b/drivers/net/wireless/iwlwifi/iwl-3945-led.h
@@ -50,6 +50,7 @@ enum led_type {
50struct iwl3945_led { 50struct iwl3945_led {
51 struct iwl3945_priv *priv; 51 struct iwl3945_priv *priv;
52 struct led_classdev led_dev; 52 struct led_classdev led_dev;
53 char name[32];
53 54
54 int (*led_on) (struct iwl3945_priv *priv, int led_id); 55 int (*led_on) (struct iwl3945_priv *priv, int led_id);
55 int (*led_off) (struct iwl3945_priv *priv, int led_id); 56 int (*led_off) (struct iwl3945_priv *priv, int led_id);
diff --git a/drivers/net/wireless/iwlwifi/iwl-3945.c b/drivers/net/wireless/iwlwifi/iwl-3945.c
index c2a76785b665..b3931f6135a4 100644
--- a/drivers/net/wireless/iwlwifi/iwl-3945.c
+++ b/drivers/net/wireless/iwlwifi/iwl-3945.c
@@ -630,7 +630,9 @@ static void iwl3945_pass_packet_to_mac80211(struct iwl3945_priv *priv,
630 struct ieee80211_rx_status *stats) 630 struct ieee80211_rx_status *stats)
631{ 631{
632 struct iwl3945_rx_packet *pkt = (struct iwl3945_rx_packet *)rxb->skb->data; 632 struct iwl3945_rx_packet *pkt = (struct iwl3945_rx_packet *)rxb->skb->data;
633#ifdef CONFIG_IWL3945_LEDS
633 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)IWL_RX_DATA(pkt); 634 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)IWL_RX_DATA(pkt);
635#endif
634 struct iwl3945_rx_frame_hdr *rx_hdr = IWL_RX_HDR(pkt); 636 struct iwl3945_rx_frame_hdr *rx_hdr = IWL_RX_HDR(pkt);
635 struct iwl3945_rx_frame_end *rx_end = IWL_RX_END(pkt); 637 struct iwl3945_rx_frame_end *rx_end = IWL_RX_END(pkt);
636 short len = le16_to_cpu(rx_hdr->len); 638 short len = le16_to_cpu(rx_hdr->len);
@@ -708,10 +710,7 @@ static void iwl3945_rx_reply_rx(struct iwl3945_priv *priv,
708 return; 710 return;
709 } 711 }
710 712
711 if (priv->iw_mode == IEEE80211_IF_TYPE_MNTR) { 713
712 iwl3945_pass_packet_to_mac80211(priv, rxb, &rx_status);
713 return;
714 }
715 714
716 /* Convert 3945's rssi indicator to dBm */ 715 /* Convert 3945's rssi indicator to dBm */
717 rx_status.signal = rx_stats->rssi - IWL_RSSI_OFFSET; 716 rx_status.signal = rx_stats->rssi - IWL_RSSI_OFFSET;
@@ -773,6 +772,11 @@ static void iwl3945_rx_reply_rx(struct iwl3945_priv *priv,
773 priv->last_rx_noise = rx_status.noise; 772 priv->last_rx_noise = rx_status.noise;
774 } 773 }
775 774
775 if (priv->iw_mode == IEEE80211_IF_TYPE_MNTR) {
776 iwl3945_pass_packet_to_mac80211(priv, rxb, &rx_status);
777 return;
778 }
779
776 switch (le16_to_cpu(header->frame_control) & IEEE80211_FCTL_FTYPE) { 780 switch (le16_to_cpu(header->frame_control) & IEEE80211_FCTL_FTYPE) {
777 case IEEE80211_FTYPE_MGMT: 781 case IEEE80211_FTYPE_MGMT:
778 switch (le16_to_cpu(header->frame_control) & 782 switch (le16_to_cpu(header->frame_control) &
@@ -791,8 +795,7 @@ static void iwl3945_rx_reply_rx(struct iwl3945_priv *priv,
791 struct ieee80211_mgmt *mgmt = 795 struct ieee80211_mgmt *mgmt =
792 (struct ieee80211_mgmt *)header; 796 (struct ieee80211_mgmt *)header;
793 __le32 *pos; 797 __le32 *pos;
794 pos = 798 pos = (__le32 *)&mgmt->u.beacon.
795 (__le32 *) & mgmt->u.beacon.
796 timestamp; 799 timestamp;
797 priv->timestamp0 = le32_to_cpu(pos[0]); 800 priv->timestamp0 = le32_to_cpu(pos[0]);
798 priv->timestamp1 = le32_to_cpu(pos[1]); 801 priv->timestamp1 = le32_to_cpu(pos[1]);
@@ -1505,7 +1508,7 @@ static int iwl3945_hw_reg_adjust_power_by_temp(int new_reading, int old_reading)
1505 */ 1508 */
1506static inline int iwl3945_hw_reg_temp_out_of_range(int temperature) 1509static inline int iwl3945_hw_reg_temp_out_of_range(int temperature)
1507{ 1510{
1508 return (((temperature < -260) || (temperature > 25)) ? 1 : 0); 1511 return ((temperature < -260) || (temperature > 25)) ? 1 : 0;
1509} 1512}
1510 1513
1511int iwl3945_hw_get_temperature(struct iwl3945_priv *priv) 1514int iwl3945_hw_get_temperature(struct iwl3945_priv *priv)
@@ -2626,7 +2629,7 @@ unsigned int iwl3945_hw_get_beacon_cmd(struct iwl3945_priv *priv,
2626 tx_beacon_cmd->tx.supp_rates[1] = 2629 tx_beacon_cmd->tx.supp_rates[1] =
2627 (IWL_CCK_BASIC_RATES_MASK & 0xF); 2630 (IWL_CCK_BASIC_RATES_MASK & 0xF);
2628 2631
2629 return (sizeof(struct iwl3945_tx_beacon_cmd) + frame_size); 2632 return sizeof(struct iwl3945_tx_beacon_cmd) + frame_size;
2630} 2633}
2631 2634
2632void iwl3945_hw_rx_handler_setup(struct iwl3945_priv *priv) 2635void iwl3945_hw_rx_handler_setup(struct iwl3945_priv *priv)
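
One behavioral change in the iwl-3945.c diff above is easy to miss among the cleanups: the monitor-mode early-out in iwl3945_rx_reply_rx() moves from before the RSSI/noise bookkeeping to after it, so frames forwarded to mac80211 in monitor mode now carry a populated rx_status. A simplified sketch of the reordered flow; the names below are placeholders for illustration, not driver symbols:

struct rx_status_like {
	int signal;	/* dBm */
	int noise;	/* dBm */
};

void reply_rx_flow(int monitor_mode, int raw_rssi, int rssi_offset,
		   int last_noise, struct rx_status_like *rx,
		   void (*pass_to_mac80211)(struct rx_status_like *))
{
	/* the early-out used to sit here, before signal/noise were set */

	rx->signal = raw_rssi - rssi_offset;	/* RSSI -> dBm conversion */
	rx->noise = last_noise;

	if (monitor_mode) {			/* new position of the early-out */
		pass_to_mac80211(rx);
		return;
	}

	/* management/data frame handling continues for the other modes */
}
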
diff --git a/drivers/net/wireless/iwlwifi/iwl-4965.c b/drivers/net/wireless/iwlwifi/iwl-4965.c
index 9afecb813716..22bb26985c2e 100644
--- a/drivers/net/wireless/iwlwifi/iwl-4965.c
+++ b/drivers/net/wireless/iwlwifi/iwl-4965.c
@@ -341,39 +341,6 @@ err:
341 return -EINVAL; 341 return -EINVAL;
342 342
343} 343}
344int iwl4965_set_pwr_src(struct iwl_priv *priv, enum iwl_pwr_src src)
345{
346 int ret;
347 unsigned long flags;
348
349 spin_lock_irqsave(&priv->lock, flags);
350 ret = iwl_grab_nic_access(priv);
351 if (ret) {
352 spin_unlock_irqrestore(&priv->lock, flags);
353 return ret;
354 }
355
356 if (src == IWL_PWR_SRC_VAUX) {
357 u32 val;
358 ret = pci_read_config_dword(priv->pci_dev, PCI_POWER_SOURCE,
359 &val);
360
361 if (val & PCI_CFG_PMC_PME_FROM_D3COLD_SUPPORT) {
362 iwl_set_bits_mask_prph(priv, APMG_PS_CTRL_REG,
363 APMG_PS_CTRL_VAL_PWR_SRC_VAUX,
364 ~APMG_PS_CTRL_MSK_PWR_SRC);
365 }
366 } else {
367 iwl_set_bits_mask_prph(priv, APMG_PS_CTRL_REG,
368 APMG_PS_CTRL_VAL_PWR_SRC_VMAIN,
369 ~APMG_PS_CTRL_MSK_PWR_SRC);
370 }
371
372 iwl_release_nic_access(priv);
373 spin_unlock_irqrestore(&priv->lock, flags);
374
375 return ret;
376}
377 344
378/* 345/*
379 * Activate/Deactivat Tx DMA/FIFO channels according tx fifos mask 346 * Activate/Deactivat Tx DMA/FIFO channels according tx fifos mask
@@ -875,18 +842,6 @@ static int iwl4965_hw_set_hw_params(struct iwl_priv *priv)
875 return 0; 842 return 0;
876} 843}
877 844
878/* set card power command */
879static int iwl4965_set_power(struct iwl_priv *priv,
880 void *cmd)
881{
882 int ret = 0;
883
884 ret = iwl_send_cmd_pdu_async(priv, POWER_TABLE_CMD,
885 sizeof(struct iwl4965_powertable_cmd),
886 cmd, NULL);
887 return ret;
888}
889
890static s32 iwl4965_math_div_round(s32 num, s32 denom, s32 *res) 845static s32 iwl4965_math_div_round(s32 num, s32 denom, s32 *res)
891{ 846{
892 s32 sign = 1; 847 s32 sign = 1;
@@ -1560,11 +1515,11 @@ static int iwl4965_fill_txpower_tbl(struct iwl_priv *priv, u8 band, u16 channel,
1560 c, atten_value, power_index, 1515 c, atten_value, power_index,
1561 tx_power.s.radio_tx_gain[c], 1516 tx_power.s.radio_tx_gain[c],
1562 tx_power.s.dsp_predis_atten[c]); 1517 tx_power.s.dsp_predis_atten[c]);
1563 }/* for each chain */ 1518 } /* for each chain */
1564 1519
1565 tx_power_tbl->power_tbl[i].dw = cpu_to_le32(tx_power.dw); 1520 tx_power_tbl->power_tbl[i].dw = cpu_to_le32(tx_power.dw);
1566 1521
1567 }/* for each rate */ 1522 } /* for each rate */
1568 1523
1569 return 0; 1524 return 0;
1570} 1525}
@@ -1701,38 +1656,6 @@ static int iwl4965_shared_mem_rx_idx(struct iwl_priv *priv)
1701 return le32_to_cpu(s->rb_closed) & 0xFFF; 1656 return le32_to_cpu(s->rb_closed) & 0xFFF;
1702} 1657}
1703 1658
1704unsigned int iwl4965_hw_get_beacon_cmd(struct iwl_priv *priv,
1705 struct iwl_frame *frame, u8 rate)
1706{
1707 struct iwl4965_tx_beacon_cmd *tx_beacon_cmd;
1708 unsigned int frame_size;
1709
1710 tx_beacon_cmd = &frame->u.beacon;
1711 memset(tx_beacon_cmd, 0, sizeof(*tx_beacon_cmd));
1712
1713 tx_beacon_cmd->tx.sta_id = priv->hw_params.bcast_sta_id;
1714 tx_beacon_cmd->tx.stop_time.life_time = TX_CMD_LIFE_TIME_INFINITE;
1715
1716 frame_size = iwl4965_fill_beacon_frame(priv,
1717 tx_beacon_cmd->frame,
1718 iwl_bcast_addr,
1719 sizeof(frame->u) - sizeof(*tx_beacon_cmd));
1720
1721 BUG_ON(frame_size > MAX_MPDU_SIZE);
1722 tx_beacon_cmd->tx.len = cpu_to_le16((u16)frame_size);
1723
1724 if ((rate == IWL_RATE_1M_PLCP) || (rate >= IWL_RATE_2M_PLCP))
1725 tx_beacon_cmd->tx.rate_n_flags =
1726 iwl_hw_set_rate_n_flags(rate, RATE_MCS_CCK_MSK);
1727 else
1728 tx_beacon_cmd->tx.rate_n_flags =
1729 iwl_hw_set_rate_n_flags(rate, 0);
1730
1731 tx_beacon_cmd->tx.tx_flags = (TX_CMD_FLG_SEQ_CTL_MSK |
1732 TX_CMD_FLG_TSF_MSK | TX_CMD_FLG_STA_RATE_MSK);
1733 return (sizeof(*tx_beacon_cmd) + frame_size);
1734}
1735
1736static int iwl4965_alloc_shared_mem(struct iwl_priv *priv) 1659static int iwl4965_alloc_shared_mem(struct iwl_priv *priv)
1737{ 1660{
1738 priv->shared_virt = pci_alloc_consistent(priv->pci_dev, 1661 priv->shared_virt = pci_alloc_consistent(priv->pci_dev,
@@ -2079,39 +2002,6 @@ static int iwl4965_txq_agg_enable(struct iwl_priv *priv, int txq_id,
2079 return 0; 2002 return 0;
2080} 2003}
2081 2004
2082int iwl4965_mac_ampdu_action(struct ieee80211_hw *hw,
2083 enum ieee80211_ampdu_mlme_action action,
2084 const u8 *addr, u16 tid, u16 *ssn)
2085{
2086 struct iwl_priv *priv = hw->priv;
2087 DECLARE_MAC_BUF(mac);
2088
2089 IWL_DEBUG_HT("A-MPDU action on addr %s tid %d\n",
2090 print_mac(mac, addr), tid);
2091
2092 if (!(priv->cfg->sku & IWL_SKU_N))
2093 return -EACCES;
2094
2095 switch (action) {
2096 case IEEE80211_AMPDU_RX_START:
2097 IWL_DEBUG_HT("start Rx\n");
2098 return iwl_rx_agg_start(priv, addr, tid, *ssn);
2099 case IEEE80211_AMPDU_RX_STOP:
2100 IWL_DEBUG_HT("stop Rx\n");
2101 return iwl_rx_agg_stop(priv, addr, tid);
2102 case IEEE80211_AMPDU_TX_START:
2103 IWL_DEBUG_HT("start Tx\n");
2104 return iwl_tx_agg_start(priv, addr, tid, ssn);
2105 case IEEE80211_AMPDU_TX_STOP:
2106 IWL_DEBUG_HT("stop Tx\n");
2107 return iwl_tx_agg_stop(priv, addr, tid);
2108 default:
2109 IWL_DEBUG_HT("unknown\n");
2110 return -EINVAL;
2111 break;
2112 }
2113 return 0;
2114}
2115 2005
2116static u16 iwl4965_get_hcmd_size(u8 cmd_id, u16 len) 2006static u16 iwl4965_get_hcmd_size(u8 cmd_id, u16 len)
2117{ 2007{
@@ -2240,9 +2130,9 @@ static int iwl4965_tx_status_reply_tx(struct iwl_priv *priv,
2240 bitmap = bitmap << sh; 2130 bitmap = bitmap << sh;
2241 sh = 0; 2131 sh = 0;
2242 } 2132 }
2243 bitmap |= (1 << sh); 2133 bitmap |= 1ULL << sh;
2244 IWL_DEBUG_TX_REPLY("start=%d bitmap=0x%x\n", 2134 IWL_DEBUG_TX_REPLY("start=%d bitmap=0x%llx\n",
2245 start, (u32)(bitmap & 0xFFFFFFFF)); 2135 start, (unsigned long long)bitmap);
2246 } 2136 }
2247 2137
2248 agg->bitmap = bitmap; 2138 agg->bitmap = bitmap;
@@ -2368,6 +2258,40 @@ static void iwl4965_rx_reply_tx(struct iwl_priv *priv,
2368 IWL_ERROR("TODO: Implement Tx ABORT REQUIRED!!!\n"); 2258 IWL_ERROR("TODO: Implement Tx ABORT REQUIRED!!!\n");
2369} 2259}
2370 2260
2261static int iwl4965_calc_rssi(struct iwl_priv *priv,
2262 struct iwl_rx_phy_res *rx_resp)
2263{
2264 /* data from PHY/DSP regarding signal strength, etc.,
2265 * contents are always there, not configurable by host. */
2266 struct iwl4965_rx_non_cfg_phy *ncphy =
2267 (struct iwl4965_rx_non_cfg_phy *)rx_resp->non_cfg_phy_buf;
2268 u32 agc = (le16_to_cpu(ncphy->agc_info) & IWL49_AGC_DB_MASK)
2269 >> IWL49_AGC_DB_POS;
2270
2271 u32 valid_antennae =
2272 (le16_to_cpu(rx_resp->phy_flags) & IWL49_RX_PHY_FLAGS_ANTENNAE_MASK)
2273 >> IWL49_RX_PHY_FLAGS_ANTENNAE_OFFSET;
2274 u8 max_rssi = 0;
2275 u32 i;
2276
2277 /* Find max rssi among 3 possible receivers.
2278 * These values are measured by the digital signal processor (DSP).
2279 * They should stay fairly constant even as the signal strength varies,
2280 * if the radio's automatic gain control (AGC) is working right.
2281 * AGC value (see below) will provide the "interesting" info. */
2282 for (i = 0; i < 3; i++)
2283 if (valid_antennae & (1 << i))
2284 max_rssi = max(ncphy->rssi_info[i << 1], max_rssi);
2285
2286 IWL_DEBUG_STATS("Rssi In A %d B %d C %d Max %d AGC dB %d\n",
2287 ncphy->rssi_info[0], ncphy->rssi_info[2], ncphy->rssi_info[4],
2288 max_rssi, agc);
2289
2290 /* dBm = max_rssi dB - agc dB - constant.
2291 * Higher AGC (higher radio gain) means lower signal. */
2292 return max_rssi - agc - IWL_RSSI_OFFSET;
2293}
2294
2371 2295
2372/* Set up 4965-specific Rx frame reply handlers */ 2296/* Set up 4965-specific Rx frame reply handlers */
2373static void iwl4965_rx_handler_setup(struct iwl_priv *priv) 2297static void iwl4965_rx_handler_setup(struct iwl_priv *priv)
@@ -2399,6 +2323,7 @@ static struct iwl_hcmd_utils_ops iwl4965_hcmd_utils = {
2399 .chain_noise_reset = iwl4965_chain_noise_reset, 2323 .chain_noise_reset = iwl4965_chain_noise_reset,
2400 .gain_computation = iwl4965_gain_computation, 2324 .gain_computation = iwl4965_gain_computation,
2401 .rts_tx_cmd_flag = iwl4965_rts_tx_cmd_flag, 2325 .rts_tx_cmd_flag = iwl4965_rts_tx_cmd_flag,
2326 .calc_rssi = iwl4965_calc_rssi,
2402}; 2327};
2403 2328
2404static struct iwl_lib_ops iwl4965_lib = { 2329static struct iwl_lib_ops iwl4965_lib = {
@@ -2440,7 +2365,6 @@ static struct iwl_lib_ops iwl4965_lib = {
2440 .check_version = iwl4965_eeprom_check_version, 2365 .check_version = iwl4965_eeprom_check_version,
2441 .query_addr = iwlcore_eeprom_query_addr, 2366 .query_addr = iwlcore_eeprom_query_addr,
2442 }, 2367 },
2443 .set_power = iwl4965_set_power,
2444 .send_tx_power = iwl4965_send_tx_power, 2368 .send_tx_power = iwl4965_send_tx_power,
2445 .update_chain_flags = iwl4965_update_chain_flags, 2369 .update_chain_flags = iwl4965_update_chain_flags,
2446 .temperature = iwl4965_temperature_calib, 2370 .temperature = iwl4965_temperature_calib,
@@ -2469,7 +2393,7 @@ MODULE_PARM_DESC(antenna, "select antenna (1=Main, 2=Aux, default 0 [both])");
2469module_param_named(disable, iwl4965_mod_params.disable, int, 0444); 2393module_param_named(disable, iwl4965_mod_params.disable, int, 0444);
2470MODULE_PARM_DESC(disable, "manually disable the radio (default 0 [radio on])"); 2394MODULE_PARM_DESC(disable, "manually disable the radio (default 0 [radio on])");
2471module_param_named(swcrypto, iwl4965_mod_params.sw_crypto, int, 0444); 2395module_param_named(swcrypto, iwl4965_mod_params.sw_crypto, int, 0444);
2472MODULE_PARM_DESC(swcrypto, "using crypto in software (default 0 [hardware])\n"); 2396MODULE_PARM_DESC(swcrypto, "using crypto in software (default 0 [hardware])");
2473module_param_named(debug, iwl4965_mod_params.debug, int, 0444); 2397module_param_named(debug, iwl4965_mod_params.debug, int, 0444);
2474MODULE_PARM_DESC(debug, "debug output mask"); 2398MODULE_PARM_DESC(debug, "debug output mask");
2475module_param_named( 2399module_param_named(
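
The iwl-4965.c diff above also adds iwl4965_calc_rssi() and wires it into the new calc_rssi hook of iwl_hcmd_utils_ops; the 5000-series counterpart follows in the next file. Both reduce to the conversion described in their comments, sketched standalone below (rssi_offset stands in for the driver's IWL_RSSI_OFFSET constant, which is defined elsewhere):

/* dBm = strongest per-antenna RSSI (dB) - AGC gain (dB) - fixed offset;
 * higher AGC means the radio applied more gain, i.e. a weaker input signal. */
int calc_signal_dbm(int max_rssi_db, int agc_db, int rssi_offset)
{
	return max_rssi_db - agc_db - rssi_offset;
}
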
diff --git a/drivers/net/wireless/iwlwifi/iwl-5000.c b/drivers/net/wireless/iwlwifi/iwl-5000.c
index 878d6193b232..f3d139b663e6 100644
--- a/drivers/net/wireless/iwlwifi/iwl-5000.c
+++ b/drivers/net/wireless/iwlwifi/iwl-5000.c
@@ -93,6 +93,13 @@ static int iwl5000_apm_init(struct iwl_priv *priv)
93 iwl_set_bit(priv, CSR_GIO_CHICKEN_BITS, 93 iwl_set_bit(priv, CSR_GIO_CHICKEN_BITS,
94 CSR_GIO_CHICKEN_BITS_REG_BIT_L1A_NO_L0S_RX); 94 CSR_GIO_CHICKEN_BITS_REG_BIT_L1A_NO_L0S_RX);
95 95
96 /* Set FH wait treshold to maximum (HW error during stress W/A) */
97 iwl_set_bit(priv, CSR_DBG_HPET_MEM_REG, CSR_DBG_HPET_MEM_REG_VAL);
98
99 /* enable HAP INTA to move device L1a -> L0s */
100 iwl_set_bit(priv, CSR_HW_IF_CONFIG_REG,
101 CSR_HW_IF_CONFIG_REG_BIT_HAP_WAKE_L1A);
102
96 iwl_set_bit(priv, CSR_ANA_PLL_CFG, CSR50_ANA_PLL_CFG_VAL); 103 iwl_set_bit(priv, CSR_ANA_PLL_CFG, CSR50_ANA_PLL_CFG_VAL);
97 104
98 /* set "initialization complete" bit to move adapter 105 /* set "initialization complete" bit to move adapter
@@ -230,6 +237,16 @@ static void iwl5000_nic_config(struct iwl_priv *priv)
230 CSR_HW_IF_CONFIG_REG_BIT_RADIO_SI | 237 CSR_HW_IF_CONFIG_REG_BIT_RADIO_SI |
231 CSR_HW_IF_CONFIG_REG_BIT_MAC_SI); 238 CSR_HW_IF_CONFIG_REG_BIT_MAC_SI);
232 239
240 /* W/A : NIC is stuck in a reset state after Early PCIe power off
241 * (PCIe power is lost before PERST# is asserted),
242 * causing ME FW to lose ownership and not being able to obtain it back.
243 */
244 iwl_grab_nic_access(priv);
245 iwl_set_bits_mask_prph(priv, APMG_PS_CTRL_REG,
246 APMG_PS_CTRL_EARLY_PWR_OFF_RESET_DIS,
247 ~APMG_PS_CTRL_EARLY_PWR_OFF_RESET_DIS);
248 iwl_release_nic_access(priv);
249
233 spin_unlock_irqrestore(&priv->lock, flags); 250 spin_unlock_irqrestore(&priv->lock, flags);
234} 251}
235 252
@@ -924,8 +941,8 @@ static void iwl5000_txq_update_byte_cnt_tbl(struct iwl_priv *priv,
924 len = byte_cnt + IWL_TX_CRC_SIZE + IWL_TX_DELIMITER_SIZE; 941 len = byte_cnt + IWL_TX_CRC_SIZE + IWL_TX_DELIMITER_SIZE;
925 942
926 if (txq_id != IWL_CMD_QUEUE_NUM) { 943 if (txq_id != IWL_CMD_QUEUE_NUM) {
927 sta = txq->cmd[txq->q.write_ptr].cmd.tx.sta_id; 944 sta = txq->cmd[txq->q.write_ptr]->cmd.tx.sta_id;
928 sec_ctl = txq->cmd[txq->q.write_ptr].cmd.tx.sec_ctl; 945 sec_ctl = txq->cmd[txq->q.write_ptr]->cmd.tx.sec_ctl;
929 946
930 switch (sec_ctl & TX_CMD_SEC_MSK) { 947 switch (sec_ctl & TX_CMD_SEC_MSK) {
931 case TX_CMD_SEC_CCM: 948 case TX_CMD_SEC_CCM:
@@ -964,7 +981,7 @@ static void iwl5000_txq_inval_byte_cnt_tbl(struct iwl_priv *priv,
964 u8 sta = 0; 981 u8 sta = 0;
965 982
966 if (txq_id != IWL_CMD_QUEUE_NUM) 983 if (txq_id != IWL_CMD_QUEUE_NUM)
967 sta = txq->cmd[txq->q.read_ptr].cmd.tx.sta_id; 984 sta = txq->cmd[txq->q.read_ptr]->cmd.tx.sta_id;
968 985
969 shared_data->queues_byte_cnt_tbls[txq_id].tfd_offset[txq->q.read_ptr]. 986 shared_data->queues_byte_cnt_tbls[txq_id].tfd_offset[txq->q.read_ptr].
970 val = cpu_to_le16(1 | (sta << 12)); 987 val = cpu_to_le16(1 | (sta << 12));
@@ -1131,7 +1148,7 @@ static void iwl5000_txq_set_sched(struct iwl_priv *priv, u32 mask)
1131 1148
1132static inline u32 iwl5000_get_scd_ssn(struct iwl5000_tx_resp *tx_resp) 1149static inline u32 iwl5000_get_scd_ssn(struct iwl5000_tx_resp *tx_resp)
1133{ 1150{
1134 return le32_to_cpup((__le32*)&tx_resp->status + 1151 return le32_to_cpup((__le32 *)&tx_resp->status +
1135 tx_resp->frame_count) & MAX_SN; 1152 tx_resp->frame_count) & MAX_SN;
1136} 1153}
1137 1154
@@ -1228,9 +1245,9 @@ static int iwl5000_tx_status_reply_tx(struct iwl_priv *priv,
1228 bitmap = bitmap << sh; 1245 bitmap = bitmap << sh;
1229 sh = 0; 1246 sh = 0;
1230 } 1247 }
1231 bitmap |= (1 << sh); 1248 bitmap |= 1ULL << sh;
1232 IWL_DEBUG_TX_REPLY("start=%d bitmap=0x%x\n", 1249 IWL_DEBUG_TX_REPLY("start=%d bitmap=0x%llx\n",
1233 start, (u32)(bitmap & 0xFFFFFFFF)); 1250 start, (unsigned long long)bitmap);
1234 } 1251 }
1235 1252
1236 agg->bitmap = bitmap; 1253 agg->bitmap = bitmap;
@@ -1444,6 +1461,44 @@ static void iwl5000_temperature(struct iwl_priv *priv)
1444 priv->temperature = le32_to_cpu(priv->statistics.general.temperature); 1461 priv->temperature = le32_to_cpu(priv->statistics.general.temperature);
1445} 1462}
1446 1463
1464/* Calc max signal level (dBm) among 3 possible receivers */
1465static int iwl5000_calc_rssi(struct iwl_priv *priv,
1466 struct iwl_rx_phy_res *rx_resp)
1467{
1468 /* data from PHY/DSP regarding signal strength, etc.,
1469 * contents are always there, not configurable by host
1470 */
1471 struct iwl5000_non_cfg_phy *ncphy =
1472 (struct iwl5000_non_cfg_phy *)rx_resp->non_cfg_phy_buf;
1473 u32 val, rssi_a, rssi_b, rssi_c, max_rssi;
1474 u8 agc;
1475
1476 val = le32_to_cpu(ncphy->non_cfg_phy[IWL50_RX_RES_AGC_IDX]);
1477 agc = (val & IWL50_OFDM_AGC_MSK) >> IWL50_OFDM_AGC_BIT_POS;
1478
1479 /* Find max rssi among 3 possible receivers.
1480 * These values are measured by the digital signal processor (DSP).
1481 * They should stay fairly constant even as the signal strength varies,
1482 * if the radio's automatic gain control (AGC) is working right.
1483 * AGC value (see below) will provide the "interesting" info.
1484 */
1485 val = le32_to_cpu(ncphy->non_cfg_phy[IWL50_RX_RES_RSSI_AB_IDX]);
1486 rssi_a = (val & IWL50_OFDM_RSSI_A_MSK) >> IWL50_OFDM_RSSI_A_BIT_POS;
1487 rssi_b = (val & IWL50_OFDM_RSSI_B_MSK) >> IWL50_OFDM_RSSI_B_BIT_POS;
1488 val = le32_to_cpu(ncphy->non_cfg_phy[IWL50_RX_RES_RSSI_C_IDX]);
1489 rssi_c = (val & IWL50_OFDM_RSSI_C_MSK) >> IWL50_OFDM_RSSI_C_BIT_POS;
1490
1491 max_rssi = max_t(u32, rssi_a, rssi_b);
1492 max_rssi = max_t(u32, max_rssi, rssi_c);
1493
1494 IWL_DEBUG_STATS("Rssi In A %d B %d C %d Max %d AGC dB %d\n",
1495 rssi_a, rssi_b, rssi_c, max_rssi, agc);
1496
1497 /* dBm = max_rssi dB - agc dB - constant.
1498 * Higher AGC (higher radio gain) means lower signal. */
1499 return max_rssi - agc - IWL_RSSI_OFFSET;
1500}
1501
1447static struct iwl_hcmd_ops iwl5000_hcmd = { 1502static struct iwl_hcmd_ops iwl5000_hcmd = {
1448 .rxon_assoc = iwl5000_send_rxon_assoc, 1503 .rxon_assoc = iwl5000_send_rxon_assoc,
1449}; 1504};
@@ -1454,6 +1509,7 @@ static struct iwl_hcmd_utils_ops iwl5000_hcmd_utils = {
1454 .gain_computation = iwl5000_gain_computation, 1509 .gain_computation = iwl5000_gain_computation,
1455 .chain_noise_reset = iwl5000_chain_noise_reset, 1510 .chain_noise_reset = iwl5000_chain_noise_reset,
1456 .rts_tx_cmd_flag = iwl5000_rts_tx_cmd_flag, 1511 .rts_tx_cmd_flag = iwl5000_rts_tx_cmd_flag,
1512 .calc_rssi = iwl5000_calc_rssi,
1457}; 1513};
1458 1514
1459static struct iwl_lib_ops iwl5000_lib = { 1515static struct iwl_lib_ops iwl5000_lib = {
@@ -1474,6 +1530,7 @@ static struct iwl_lib_ops iwl5000_lib = {
1474 .alive_notify = iwl5000_alive_notify, 1530 .alive_notify = iwl5000_alive_notify,
1475 .send_tx_power = iwl5000_send_tx_power, 1531 .send_tx_power = iwl5000_send_tx_power,
1476 .temperature = iwl5000_temperature, 1532 .temperature = iwl5000_temperature,
1533 .update_chain_flags = iwl4965_update_chain_flags,
1477 .apm_ops = { 1534 .apm_ops = {
1478 .init = iwl5000_apm_init, 1535 .init = iwl5000_apm_init,
1479 .reset = iwl5000_apm_reset, 1536 .reset = iwl5000_apm_reset,
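
A small correctness fix appears in both tx_status_reply_tx hunks (iwl-4965.c earlier and iwl-5000.c just above): the aggregation bitmap is now built with "1ULL << sh" instead of "1 << sh", and the debug print is widened to %llx. With a plain int constant the shift is performed in 32 bits, so once sh reaches 32 the result is undefined and the bit is silently lost from the 64-bit bitmap. A minimal standalone illustration:

#include <stdio.h>

int main(void)
{
	unsigned long long bitmap = 0;
	int sh = 40;

	/* correct: the shift is performed in 64 bits */
	bitmap |= 1ULL << sh;

	/* the old form, bitmap |= 1 << sh, shifts a 32-bit int and the
	 * set bit never reaches the upper half of the bitmap */

	printf("bitmap=0x%llx\n", bitmap);
	return 0;
}
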
diff --git a/drivers/net/wireless/iwlwifi/iwl-4965-rs.c b/drivers/net/wireless/iwlwifi/iwl-agn-rs.c
index 3ccb84aa5dbc..754fef5b592f 100644
--- a/drivers/net/wireless/iwlwifi/iwl-4965-rs.c
+++ b/drivers/net/wireless/iwlwifi/iwl-agn-rs.c
@@ -42,7 +42,7 @@
42#include "iwl-core.h" 42#include "iwl-core.h"
43#include "iwl-helpers.h" 43#include "iwl-helpers.h"
44 44
45#define RS_NAME "iwl-4965-rs" 45#define RS_NAME "iwl-agn-rs"
46 46
47#define NUM_TRY_BEFORE_ANT_TOGGLE 1 47#define NUM_TRY_BEFORE_ANT_TOGGLE 1
48#define IWL_NUMBER_TRY 1 48#define IWL_NUMBER_TRY 1
@@ -77,9 +77,9 @@ static const u8 ant_toggle_lookup[] = {
77}; 77};
78 78
79/** 79/**
80 * struct iwl4965_rate_scale_data -- tx success history for one rate 80 * struct iwl_rate_scale_data -- tx success history for one rate
81 */ 81 */
82struct iwl4965_rate_scale_data { 82struct iwl_rate_scale_data {
83 u64 data; /* bitmap of successful frames */ 83 u64 data; /* bitmap of successful frames */
84 s32 success_counter; /* number of frames successful */ 84 s32 success_counter; /* number of frames successful */
85 s32 success_ratio; /* per-cent * 128 */ 85 s32 success_ratio; /* per-cent * 128 */
@@ -89,12 +89,12 @@ struct iwl4965_rate_scale_data {
89}; 89};
90 90
91/** 91/**
92 * struct iwl4965_scale_tbl_info -- tx params and success history for all rates 92 * struct iwl_scale_tbl_info -- tx params and success history for all rates
93 * 93 *
94 * There are two of these in struct iwl4965_lq_sta, 94 * There are two of these in struct iwl_lq_sta,
95 * one for "active", and one for "search". 95 * one for "active", and one for "search".
96 */ 96 */
97struct iwl4965_scale_tbl_info { 97struct iwl_scale_tbl_info {
98 enum iwl_table_type lq_type; 98 enum iwl_table_type lq_type;
99 u8 ant_type; 99 u8 ant_type;
100 u8 is_SGI; /* 1 = short guard interval */ 100 u8 is_SGI; /* 1 = short guard interval */
@@ -103,10 +103,10 @@ struct iwl4965_scale_tbl_info {
103 u8 action; /* change modulation; IWL_[LEGACY/SISO/MIMO]_SWITCH_* */ 103 u8 action; /* change modulation; IWL_[LEGACY/SISO/MIMO]_SWITCH_* */
104 s32 *expected_tpt; /* throughput metrics; expected_tpt_G, etc. */ 104 s32 *expected_tpt; /* throughput metrics; expected_tpt_G, etc. */
105 u32 current_rate; /* rate_n_flags, uCode API format */ 105 u32 current_rate; /* rate_n_flags, uCode API format */
106 struct iwl4965_rate_scale_data win[IWL_RATE_COUNT]; /* rate histories */ 106 struct iwl_rate_scale_data win[IWL_RATE_COUNT]; /* rate histories */
107}; 107};
108 108
109struct iwl4965_traffic_load { 109struct iwl_traffic_load {
110 unsigned long time_stamp; /* age of the oldest statistics */ 110 unsigned long time_stamp; /* age of the oldest statistics */
111 u32 packet_count[TID_QUEUE_MAX_SIZE]; /* packet count in this time 111 u32 packet_count[TID_QUEUE_MAX_SIZE]; /* packet count in this time
112 * slice */ 112 * slice */
@@ -118,11 +118,11 @@ struct iwl4965_traffic_load {
118}; 118};
119 119
120/** 120/**
121 * struct iwl4965_lq_sta -- driver's rate scaling private structure 121 * struct iwl_lq_sta -- driver's rate scaling private structure
122 * 122 *
123 * Pointer to this gets passed back and forth between driver and mac80211. 123 * Pointer to this gets passed back and forth between driver and mac80211.
124 */ 124 */
125struct iwl4965_lq_sta { 125struct iwl_lq_sta {
126 u8 active_tbl; /* index of active table, range 0-1 */ 126 u8 active_tbl; /* index of active table, range 0-1 */
127 u8 enable_counter; /* indicates HT mode */ 127 u8 enable_counter; /* indicates HT mode */
128 u8 stay_in_tbl; /* 1: disallow, 0: allow search for new mode */ 128 u8 stay_in_tbl; /* 1: disallow, 0: allow search for new mode */
@@ -153,8 +153,8 @@ struct iwl4965_lq_sta {
153 u16 active_rate_basic; 153 u16 active_rate_basic;
154 154
155 struct iwl_link_quality_cmd lq; 155 struct iwl_link_quality_cmd lq;
156 struct iwl4965_scale_tbl_info lq_info[LQ_SIZE]; /* "active", "search" */ 156 struct iwl_scale_tbl_info lq_info[LQ_SIZE]; /* "active", "search" */
157 struct iwl4965_traffic_load load[TID_MAX_LOAD_COUNT]; 157 struct iwl_traffic_load load[TID_MAX_LOAD_COUNT];
158 u8 tx_agg_tid_en; 158 u8 tx_agg_tid_en;
159#ifdef CONFIG_MAC80211_DEBUGFS 159#ifdef CONFIG_MAC80211_DEBUGFS
160 struct dentry *rs_sta_dbgfs_scale_table_file; 160 struct dentry *rs_sta_dbgfs_scale_table_file;
@@ -170,16 +170,15 @@ static void rs_rate_scale_perform(struct iwl_priv *priv,
170 struct ieee80211_hdr *hdr, 170 struct ieee80211_hdr *hdr,
171 struct sta_info *sta); 171 struct sta_info *sta);
172static void rs_fill_link_cmd(const struct iwl_priv *priv, 172static void rs_fill_link_cmd(const struct iwl_priv *priv,
173 struct iwl4965_lq_sta *lq_sta, 173 struct iwl_lq_sta *lq_sta, u32 rate_n_flags);
174 u32 rate_n_flags);
175 174
176 175
177#ifdef CONFIG_MAC80211_DEBUGFS 176#ifdef CONFIG_MAC80211_DEBUGFS
178static void rs_dbgfs_set_mcs(struct iwl4965_lq_sta *lq_sta, 177static void rs_dbgfs_set_mcs(struct iwl_lq_sta *lq_sta,
179 u32 *rate_n_flags, int index); 178 u32 *rate_n_flags, int index);
180#else 179#else
181static void rs_dbgfs_set_mcs(struct iwl4965_lq_sta *lq_sta, 180static void rs_dbgfs_set_mcs(struct iwl_lq_sta *lq_sta,
182 u32 *rate_n_flags, int index) 181 u32 *rate_n_flags, int index)
183{} 182{}
184#endif 183#endif
185 184
@@ -234,7 +233,7 @@ static inline u8 rs_extract_rate(u32 rate_n_flags)
234 return (u8)(rate_n_flags & 0xFF); 233 return (u8)(rate_n_flags & 0xFF);
235} 234}
236 235
237static void rs_rate_scale_clear_window(struct iwl4965_rate_scale_data *window) 236static void rs_rate_scale_clear_window(struct iwl_rate_scale_data *window)
238{ 237{
239 window->data = 0; 238 window->data = 0;
240 window->success_counter = 0; 239 window->success_counter = 0;
@@ -246,14 +245,14 @@ static void rs_rate_scale_clear_window(struct iwl4965_rate_scale_data *window)
246 245
247static inline u8 rs_is_valid_ant(u8 valid_antenna, u8 ant_type) 246static inline u8 rs_is_valid_ant(u8 valid_antenna, u8 ant_type)
248{ 247{
249 return ((ant_type & valid_antenna) == ant_type); 248 return (ant_type & valid_antenna) == ant_type;
250} 249}
251 250
252/* 251/*
253 * removes the old data from the statistics. All data that is older than 252 * removes the old data from the statistics. All data that is older than
254 * TID_MAX_TIME_DIFF, will be deleted. 253 * TID_MAX_TIME_DIFF, will be deleted.
255 */ 254 */
256static void rs_tl_rm_old_stats(struct iwl4965_traffic_load *tl, u32 curr_time) 255static void rs_tl_rm_old_stats(struct iwl_traffic_load *tl, u32 curr_time)
257{ 256{
258 /* The oldest age we want to keep */ 257 /* The oldest age we want to keep */
259 u32 oldest_time = curr_time - TID_MAX_TIME_DIFF; 258 u32 oldest_time = curr_time - TID_MAX_TIME_DIFF;
@@ -274,13 +273,13 @@ static void rs_tl_rm_old_stats(struct iwl4965_traffic_load *tl, u32 curr_time)
274 * increment traffic load value for tid and also remove 273 * increment traffic load value for tid and also remove
275 * any old values if passed the certain time period 274 * any old values if passed the certain time period
276 */ 275 */
277static u8 rs_tl_add_packet(struct iwl4965_lq_sta *lq_data, 276static u8 rs_tl_add_packet(struct iwl_lq_sta *lq_data,
278 struct ieee80211_hdr *hdr) 277 struct ieee80211_hdr *hdr)
279{ 278{
280 u32 curr_time = jiffies_to_msecs(jiffies); 279 u32 curr_time = jiffies_to_msecs(jiffies);
281 u32 time_diff; 280 u32 time_diff;
282 s32 index; 281 s32 index;
283 struct iwl4965_traffic_load *tl = NULL; 282 struct iwl_traffic_load *tl = NULL;
284 __le16 fc = hdr->frame_control; 283 __le16 fc = hdr->frame_control;
285 u8 tid; 284 u8 tid;
286 285
@@ -325,12 +324,12 @@ static u8 rs_tl_add_packet(struct iwl4965_lq_sta *lq_data,
325/* 324/*
326 get the traffic load value for tid 325 get the traffic load value for tid
327*/ 326*/
328static u32 rs_tl_get_load(struct iwl4965_lq_sta *lq_data, u8 tid) 327static u32 rs_tl_get_load(struct iwl_lq_sta *lq_data, u8 tid)
329{ 328{
330 u32 curr_time = jiffies_to_msecs(jiffies); 329 u32 curr_time = jiffies_to_msecs(jiffies);
331 u32 time_diff; 330 u32 time_diff;
332 s32 index; 331 s32 index;
333 struct iwl4965_traffic_load *tl = NULL; 332 struct iwl_traffic_load *tl = NULL;
334 333
335 if (tid >= TID_MAX_LOAD_COUNT) 334 if (tid >= TID_MAX_LOAD_COUNT)
336 return 0; 335 return 0;
@@ -354,8 +353,8 @@ static u32 rs_tl_get_load(struct iwl4965_lq_sta *lq_data, u8 tid)
354} 353}
355 354
356static void rs_tl_turn_on_agg_for_tid(struct iwl_priv *priv, 355static void rs_tl_turn_on_agg_for_tid(struct iwl_priv *priv,
357 struct iwl4965_lq_sta *lq_data, u8 tid, 356 struct iwl_lq_sta *lq_data, u8 tid,
358 struct sta_info *sta) 357 struct sta_info *sta)
359{ 358{
360 unsigned long state; 359 unsigned long state;
361 DECLARE_MAC_BUF(mac); 360 DECLARE_MAC_BUF(mac);
@@ -373,8 +372,8 @@ static void rs_tl_turn_on_agg_for_tid(struct iwl_priv *priv,
373} 372}
374 373
375static void rs_tl_turn_on_agg(struct iwl_priv *priv, u8 tid, 374static void rs_tl_turn_on_agg(struct iwl_priv *priv, u8 tid,
376 struct iwl4965_lq_sta *lq_data, 375 struct iwl_lq_sta *lq_data,
377 struct sta_info *sta) 376 struct sta_info *sta)
378{ 377{
379 if ((tid < TID_MAX_LOAD_COUNT)) 378 if ((tid < TID_MAX_LOAD_COUNT))
380 rs_tl_turn_on_agg_for_tid(priv, lq_data, tid, sta); 379 rs_tl_turn_on_agg_for_tid(priv, lq_data, tid, sta);
@@ -385,9 +384,9 @@ static void rs_tl_turn_on_agg(struct iwl_priv *priv, u8 tid,
385 384
386static inline int get_num_of_ant_from_rate(u32 rate_n_flags) 385static inline int get_num_of_ant_from_rate(u32 rate_n_flags)
387{ 386{
388 return (!!(rate_n_flags & RATE_MCS_ANT_A_MSK) + 387 return !!(rate_n_flags & RATE_MCS_ANT_A_MSK) +
389 !!(rate_n_flags & RATE_MCS_ANT_B_MSK) + 388 !!(rate_n_flags & RATE_MCS_ANT_B_MSK) +
390 !!(rate_n_flags & RATE_MCS_ANT_C_MSK)); 389 !!(rate_n_flags & RATE_MCS_ANT_C_MSK);
391} 390}
392 391
393/** 392/**
@@ -397,11 +396,11 @@ static inline int get_num_of_ant_from_rate(u32 rate_n_flags)
397 * at this rate. window->data contains the bitmask of successful 396 * at this rate. window->data contains the bitmask of successful
398 * packets. 397 * packets.
399 */ 398 */
400static int rs_collect_tx_data(struct iwl4965_rate_scale_data *windows, 399static int rs_collect_tx_data(struct iwl_rate_scale_data *windows,
401 int scale_index, s32 tpt, int retries, 400 int scale_index, s32 tpt, int retries,
402 int successes) 401 int successes)
403{ 402{
404 struct iwl4965_rate_scale_data *window = NULL; 403 struct iwl_rate_scale_data *window = NULL;
405 static const u64 mask = (((u64)1) << (IWL_RATE_MAX_WINDOW - 1)); 404 static const u64 mask = (((u64)1) << (IWL_RATE_MAX_WINDOW - 1));
406 s32 fail_count; 405 s32 fail_count;
407 406
@@ -473,7 +472,7 @@ static int rs_collect_tx_data(struct iwl4965_rate_scale_data *windows,
473 * Fill uCode API rate_n_flags field, based on "search" or "active" table. 472 * Fill uCode API rate_n_flags field, based on "search" or "active" table.
474 */ 473 */
475/* FIXME:RS:remove this function and put the flags statically in the table */ 474/* FIXME:RS:remove this function and put the flags statically in the table */
476static u32 rate_n_flags_from_tbl(struct iwl4965_scale_tbl_info *tbl, 475static u32 rate_n_flags_from_tbl(struct iwl_scale_tbl_info *tbl,
477 int index, u8 use_green) 476 int index, u8 use_green)
478{ 477{
479 u32 rate_n_flags = 0; 478 u32 rate_n_flags = 0;
@@ -530,7 +529,7 @@ static u32 rate_n_flags_from_tbl(struct iwl4965_scale_tbl_info *tbl,
530 */ 529 */
531static int rs_get_tbl_info_from_mcs(const u32 rate_n_flags, 530static int rs_get_tbl_info_from_mcs(const u32 rate_n_flags,
532 enum ieee80211_band band, 531 enum ieee80211_band band,
533 struct iwl4965_scale_tbl_info *tbl, 532 struct iwl_scale_tbl_info *tbl,
534 int *rate_idx) 533 int *rate_idx)
535{ 534{
536 u32 ant_msk = (rate_n_flags & RATE_MCS_ANT_ABC_MSK); 535 u32 ant_msk = (rate_n_flags & RATE_MCS_ANT_ABC_MSK);
@@ -591,7 +590,7 @@ static int rs_get_tbl_info_from_mcs(const u32 rate_n_flags,
591/* switch to another antenna/antennas and return 1 */ 590/* switch to another antenna/antennas and return 1 */
592/* if no other valid antenna found, return 0 */ 591/* if no other valid antenna found, return 0 */
593static int rs_toggle_antenna(u32 valid_ant, u32 *rate_n_flags, 592static int rs_toggle_antenna(u32 valid_ant, u32 *rate_n_flags,
594 struct iwl4965_scale_tbl_info *tbl) 593 struct iwl_scale_tbl_info *tbl)
595{ 594{
596 u8 new_ant_type; 595 u8 new_ant_type;
597 596
@@ -621,9 +620,9 @@ static int rs_toggle_antenna(u32 valid_ant, u32 *rate_n_flags,
621#if 0 620#if 0
622static inline u8 rs_use_green(struct iwl_priv *priv, struct ieee80211_conf *conf) 621static inline u8 rs_use_green(struct iwl_priv *priv, struct ieee80211_conf *conf)
623{ 622{
624 return ((conf->flags & IEEE80211_CONF_SUPPORT_HT_MODE) && 623 return (conf->flags & IEEE80211_CONF_SUPPORT_HT_MODE) &&
625 priv->current_ht_config.is_green_field && 624 priv->current_ht_config.is_green_field &&
626 !priv->current_ht_config.non_GF_STA_present); 625 !priv->current_ht_config.non_GF_STA_present;
627} 626}
628#endif 627#endif
629static inline u8 rs_use_green(struct iwl_priv *priv, struct ieee80211_conf *conf) 628static inline u8 rs_use_green(struct iwl_priv *priv, struct ieee80211_conf *conf)
@@ -638,9 +637,9 @@ static inline u8 rs_use_green(struct iwl_priv *priv, struct ieee80211_conf *conf
638 * basic available rates. 637 * basic available rates.
639 * 638 *
640 */ 639 */
641static u16 rs_get_supported_rates(struct iwl4965_lq_sta *lq_sta, 640static u16 rs_get_supported_rates(struct iwl_lq_sta *lq_sta,
642 struct ieee80211_hdr *hdr, 641 struct ieee80211_hdr *hdr,
643 enum iwl_table_type rate_type) 642 enum iwl_table_type rate_type)
644{ 643{
645 if (hdr && is_multicast_ether_addr(hdr->addr1) && 644 if (hdr && is_multicast_ether_addr(hdr->addr1) &&
646 lq_sta->active_rate_basic) 645 lq_sta->active_rate_basic)
@@ -714,9 +713,9 @@ static u16 rs_get_adjacent_rate(struct iwl_priv *priv, u8 index, u16 rate_mask,
714 return (high << 8) | low; 713 return (high << 8) | low;
715} 714}
716 715
717static u32 rs_get_lower_rate(struct iwl4965_lq_sta *lq_sta, 716static u32 rs_get_lower_rate(struct iwl_lq_sta *lq_sta,
718 struct iwl4965_scale_tbl_info *tbl, u8 scale_index, 717 struct iwl_scale_tbl_info *tbl,
719 u8 ht_possible) 718 u8 scale_index, u8 ht_possible)
720{ 719{
721 s32 low; 720 s32 low;
722 u16 rate_mask; 721 u16 rate_mask;
@@ -780,7 +779,7 @@ static void rs_tx_status(void *priv_rate, struct net_device *dev,
780 int status; 779 int status;
781 u8 retries; 780 u8 retries;
782 int rs_index, index = 0; 781 int rs_index, index = 0;
783 struct iwl4965_lq_sta *lq_sta; 782 struct iwl_lq_sta *lq_sta;
784 struct iwl_link_quality_cmd *table; 783 struct iwl_link_quality_cmd *table;
785 struct sta_info *sta; 784 struct sta_info *sta;
786 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; 785 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
@@ -788,11 +787,11 @@ static void rs_tx_status(void *priv_rate, struct net_device *dev,
788 struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr); 787 struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr);
789 struct ieee80211_hw *hw = local_to_hw(local); 788 struct ieee80211_hw *hw = local_to_hw(local);
790 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); 789 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
791 struct iwl4965_rate_scale_data *window = NULL; 790 struct iwl_rate_scale_data *window = NULL;
792 struct iwl4965_rate_scale_data *search_win = NULL; 791 struct iwl_rate_scale_data *search_win = NULL;
793 u32 tx_rate; 792 u32 tx_rate;
794 struct iwl4965_scale_tbl_info tbl_type; 793 struct iwl_scale_tbl_info tbl_type;
795 struct iwl4965_scale_tbl_info *curr_tbl, *search_tbl; 794 struct iwl_scale_tbl_info *curr_tbl, *search_tbl;
796 u8 active_index = 0; 795 u8 active_index = 0;
797 __le16 fc = hdr->frame_control; 796 __le16 fc = hdr->frame_control;
798 s32 tpt = 0; 797 s32 tpt = 0;
@@ -820,7 +819,7 @@ static void rs_tx_status(void *priv_rate, struct net_device *dev,
820 goto out; 819 goto out;
821 820
822 821
823 lq_sta = (struct iwl4965_lq_sta *)sta->rate_ctrl_priv; 822 lq_sta = (struct iwl_lq_sta *)sta->rate_ctrl_priv;
824 823
825 if ((priv->iw_mode == IEEE80211_IF_TYPE_IBSS) && 824 if ((priv->iw_mode == IEEE80211_IF_TYPE_IBSS) &&
826 !lq_sta->ibss_sta_added) 825 !lq_sta->ibss_sta_added)
@@ -831,10 +830,8 @@ static void rs_tx_status(void *priv_rate, struct net_device *dev,
831 830
832 curr_tbl = &(lq_sta->lq_info[active_index]); 831 curr_tbl = &(lq_sta->lq_info[active_index]);
833 search_tbl = &(lq_sta->lq_info[(1 - active_index)]); 832 search_tbl = &(lq_sta->lq_info[(1 - active_index)]);
834 window = (struct iwl4965_rate_scale_data *) 833 window = (struct iwl_rate_scale_data *)&(curr_tbl->win[0]);
835 &(curr_tbl->win[0]); 834 search_win = (struct iwl_rate_scale_data *)&(search_tbl->win[0]);
836 search_win = (struct iwl4965_rate_scale_data *)
837 &(search_tbl->win[0]);
838 835
839 /* 836 /*
840 * Ignore this Tx frame response if its initial rate doesn't match 837 * Ignore this Tx frame response if its initial rate doesn't match
@@ -983,7 +980,7 @@ out:
983 * searching for a new mode. 980 * searching for a new mode.
984 */ 981 */
985static void rs_set_stay_in_table(struct iwl_priv *priv, u8 is_legacy, 982static void rs_set_stay_in_table(struct iwl_priv *priv, u8 is_legacy,
986 struct iwl4965_lq_sta *lq_sta) 983 struct iwl_lq_sta *lq_sta)
987{ 984{
988 IWL_DEBUG_RATE("we are staying in the same table\n"); 985 IWL_DEBUG_RATE("we are staying in the same table\n");
989 lq_sta->stay_in_tbl = 1; /* only place this gets set */ 986 lq_sta->stay_in_tbl = 1; /* only place this gets set */
@@ -1004,8 +1001,8 @@ static void rs_set_stay_in_table(struct iwl_priv *priv, u8 is_legacy,
1004/* 1001/*
1005 * Find correct throughput table for given mode of modulation 1002 * Find correct throughput table for given mode of modulation
1006 */ 1003 */
1007static void rs_set_expected_tpt_table(struct iwl4965_lq_sta *lq_sta, 1004static void rs_set_expected_tpt_table(struct iwl_lq_sta *lq_sta,
1008 struct iwl4965_scale_tbl_info *tbl) 1005 struct iwl_scale_tbl_info *tbl)
1009{ 1006{
1010 if (is_legacy(tbl->lq_type)) { 1007 if (is_legacy(tbl->lq_type)) {
1011 if (!is_a_band(tbl->lq_type)) 1008 if (!is_a_band(tbl->lq_type))
@@ -1050,12 +1047,12 @@ static void rs_set_expected_tpt_table(struct iwl4965_lq_sta *lq_sta,
1050 * bit rate will typically need to increase, but not if performance was bad. 1047 * bit rate will typically need to increase, but not if performance was bad.
1051 */ 1048 */
1052static s32 rs_get_best_rate(struct iwl_priv *priv, 1049static s32 rs_get_best_rate(struct iwl_priv *priv,
1053 struct iwl4965_lq_sta *lq_sta, 1050 struct iwl_lq_sta *lq_sta,
1054 struct iwl4965_scale_tbl_info *tbl, /* "search" */ 1051 struct iwl_scale_tbl_info *tbl, /* "search" */
1055 u16 rate_mask, s8 index) 1052 u16 rate_mask, s8 index)
1056{ 1053{
1057 /* "active" values */ 1054 /* "active" values */
1058 struct iwl4965_scale_tbl_info *active_tbl = 1055 struct iwl_scale_tbl_info *active_tbl =
1059 &(lq_sta->lq_info[lq_sta->active_tbl]); 1056 &(lq_sta->lq_info[lq_sta->active_tbl]);
1060 s32 active_sr = active_tbl->win[index].success_ratio; 1057 s32 active_sr = active_tbl->win[index].success_ratio;
1061 s32 active_tpt = active_tbl->expected_tpt[index]; 1058 s32 active_tpt = active_tbl->expected_tpt[index];
@@ -1143,10 +1140,10 @@ static s32 rs_get_best_rate(struct iwl_priv *priv,
1143 * Set up search table for MIMO 1140 * Set up search table for MIMO
1144 */ 1141 */
1145static int rs_switch_to_mimo2(struct iwl_priv *priv, 1142static int rs_switch_to_mimo2(struct iwl_priv *priv,
1146 struct iwl4965_lq_sta *lq_sta, 1143 struct iwl_lq_sta *lq_sta,
1147 struct ieee80211_conf *conf, 1144 struct ieee80211_conf *conf,
1148 struct sta_info *sta, 1145 struct sta_info *sta,
1149 struct iwl4965_scale_tbl_info *tbl, int index) 1146 struct iwl_scale_tbl_info *tbl, int index)
1150{ 1147{
1151 u16 rate_mask; 1148 u16 rate_mask;
1152 s32 rate; 1149 s32 rate;
@@ -1210,10 +1207,10 @@ static int rs_switch_to_mimo2(struct iwl_priv *priv,
1210 * Set up search table for SISO 1207 * Set up search table for SISO
1211 */ 1208 */
1212static int rs_switch_to_siso(struct iwl_priv *priv, 1209static int rs_switch_to_siso(struct iwl_priv *priv,
1213 struct iwl4965_lq_sta *lq_sta, 1210 struct iwl_lq_sta *lq_sta,
1214 struct ieee80211_conf *conf, 1211 struct ieee80211_conf *conf,
1215 struct sta_info *sta, 1212 struct sta_info *sta,
1216 struct iwl4965_scale_tbl_info *tbl, int index) 1213 struct iwl_scale_tbl_info *tbl, int index)
1217{ 1214{
1218 u16 rate_mask; 1215 u16 rate_mask;
1219 u8 is_green = lq_sta->is_green; 1216 u8 is_green = lq_sta->is_green;
@@ -1270,18 +1267,17 @@ static int rs_switch_to_siso(struct iwl_priv *priv,
1270 * Try to switch to new modulation mode from legacy 1267 * Try to switch to new modulation mode from legacy
1271 */ 1268 */
1272static int rs_move_legacy_other(struct iwl_priv *priv, 1269static int rs_move_legacy_other(struct iwl_priv *priv,
1273 struct iwl4965_lq_sta *lq_sta, 1270 struct iwl_lq_sta *lq_sta,
1274 struct ieee80211_conf *conf, 1271 struct ieee80211_conf *conf,
1275 struct sta_info *sta, 1272 struct sta_info *sta,
1276 int index) 1273 int index)
1277{ 1274{
1278 struct iwl4965_scale_tbl_info *tbl = 1275 struct iwl_scale_tbl_info *tbl = &(lq_sta->lq_info[lq_sta->active_tbl]);
1279 &(lq_sta->lq_info[lq_sta->active_tbl]); 1276 struct iwl_scale_tbl_info *search_tbl =
1280 struct iwl4965_scale_tbl_info *search_tbl = 1277 &(lq_sta->lq_info[(1 - lq_sta->active_tbl)]);
1281 &(lq_sta->lq_info[(1 - lq_sta->active_tbl)]); 1278 struct iwl_rate_scale_data *window = &(tbl->win[index]);
1282 struct iwl4965_rate_scale_data *window = &(tbl->win[index]); 1279 u32 sz = (sizeof(struct iwl_scale_tbl_info) -
1283 u32 sz = (sizeof(struct iwl4965_scale_tbl_info) - 1280 (sizeof(struct iwl_rate_scale_data) * IWL_RATE_COUNT));
1284 (sizeof(struct iwl4965_rate_scale_data) * IWL_RATE_COUNT));
1285 u8 start_action = tbl->action; 1281 u8 start_action = tbl->action;
1286 u8 valid_tx_ant = priv->hw_params.valid_tx_ant; 1282 u8 valid_tx_ant = priv->hw_params.valid_tx_ant;
1287 int ret = 0; 1283 int ret = 0;
@@ -1360,19 +1356,17 @@ static int rs_move_legacy_other(struct iwl_priv *priv,
1360 * Try to switch to new modulation mode from SISO 1356 * Try to switch to new modulation mode from SISO
1361 */ 1357 */
1362static int rs_move_siso_to_other(struct iwl_priv *priv, 1358static int rs_move_siso_to_other(struct iwl_priv *priv,
1363 struct iwl4965_lq_sta *lq_sta, 1359 struct iwl_lq_sta *lq_sta,
1364 struct ieee80211_conf *conf, 1360 struct ieee80211_conf *conf,
1365 struct sta_info *sta, 1361 struct sta_info *sta, int index)
1366 int index)
1367{ 1362{
1368 u8 is_green = lq_sta->is_green; 1363 u8 is_green = lq_sta->is_green;
1369 struct iwl4965_scale_tbl_info *tbl = 1364 struct iwl_scale_tbl_info *tbl = &(lq_sta->lq_info[lq_sta->active_tbl]);
1370 &(lq_sta->lq_info[lq_sta->active_tbl]); 1365 struct iwl_scale_tbl_info *search_tbl =
1371 struct iwl4965_scale_tbl_info *search_tbl = 1366 &(lq_sta->lq_info[(1 - lq_sta->active_tbl)]);
1372 &(lq_sta->lq_info[(1 - lq_sta->active_tbl)]); 1367 struct iwl_rate_scale_data *window = &(tbl->win[index]);
1373 struct iwl4965_rate_scale_data *window = &(tbl->win[index]); 1368 u32 sz = (sizeof(struct iwl_scale_tbl_info) -
1374 u32 sz = (sizeof(struct iwl4965_scale_tbl_info) - 1369 (sizeof(struct iwl_rate_scale_data) * IWL_RATE_COUNT));
1375 (sizeof(struct iwl4965_rate_scale_data) * IWL_RATE_COUNT));
1376 u8 start_action = tbl->action; 1370 u8 start_action = tbl->action;
1377 u8 valid_tx_ant = priv->hw_params.valid_tx_ant; 1371 u8 valid_tx_ant = priv->hw_params.valid_tx_ant;
1378 int ret; 1372 int ret;
@@ -1455,18 +1449,16 @@ static int rs_move_siso_to_other(struct iwl_priv *priv,
1455 * Try to switch to new modulation mode from MIMO 1449 * Try to switch to new modulation mode from MIMO
1456 */ 1450 */
1457static int rs_move_mimo_to_other(struct iwl_priv *priv, 1451static int rs_move_mimo_to_other(struct iwl_priv *priv,
1458 struct iwl4965_lq_sta *lq_sta, 1452 struct iwl_lq_sta *lq_sta,
1459 struct ieee80211_conf *conf, 1453 struct ieee80211_conf *conf,
1460 struct sta_info *sta, 1454 struct sta_info *sta, int index)
1461 int index)
1462{ 1455{
1463 s8 is_green = lq_sta->is_green; 1456 s8 is_green = lq_sta->is_green;
1464 struct iwl4965_scale_tbl_info *tbl = 1457 struct iwl_scale_tbl_info *tbl = &(lq_sta->lq_info[lq_sta->active_tbl]);
1465 &(lq_sta->lq_info[lq_sta->active_tbl]); 1458 struct iwl_scale_tbl_info *search_tbl =
1466 struct iwl4965_scale_tbl_info *search_tbl = 1459 &(lq_sta->lq_info[(1 - lq_sta->active_tbl)]);
1467 &(lq_sta->lq_info[(1 - lq_sta->active_tbl)]); 1460 u32 sz = (sizeof(struct iwl_scale_tbl_info) -
1468 u32 sz = (sizeof(struct iwl4965_scale_tbl_info) - 1461 (sizeof(struct iwl_rate_scale_data) * IWL_RATE_COUNT));
1469 (sizeof(struct iwl4965_rate_scale_data) * IWL_RATE_COUNT));
1470 u8 start_action = tbl->action; 1462 u8 start_action = tbl->action;
1471 /*u8 valid_tx_ant = priv->hw_params.valid_tx_ant;*/ 1463 /*u8 valid_tx_ant = priv->hw_params.valid_tx_ant;*/
1472 int ret; 1464 int ret;
@@ -1552,9 +1544,9 @@ static int rs_move_mimo_to_other(struct iwl_priv *priv,
1552 * 2) # times calling this function 1544 * 2) # times calling this function
1553 * 3) elapsed time in this mode (not used, for now) 1545 * 3) elapsed time in this mode (not used, for now)
1554 */ 1546 */
1555static void rs_stay_in_table(struct iwl4965_lq_sta *lq_sta) 1547static void rs_stay_in_table(struct iwl_lq_sta *lq_sta)
1556{ 1548{
1557 struct iwl4965_scale_tbl_info *tbl; 1549 struct iwl_scale_tbl_info *tbl;
1558 int i; 1550 int i;
1559 int active_tbl; 1551 int active_tbl;
1560 int flush_interval_passed = 0; 1552 int flush_interval_passed = 0;
@@ -1642,7 +1634,7 @@ static void rs_rate_scale_perform(struct iwl_priv *priv,
1642 int high = IWL_RATE_INVALID; 1634 int high = IWL_RATE_INVALID;
1643 int index; 1635 int index;
1644 int i; 1636 int i;
1645 struct iwl4965_rate_scale_data *window = NULL; 1637 struct iwl_rate_scale_data *window = NULL;
1646 int current_tpt = IWL_INVALID_VALUE; 1638 int current_tpt = IWL_INVALID_VALUE;
1647 int low_tpt = IWL_INVALID_VALUE; 1639 int low_tpt = IWL_INVALID_VALUE;
1648 int high_tpt = IWL_INVALID_VALUE; 1640 int high_tpt = IWL_INVALID_VALUE;
@@ -1651,8 +1643,8 @@ static void rs_rate_scale_perform(struct iwl_priv *priv,
1651 __le16 fc; 1643 __le16 fc;
1652 u16 rate_mask; 1644 u16 rate_mask;
1653 u8 update_lq = 0; 1645 u8 update_lq = 0;
1654 struct iwl4965_lq_sta *lq_sta; 1646 struct iwl_lq_sta *lq_sta;
1655 struct iwl4965_scale_tbl_info *tbl, *tbl1; 1647 struct iwl_scale_tbl_info *tbl, *tbl1;
1656 u16 rate_scale_index_msk = 0; 1648 u16 rate_scale_index_msk = 0;
1657 u32 rate; 1649 u32 rate;
1658 u8 is_green = 0; 1650 u8 is_green = 0;
@@ -1675,7 +1667,7 @@ static void rs_rate_scale_perform(struct iwl_priv *priv,
1675 if (!sta || !sta->rate_ctrl_priv) 1667 if (!sta || !sta->rate_ctrl_priv)
1676 return; 1668 return;
1677 1669
1678 lq_sta = (struct iwl4965_lq_sta *)sta->rate_ctrl_priv; 1670 lq_sta = (struct iwl_lq_sta *)sta->rate_ctrl_priv;
1679 1671
1680 tid = rs_tl_add_packet(lq_sta, hdr); 1672 tid = rs_tl_add_packet(lq_sta, hdr);
1681 1673
@@ -2030,8 +2022,8 @@ static void rs_initialize_lq(struct iwl_priv *priv,
2030 struct ieee80211_conf *conf, 2022 struct ieee80211_conf *conf,
2031 struct sta_info *sta) 2023 struct sta_info *sta)
2032{ 2024{
2033 struct iwl4965_lq_sta *lq_sta; 2025 struct iwl_lq_sta *lq_sta;
2034 struct iwl4965_scale_tbl_info *tbl; 2026 struct iwl_scale_tbl_info *tbl;
2035 int rate_idx; 2027 int rate_idx;
2036 int i; 2028 int i;
2037 u32 rate; 2029 u32 rate;
@@ -2042,7 +2034,7 @@ static void rs_initialize_lq(struct iwl_priv *priv,
2042 if (!sta || !sta->rate_ctrl_priv) 2034 if (!sta || !sta->rate_ctrl_priv)
2043 goto out; 2035 goto out;
2044 2036
2045 lq_sta = (struct iwl4965_lq_sta *)sta->rate_ctrl_priv; 2037 lq_sta = (struct iwl_lq_sta *)sta->rate_ctrl_priv;
2046 i = sta->last_txrate_idx; 2038 i = sta->last_txrate_idx;
2047 2039
2048 if ((lq_sta->lq.sta_id == 0xff) && 2040 if ((lq_sta->lq.sta_id == 0xff) &&
@@ -2096,7 +2088,7 @@ static void rs_get_rate(void *priv_rate, struct net_device *dev,
2096 struct sta_info *sta; 2088 struct sta_info *sta;
2097 __le16 fc; 2089 __le16 fc;
2098 struct iwl_priv *priv = (struct iwl_priv *)priv_rate; 2090 struct iwl_priv *priv = (struct iwl_priv *)priv_rate;
2099 struct iwl4965_lq_sta *lq_sta; 2091 struct iwl_lq_sta *lq_sta;
2100 2092
2101 IWL_DEBUG_RATE_LIMIT("rate scale calculate new rate for skb\n"); 2093 IWL_DEBUG_RATE_LIMIT("rate scale calculate new rate for skb\n");
2102 2094
@@ -2113,7 +2105,7 @@ static void rs_get_rate(void *priv_rate, struct net_device *dev,
2113 goto out; 2105 goto out;
2114 } 2106 }
2115 2107
2116 lq_sta = (struct iwl4965_lq_sta *)sta->rate_ctrl_priv; 2108 lq_sta = (struct iwl_lq_sta *)sta->rate_ctrl_priv;
2117 i = sta->last_txrate_idx; 2109 i = sta->last_txrate_idx;
2118 2110
2119 if ((priv->iw_mode == IEEE80211_IF_TYPE_IBSS) && 2111 if ((priv->iw_mode == IEEE80211_IF_TYPE_IBSS) &&
@@ -2149,14 +2141,14 @@ out:
2149 2141
2150static void *rs_alloc_sta(void *priv_rate, gfp_t gfp) 2142static void *rs_alloc_sta(void *priv_rate, gfp_t gfp)
2151{ 2143{
2152 struct iwl4965_lq_sta *lq_sta; 2144 struct iwl_lq_sta *lq_sta;
2153 struct iwl_priv *priv; 2145 struct iwl_priv *priv;
2154 int i, j; 2146 int i, j;
2155 2147
2156 priv = (struct iwl_priv *)priv_rate; 2148 priv = (struct iwl_priv *)priv_rate;
2157 IWL_DEBUG_RATE("create station rate scale window\n"); 2149 IWL_DEBUG_RATE("create station rate scale window\n");
2158 2150
2159 lq_sta = kzalloc(sizeof(struct iwl4965_lq_sta), gfp); 2151 lq_sta = kzalloc(sizeof(struct iwl_lq_sta), gfp);
2160 2152
2161 if (lq_sta == NULL) 2153 if (lq_sta == NULL)
2162 return NULL; 2154 return NULL;
@@ -2165,7 +2157,7 @@ static void *rs_alloc_sta(void *priv_rate, gfp_t gfp)
2165 2157
2166 for (j = 0; j < LQ_SIZE; j++) 2158 for (j = 0; j < LQ_SIZE; j++)
2167 for (i = 0; i < IWL_RATE_COUNT; i++) 2159 for (i = 0; i < IWL_RATE_COUNT; i++)
2168 rs_rate_scale_clear_window(&(lq_sta->lq_info[j].win[i])); 2160 rs_rate_scale_clear_window(&lq_sta->lq_info[j].win[i]);
2169 2161
2170 return lq_sta; 2162 return lq_sta;
2171} 2163}
@@ -2178,7 +2170,7 @@ static void rs_rate_init(void *priv_rate, void *priv_sta,
2178 struct ieee80211_conf *conf = &local->hw.conf; 2170 struct ieee80211_conf *conf = &local->hw.conf;
2179 struct ieee80211_supported_band *sband; 2171 struct ieee80211_supported_band *sband;
2180 struct iwl_priv *priv = (struct iwl_priv *)priv_rate; 2172 struct iwl_priv *priv = (struct iwl_priv *)priv_rate;
2181 struct iwl4965_lq_sta *lq_sta = priv_sta; 2173 struct iwl_lq_sta *lq_sta = priv_sta;
2182 2174
2183 sband = local->hw.wiphy->bands[local->hw.conf.channel->band]; 2175 sband = local->hw.wiphy->bands[local->hw.conf.channel->band];
2184 2176
@@ -2187,7 +2179,7 @@ static void rs_rate_init(void *priv_rate, void *priv_sta,
2187 sta->txrate_idx = 3; 2179 sta->txrate_idx = 3;
2188 for (j = 0; j < LQ_SIZE; j++) 2180 for (j = 0; j < LQ_SIZE; j++)
2189 for (i = 0; i < IWL_RATE_COUNT; i++) 2181 for (i = 0; i < IWL_RATE_COUNT; i++)
2190 rs_rate_scale_clear_window(&(lq_sta->lq_info[j].win[i])); 2182 rs_rate_scale_clear_window(&lq_sta->lq_info[j].win[i]);
2191 2183
2192 IWL_DEBUG_RATE("LQ: *** rate scale global init ***\n"); 2184 IWL_DEBUG_RATE("LQ: *** rate scale global init ***\n");
2193 /* TODO: what is a good starting rate for STA? About middle? Maybe not 2185 /* TODO: what is a good starting rate for STA? About middle? Maybe not
@@ -2271,10 +2263,9 @@ static void rs_rate_init(void *priv_rate, void *priv_sta,
2271} 2263}
2272 2264
2273static void rs_fill_link_cmd(const struct iwl_priv *priv, 2265static void rs_fill_link_cmd(const struct iwl_priv *priv,
2274 struct iwl4965_lq_sta *lq_sta, 2266 struct iwl_lq_sta *lq_sta, u32 new_rate)
2275 u32 new_rate)
2276{ 2267{
2277 struct iwl4965_scale_tbl_info tbl_type; 2268 struct iwl_scale_tbl_info tbl_type;
2278 int index = 0; 2269 int index = 0;
2279 int rate_idx; 2270 int rate_idx;
2280 int repeat_rate = 0; 2271 int repeat_rate = 0;
@@ -2402,6 +2393,7 @@ static void rs_free(void *priv_rate)
2402 2393
2403static void rs_clear(void *priv_rate) 2394static void rs_clear(void *priv_rate)
2404{ 2395{
2396#ifdef CONFIG_IWLWIFI_DEBUG
2405 struct iwl_priv *priv = (struct iwl_priv *) priv_rate; 2397 struct iwl_priv *priv = (struct iwl_priv *) priv_rate;
2406 2398
2407 IWL_DEBUG_RATE("enter\n"); 2399 IWL_DEBUG_RATE("enter\n");
@@ -2409,11 +2401,12 @@ static void rs_clear(void *priv_rate)
2409 /* TODO - add rate scale state reset */ 2401 /* TODO - add rate scale state reset */
2410 2402
2411 IWL_DEBUG_RATE("leave\n"); 2403 IWL_DEBUG_RATE("leave\n");
2404#endif /* CONFIG_IWLWIFI_DEBUG */
2412} 2405}
2413 2406
2414static void rs_free_sta(void *priv_rate, void *priv_sta) 2407static void rs_free_sta(void *priv_rate, void *priv_sta)
2415{ 2408{
2416 struct iwl4965_lq_sta *lq_sta = priv_sta; 2409 struct iwl_lq_sta *lq_sta = priv_sta;
2417 struct iwl_priv *priv; 2410 struct iwl_priv *priv;
2418 2411
2419 priv = (struct iwl_priv *)priv_rate; 2412 priv = (struct iwl_priv *)priv_rate;
@@ -2429,8 +2422,8 @@ static int open_file_generic(struct inode *inode, struct file *file)
2429 file->private_data = inode->i_private; 2422 file->private_data = inode->i_private;
2430 return 0; 2423 return 0;
2431} 2424}
2432static void rs_dbgfs_set_mcs(struct iwl4965_lq_sta *lq_sta, 2425static void rs_dbgfs_set_mcs(struct iwl_lq_sta *lq_sta,
2433 u32 *rate_n_flags, int index) 2426 u32 *rate_n_flags, int index)
2434{ 2427{
2435 struct iwl_priv *priv; 2428 struct iwl_priv *priv;
2436 2429
@@ -2453,7 +2446,7 @@ static void rs_dbgfs_set_mcs(struct iwl4965_lq_sta *lq_sta,
2453static ssize_t rs_sta_dbgfs_scale_table_write(struct file *file, 2446static ssize_t rs_sta_dbgfs_scale_table_write(struct file *file,
2454 const char __user *user_buf, size_t count, loff_t *ppos) 2447 const char __user *user_buf, size_t count, loff_t *ppos)
2455{ 2448{
2456 struct iwl4965_lq_sta *lq_sta = file->private_data; 2449 struct iwl_lq_sta *lq_sta = file->private_data;
2457 struct iwl_priv *priv; 2450 struct iwl_priv *priv;
2458 char buf[64]; 2451 char buf[64];
2459 int buf_size; 2452 int buf_size;
@@ -2493,7 +2486,7 @@ static ssize_t rs_sta_dbgfs_scale_table_read(struct file *file,
2493 int desc = 0; 2486 int desc = 0;
2494 int i = 0; 2487 int i = 0;
2495 2488
2496 struct iwl4965_lq_sta *lq_sta = file->private_data; 2489 struct iwl_lq_sta *lq_sta = file->private_data;
2497 2490
2498 desc += sprintf(buff+desc, "sta_id %d\n", lq_sta->lq.sta_id); 2491 desc += sprintf(buff+desc, "sta_id %d\n", lq_sta->lq.sta_id);
2499 desc += sprintf(buff+desc, "failed=%d success=%d rate=0%X\n", 2492 desc += sprintf(buff+desc, "failed=%d success=%d rate=0%X\n",
@@ -2541,7 +2534,7 @@ static ssize_t rs_sta_dbgfs_stats_table_read(struct file *file,
2541 int desc = 0; 2534 int desc = 0;
2542 int i, j; 2535 int i, j;
2543 2536
2544 struct iwl4965_lq_sta *lq_sta = file->private_data; 2537 struct iwl_lq_sta *lq_sta = file->private_data;
2545 for (i = 0; i < LQ_SIZE; i++) { 2538 for (i = 0; i < LQ_SIZE; i++) {
2546 desc += sprintf(buff+desc, "%s type=%d SGI=%d FAT=%d DUP=%d\n" 2539 desc += sprintf(buff+desc, "%s type=%d SGI=%d FAT=%d DUP=%d\n"
2547 "rate=0x%X\n", 2540 "rate=0x%X\n",
@@ -2570,7 +2563,7 @@ static const struct file_operations rs_sta_dbgfs_stats_table_ops = {
2570static void rs_add_debugfs(void *priv, void *priv_sta, 2563static void rs_add_debugfs(void *priv, void *priv_sta,
2571 struct dentry *dir) 2564 struct dentry *dir)
2572{ 2565{
2573 struct iwl4965_lq_sta *lq_sta = priv_sta; 2566 struct iwl_lq_sta *lq_sta = priv_sta;
2574 lq_sta->rs_sta_dbgfs_scale_table_file = 2567 lq_sta->rs_sta_dbgfs_scale_table_file =
2575 debugfs_create_file("rate_scale_table", 0600, dir, 2568 debugfs_create_file("rate_scale_table", 0600, dir,
2576 lq_sta, &rs_sta_dbgfs_scale_table_ops); 2569 lq_sta, &rs_sta_dbgfs_scale_table_ops);
@@ -2585,7 +2578,7 @@ static void rs_add_debugfs(void *priv, void *priv_sta,
2585 2578
2586static void rs_remove_debugfs(void *priv, void *priv_sta) 2579static void rs_remove_debugfs(void *priv, void *priv_sta)
2587{ 2580{
2588 struct iwl4965_lq_sta *lq_sta = priv_sta; 2581 struct iwl_lq_sta *lq_sta = priv_sta;
2589 debugfs_remove(lq_sta->rs_sta_dbgfs_scale_table_file); 2582 debugfs_remove(lq_sta->rs_sta_dbgfs_scale_table_file);
2590 debugfs_remove(lq_sta->rs_sta_dbgfs_stats_table_file); 2583 debugfs_remove(lq_sta->rs_sta_dbgfs_stats_table_file);
2591 debugfs_remove(lq_sta->rs_sta_dbgfs_tx_agg_tid_en_file); 2584 debugfs_remove(lq_sta->rs_sta_dbgfs_tx_agg_tid_en_file);
@@ -2609,104 +2602,12 @@ static struct rate_control_ops rs_ops = {
2609#endif 2602#endif
2610}; 2603};
2611 2604
2612int iwl4965_fill_rs_info(struct ieee80211_hw *hw, char *buf, u8 sta_id) 2605int iwlagn_rate_control_register(void)
2613{
2614 struct ieee80211_local *local = hw_to_local(hw);
2615 struct iwl_priv *priv = hw->priv;
2616 struct iwl4965_lq_sta *lq_sta;
2617 struct sta_info *sta;
2618 int cnt = 0, i;
2619 u32 samples = 0, success = 0, good = 0;
2620 unsigned long now = jiffies;
2621 u32 max_time = 0;
2622 u8 lq_type, antenna;
2623
2624 rcu_read_lock();
2625
2626 sta = sta_info_get(local, priv->stations[sta_id].sta.sta.addr);
2627 if (!sta || !sta->rate_ctrl_priv) {
2628 if (sta)
2629 IWL_DEBUG_RATE("leave - no private rate data!\n");
2630 else
2631 IWL_DEBUG_RATE("leave - no station!\n");
2632 rcu_read_unlock();
2633 return sprintf(buf, "station %d not found\n", sta_id);
2634 }
2635
2636 lq_sta = (void *)sta->rate_ctrl_priv;
2637
2638 lq_type = lq_sta->lq_info[lq_sta->active_tbl].lq_type;
2639 antenna = lq_sta->lq_info[lq_sta->active_tbl].ant_type;
2640
2641 if (is_legacy(lq_type))
2642 i = IWL_RATE_54M_INDEX;
2643 else
2644 i = IWL_RATE_60M_INDEX;
2645 while (1) {
2646 u64 mask;
2647 int j;
2648 int active = lq_sta->active_tbl;
2649
2650 cnt +=
2651 sprintf(&buf[cnt], " %2dMbs: ", iwl_rates[i].ieee / 2);
2652
2653 mask = (1ULL << (IWL_RATE_MAX_WINDOW - 1));
2654 for (j = 0; j < IWL_RATE_MAX_WINDOW; j++, mask >>= 1)
2655 buf[cnt++] =
2656 (lq_sta->lq_info[active].win[i].data & mask)
2657 ? '1' : '0';
2658
2659 samples += lq_sta->lq_info[active].win[i].counter;
2660 good += lq_sta->lq_info[active].win[i].success_counter;
2661 success += lq_sta->lq_info[active].win[i].success_counter *
2662 iwl_rates[i].ieee;
2663
2664 if (lq_sta->lq_info[active].win[i].stamp) {
2665 int delta =
2666 jiffies_to_msecs(now -
2667 lq_sta->lq_info[active].win[i].stamp);
2668
2669 if (delta > max_time)
2670 max_time = delta;
2671
2672 cnt += sprintf(&buf[cnt], "%5dms\n", delta);
2673 } else
2674 buf[cnt++] = '\n';
2675
2676 j = iwl4965_get_prev_ieee_rate(i);
2677 if (j == i)
2678 break;
2679 i = j;
2680 }
2681
2682 /*
2683 * Display the average rate of all samples taken.
2684 * NOTE: We multiply # of samples by 2 since the IEEE measurement
2685 * added from iwl_rates is actually 2X the rate.
2686 */
2687 if (samples)
2688 cnt += sprintf(&buf[cnt],
2689 "\nAverage rate is %3d.%02dMbs over last %4dms\n"
2690 "%3d%% success (%d good packets over %d tries)\n",
2691 success / (2 * samples), (success * 5 / samples) % 10,
2692 max_time, good * 100 / samples, good, samples);
2693 else
2694 cnt += sprintf(&buf[cnt], "\nAverage rate: 0Mbs\n");
2695
2696 cnt += sprintf(&buf[cnt], "\nrate scale type %d antenna %d "
2697 "active_search %d rate index %d\n", lq_type, antenna,
2698 lq_sta->search_better_tbl, sta->last_txrate_idx);
2699
2700 rcu_read_unlock();
2701 return cnt;
2702}
2703
2704int iwl4965_rate_control_register(void)
2705{ 2606{
2706 return ieee80211_rate_control_register(&rs_ops); 2607 return ieee80211_rate_control_register(&rs_ops);
2707} 2608}
2708 2609
2709void iwl4965_rate_control_unregister(void) 2610void iwlagn_rate_control_unregister(void)
2710{ 2611{
2711 ieee80211_rate_control_unregister(&rs_ops); 2612 ieee80211_rate_control_unregister(&rs_ops);
2712} 2613}
diff --git a/drivers/net/wireless/iwlwifi/iwl-4965-rs.h b/drivers/net/wireless/iwlwifi/iwl-agn-rs.h
index 9b9972885aa5..84d4d1e33755 100644
--- a/drivers/net/wireless/iwlwifi/iwl-4965-rs.h
+++ b/drivers/net/wireless/iwlwifi/iwl-agn-rs.h
@@ -24,8 +24,8 @@
24 * 24 *
25 *****************************************************************************/ 25 *****************************************************************************/
26 26
27#ifndef __iwl_4965_rs_h__ 27#ifndef __iwl_agn_rs_h__
28#define __iwl_4965_rs_h__ 28#define __iwl_agn_rs_h__
29 29
30#include "iwl-dev.h" 30#include "iwl-dev.h"
31 31
@@ -88,7 +88,7 @@ enum {
88#define IWL_RATE_5M_MASK (1 << IWL_RATE_5M_INDEX) 88#define IWL_RATE_5M_MASK (1 << IWL_RATE_5M_INDEX)
89#define IWL_RATE_11M_MASK (1 << IWL_RATE_11M_INDEX) 89#define IWL_RATE_11M_MASK (1 << IWL_RATE_11M_INDEX)
90 90
91/* 4965 uCode API values for legacy bit rates, both OFDM and CCK */ 91/* uCode API values for legacy bit rates, both OFDM and CCK */
92enum { 92enum {
93 IWL_RATE_6M_PLCP = 13, 93 IWL_RATE_6M_PLCP = 13,
94 IWL_RATE_9M_PLCP = 15, 94 IWL_RATE_9M_PLCP = 15,
@@ -107,7 +107,7 @@ enum {
107 /*FIXME:RS:add IWL_RATE_LEGACY_INVM_PLCP = 0,*/ 107 /*FIXME:RS:add IWL_RATE_LEGACY_INVM_PLCP = 0,*/
108}; 108};
109 109
110/* 4965 uCode API values for OFDM high-throughput (HT) bit rates */ 110/* uCode API values for OFDM high-throughput (HT) bit rates */
111enum { 111enum {
112 IWL_RATE_SISO_6M_PLCP = 0, 112 IWL_RATE_SISO_6M_PLCP = 0,
113 IWL_RATE_SISO_12M_PLCP = 1, 113 IWL_RATE_SISO_12M_PLCP = 1,
@@ -287,15 +287,6 @@ static inline u8 iwl4965_get_prev_ieee_rate(u8 rate_index)
287} 287}
288 288
289/** 289/**
290 * iwl4965_fill_rs_info - Fill an output text buffer with the rate representation
291 *
292 * NOTE: This is provided as a quick mechanism for a user to visualize
293 * the performance of the rate control algorithm and is not meant to be
294 * parsed software.
295 */
296extern int iwl4965_fill_rs_info(struct ieee80211_hw *, char *buf, u8 sta_id);
297
298/**
299 * iwl4965_rate_control_register - Register the rate control algorithm callbacks 290 * iwl4965_rate_control_register - Register the rate control algorithm callbacks
300 * 291 *
301 * Since the rate control algorithm is hardware specific, there is no need 292 * Since the rate control algorithm is hardware specific, there is no need
@@ -305,7 +296,7 @@ extern int iwl4965_fill_rs_info(struct ieee80211_hw *, char *buf, u8 sta_id);
305 * ieee80211_register_hw 296 * ieee80211_register_hw
306 * 297 *
307 */ 298 */
308extern int iwl4965_rate_control_register(void); 299extern int iwlagn_rate_control_register(void);
309 300
310/** 301/**
311 * iwl4965_rate_control_unregister - Unregister the rate control callbacks 302 * iwl4965_rate_control_unregister - Unregister the rate control callbacks
@@ -313,6 +304,6 @@ extern int iwl4965_rate_control_register(void);
313 * This should be called after calling ieee80211_unregister_hw, but before 304 * This should be called after calling ieee80211_unregister_hw, but before
314 * the driver is unloaded. 305 * the driver is unloaded.
315 */ 306 */
316extern void iwl4965_rate_control_unregister(void); 307extern void iwlagn_rate_control_unregister(void);
317 308
318#endif 309#endif /* __iwl_agn__rs__ */
diff --git a/drivers/net/wireless/iwlwifi/iwl4965-base.c b/drivers/net/wireless/iwlwifi/iwl-agn.c
index 71f5da3fe5c4..ed09e48b1b61 100644
--- a/drivers/net/wireless/iwlwifi/iwl4965-base.c
+++ b/drivers/net/wireless/iwlwifi/iwl-agn.c
@@ -65,7 +65,7 @@
65 * NOTE: DRV_NAME is defined in iwlwifi.h for use by iwl-debug.h and printk 65 * NOTE: DRV_NAME is defined in iwlwifi.h for use by iwl-debug.h and printk
66 */ 66 */
67 67
68#define DRV_DESCRIPTION "Intel(R) Wireless WiFi Link 4965AGN driver for Linux" 68#define DRV_DESCRIPTION "Intel(R) Wireless WiFi Link AGN driver for Linux"
69 69
70#ifdef CONFIG_IWLWIFI_DEBUG 70#ifdef CONFIG_IWLWIFI_DEBUG
71#define VD "d" 71#define VD "d"
@@ -73,7 +73,7 @@
73#define VD 73#define VD
74#endif 74#endif
75 75
76#ifdef CONFIG_IWL4965_SPECTRUM_MEASUREMENT 76#ifdef CONFIG_IWLAGN_SPECTRUM_MEASUREMENT
77#define VS "s" 77#define VS "s"
78#else 78#else
79#define VS 79#define VS
@@ -86,6 +86,7 @@ MODULE_DESCRIPTION(DRV_DESCRIPTION);
86MODULE_VERSION(DRV_VERSION); 86MODULE_VERSION(DRV_VERSION);
87MODULE_AUTHOR(DRV_COPYRIGHT); 87MODULE_AUTHOR(DRV_COPYRIGHT);
88MODULE_LICENSE("GPL"); 88MODULE_LICENSE("GPL");
89MODULE_ALIAS("iwl4965");
89 90
90/*************** STATION TABLE MANAGEMENT **** 91/*************** STATION TABLE MANAGEMENT ****
91 * mac80211 should be examined to determine if sta_info is duplicating 92 * mac80211 should be examined to determine if sta_info is duplicating
@@ -444,11 +445,10 @@ static void iwl_free_frame(struct iwl_priv *priv, struct iwl_frame *frame)
444 list_add(&frame->list, &priv->free_frames); 445 list_add(&frame->list, &priv->free_frames);
445} 446}
446 447
447unsigned int iwl4965_fill_beacon_frame(struct iwl_priv *priv, 448static unsigned int iwl_fill_beacon_frame(struct iwl_priv *priv,
448 struct ieee80211_hdr *hdr, 449 struct ieee80211_hdr *hdr,
449 const u8 *dest, int left) 450 const u8 *dest, int left)
450{ 451{
451
452 if (!iwl_is_associated(priv) || !priv->ibss_beacon || 452 if (!iwl_is_associated(priv) || !priv->ibss_beacon ||
453 ((priv->iw_mode != IEEE80211_IF_TYPE_IBSS) && 453 ((priv->iw_mode != IEEE80211_IF_TYPE_IBSS) &&
454 (priv->iw_mode != IEEE80211_IF_TYPE_AP))) 454 (priv->iw_mode != IEEE80211_IF_TYPE_AP)))
@@ -487,6 +487,38 @@ static u8 iwl4965_rate_get_lowest_plcp(struct iwl_priv *priv)
487 return IWL_RATE_6M_PLCP; 487 return IWL_RATE_6M_PLCP;
488} 488}
489 489
490unsigned int iwl4965_hw_get_beacon_cmd(struct iwl_priv *priv,
491 struct iwl_frame *frame, u8 rate)
492{
493 struct iwl_tx_beacon_cmd *tx_beacon_cmd;
494 unsigned int frame_size;
495
496 tx_beacon_cmd = &frame->u.beacon;
497 memset(tx_beacon_cmd, 0, sizeof(*tx_beacon_cmd));
498
499 tx_beacon_cmd->tx.sta_id = priv->hw_params.bcast_sta_id;
500 tx_beacon_cmd->tx.stop_time.life_time = TX_CMD_LIFE_TIME_INFINITE;
501
502 frame_size = iwl_fill_beacon_frame(priv, tx_beacon_cmd->frame,
503 iwl_bcast_addr,
504 sizeof(frame->u) - sizeof(*tx_beacon_cmd));
505
506 BUG_ON(frame_size > MAX_MPDU_SIZE);
507 tx_beacon_cmd->tx.len = cpu_to_le16((u16)frame_size);
508
509 if ((rate == IWL_RATE_1M_PLCP) || (rate >= IWL_RATE_2M_PLCP))
510 tx_beacon_cmd->tx.rate_n_flags =
511 iwl_hw_set_rate_n_flags(rate, RATE_MCS_CCK_MSK);
512 else
513 tx_beacon_cmd->tx.rate_n_flags =
514 iwl_hw_set_rate_n_flags(rate, 0);
515
516 tx_beacon_cmd->tx.tx_flags = TX_CMD_FLG_SEQ_CTL_MSK |
517 TX_CMD_FLG_TSF_MSK |
518 TX_CMD_FLG_STA_RATE_MSK;
519
520 return sizeof(*tx_beacon_cmd) + frame_size;
521}
490static int iwl4965_send_beacon_cmd(struct iwl_priv *priv) 522static int iwl4965_send_beacon_cmd(struct iwl_priv *priv)
491{ 523{
492 struct iwl_frame *frame; 524 struct iwl_frame *frame;
@@ -608,7 +640,6 @@ static void iwl_activate_qos(struct iwl_priv *priv, u8 force)
608} 640}
609 641
610#define MAX_UCODE_BEACON_INTERVAL 4096 642#define MAX_UCODE_BEACON_INTERVAL 4096
611#define INTEL_CONN_LISTEN_INTERVAL __constant_cpu_to_le16(0xA)
612 643
613static __le16 iwl4965_adjust_beacon_interval(u16 beacon_val) 644static __le16 iwl4965_adjust_beacon_interval(u16 beacon_val)
614{ 645{
@@ -638,7 +669,7 @@ static void iwl4965_setup_rxon_timing(struct iwl_priv *priv)
638 priv->rxon_timing.timestamp.dw[0] = 669 priv->rxon_timing.timestamp.dw[0] =
639 cpu_to_le32(priv->timestamp & 0xFFFFFFFF); 670 cpu_to_le32(priv->timestamp & 0xFFFFFFFF);
640 671
641 priv->rxon_timing.listen_interval = INTEL_CONN_LISTEN_INTERVAL; 672 priv->rxon_timing.listen_interval = cpu_to_le16(conf->listen_interval);
642 673
643 tsf = priv->timestamp; 674 tsf = priv->timestamp;
644 675
@@ -853,7 +884,7 @@ static void iwl4965_set_rate(struct iwl_priv *priv)
853 (IWL_OFDM_BASIC_RATES_MASK >> IWL_FIRST_OFDM_RATE) & 0xFF; 884 (IWL_OFDM_BASIC_RATES_MASK >> IWL_FIRST_OFDM_RATE) & 0xFF;
854} 885}
855 886
856#ifdef CONFIG_IWL4965_SPECTRUM_MEASUREMENT 887#ifdef CONFIG_IWLAGN_SPECTRUM_MEASUREMENT
857 888
858#include "iwl-spectrum.h" 889#include "iwl-spectrum.h"
859 890
@@ -1057,7 +1088,7 @@ static void iwl4965_rx_csa(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb)
1057static void iwl4965_rx_spectrum_measure_notif(struct iwl_priv *priv, 1088static void iwl4965_rx_spectrum_measure_notif(struct iwl_priv *priv,
1058 struct iwl_rx_mem_buffer *rxb) 1089 struct iwl_rx_mem_buffer *rxb)
1059{ 1090{
1060#ifdef CONFIG_IWL4965_SPECTRUM_MEASUREMENT 1091#ifdef CONFIG_IWLAGN_SPECTRUM_MEASUREMENT
1061 struct iwl_rx_packet *pkt = (struct iwl_rx_packet *)rxb->skb->data; 1092 struct iwl_rx_packet *pkt = (struct iwl_rx_packet *)rxb->skb->data;
1062 struct iwl4965_spectrum_notification *report = &(pkt->u.spectrum_notif); 1093 struct iwl4965_spectrum_notification *report = &(pkt->u.spectrum_notif);
1063 1094
@@ -1231,6 +1262,37 @@ static void iwl4965_rx_card_state_notif(struct iwl_priv *priv,
1231 wake_up_interruptible(&priv->wait_command_queue); 1262 wake_up_interruptible(&priv->wait_command_queue);
1232} 1263}
1233 1264
1265int iwl4965_set_pwr_src(struct iwl_priv *priv, enum iwl_pwr_src src)
1266{
1267 int ret;
1268 unsigned long flags;
1269
1270 spin_lock_irqsave(&priv->lock, flags);
1271 ret = iwl_grab_nic_access(priv);
1272 if (ret)
1273 goto err;
1274
1275 if (src == IWL_PWR_SRC_VAUX) {
1276 u32 val;
1277 ret = pci_read_config_dword(priv->pci_dev, PCI_POWER_SOURCE,
1278 &val);
1279
1280 if (val & PCI_CFG_PMC_PME_FROM_D3COLD_SUPPORT)
1281 iwl_set_bits_mask_prph(priv, APMG_PS_CTRL_REG,
1282 APMG_PS_CTRL_VAL_PWR_SRC_VAUX,
1283 ~APMG_PS_CTRL_MSK_PWR_SRC);
1284 } else {
1285 iwl_set_bits_mask_prph(priv, APMG_PS_CTRL_REG,
1286 APMG_PS_CTRL_VAL_PWR_SRC_VMAIN,
1287 ~APMG_PS_CTRL_MSK_PWR_SRC);
1288 }
1289
1290 iwl_release_nic_access(priv);
1291err:
1292 spin_unlock_irqrestore(&priv->lock, flags);
1293 return ret;
1294}
1295
1234/** 1296/**
1235 * iwl4965_setup_rx_handlers - Initialize Rx handler callbacks 1297 * iwl4965_setup_rx_handlers - Initialize Rx handler callbacks
1236 * 1298 *
@@ -2170,17 +2232,16 @@ static int __iwl4965_up(struct iwl_priv *priv)
2170 } 2232 }
2171 2233
2172 /* If platform's RF_KILL switch is NOT set to KILL */ 2234 /* If platform's RF_KILL switch is NOT set to KILL */
2173 if (iwl_read32(priv, CSR_GP_CNTRL) & 2235 if (iwl_read32(priv, CSR_GP_CNTRL) & CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW)
2174 CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW)
2175 clear_bit(STATUS_RF_KILL_HW, &priv->status); 2236 clear_bit(STATUS_RF_KILL_HW, &priv->status);
2176 else 2237 else
2177 set_bit(STATUS_RF_KILL_HW, &priv->status); 2238 set_bit(STATUS_RF_KILL_HW, &priv->status);
2178 2239
2179 if (!test_bit(STATUS_IN_SUSPEND, &priv->status) && 2240 if (iwl_is_rfkill(priv)) {
2180 iwl_is_rfkill(priv)) { 2241 iwl4965_enable_interrupts(priv);
2181 IWL_WARNING("Radio disabled by %s RF Kill switch\n", 2242 IWL_WARNING("Radio disabled by %s RF Kill switch\n",
2182 test_bit(STATUS_RF_KILL_HW, &priv->status) ? "HW" : "SW"); 2243 test_bit(STATUS_RF_KILL_HW, &priv->status) ? "HW" : "SW");
2183 return -ENODEV; 2244 return 0;
2184 } 2245 }
2185 2246
2186 iwl_write32(priv, CSR_INT, 0xFFFFFFFF); 2247 iwl_write32(priv, CSR_INT, 0xFFFFFFFF);
@@ -2216,11 +2277,6 @@ static int __iwl4965_up(struct iwl_priv *priv)
2216 memcpy(priv->ucode_data_backup.v_addr, priv->ucode_data.v_addr, 2277 memcpy(priv->ucode_data_backup.v_addr, priv->ucode_data.v_addr,
2217 priv->ucode_data.len); 2278 priv->ucode_data.len);
2218 2279
2219 /* We return success when we resume from suspend and rf_kill is on. */
2220 if (test_bit(STATUS_RF_KILL_HW, &priv->status) ||
2221 test_bit(STATUS_RF_KILL_SW, &priv->status))
2222 return 0;
2223
2224 for (i = 0; i < MAX_HW_RESTARTS; i++) { 2280 for (i = 0; i < MAX_HW_RESTARTS; i++) {
2225 2281
2226 iwl_clear_stations_table(priv); 2282 iwl_clear_stations_table(priv);
@@ -2415,7 +2471,7 @@ static void iwl4965_post_associate(struct iwl_priv *priv)
2415 unsigned long flags; 2471 unsigned long flags;
2416 2472
2417 if (priv->iw_mode == IEEE80211_IF_TYPE_AP) { 2473 if (priv->iw_mode == IEEE80211_IF_TYPE_AP) {
2418 IWL_ERROR("%s Should not be called in AP mode\n", __FUNCTION__); 2474 IWL_ERROR("%s Should not be called in AP mode\n", __func__);
2419 return; 2475 return;
2420 } 2476 }
2421 2477
@@ -2491,7 +2547,7 @@ static void iwl4965_post_associate(struct iwl_priv *priv)
2491 2547
2492 default: 2548 default:
2493 IWL_ERROR("%s Should not be called in %d mode\n", 2549 IWL_ERROR("%s Should not be called in %d mode\n",
2494 __FUNCTION__, priv->iw_mode); 2550 __func__, priv->iw_mode);
2495 break; 2551 break;
2496 } 2552 }
2497 2553
@@ -2589,6 +2645,9 @@ static int iwl4965_mac_start(struct ieee80211_hw *hw)
2589 if (ret) 2645 if (ret)
2590 goto out_release_irq; 2646 goto out_release_irq;
2591 2647
2648 if (iwl_is_rfkill(priv))
2649 goto out;
2650
2592 IWL_DEBUG_INFO("Start UP work done.\n"); 2651 IWL_DEBUG_INFO("Start UP work done.\n");
2593 2652
2594 if (test_bit(STATUS_IN_SUSPEND, &priv->status)) 2653 if (test_bit(STATUS_IN_SUSPEND, &priv->status))
@@ -2608,6 +2667,7 @@ static int iwl4965_mac_start(struct ieee80211_hw *hw)
2608 } 2667 }
2609 } 2668 }
2610 2669
2670out:
2611 priv->is_open = 1; 2671 priv->is_open = 1;
2612 IWL_DEBUG_MAC80211("leave\n"); 2672 IWL_DEBUG_MAC80211("leave\n");
2613 return 0; 2673 return 0;
@@ -2659,7 +2719,7 @@ static int iwl4965_mac_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
2659{ 2719{
2660 struct iwl_priv *priv = hw->priv; 2720 struct iwl_priv *priv = hw->priv;
2661 2721
2662 IWL_DEBUG_MAC80211("enter\n"); 2722 IWL_DEBUG_MACDUMP("enter\n");
2663 2723
2664 if (priv->iw_mode == IEEE80211_IF_TYPE_MNTR) { 2724 if (priv->iw_mode == IEEE80211_IF_TYPE_MNTR) {
2665 IWL_DEBUG_MAC80211("leave - monitor\n"); 2725 IWL_DEBUG_MAC80211("leave - monitor\n");
@@ -2673,7 +2733,7 @@ static int iwl4965_mac_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
2673 if (iwl_tx_skb(priv, skb)) 2733 if (iwl_tx_skb(priv, skb))
2674 dev_kfree_skb_any(skb); 2734 dev_kfree_skb_any(skb);
2675 2735
2676 IWL_DEBUG_MAC80211("leave\n"); 2736 IWL_DEBUG_MACDUMP("leave\n");
2677 return 0; 2737 return 0;
2678} 2738}
2679 2739
@@ -2773,6 +2833,7 @@ static int iwl4965_mac_config(struct ieee80211_hw *hw, struct ieee80211_conf *co
2773 2833
2774 spin_lock_irqsave(&priv->lock, flags); 2834 spin_lock_irqsave(&priv->lock, flags);
2775 2835
2836
2776 /* if we are switching from ht to 2.4 clear flags 2837 /* if we are switching from ht to 2.4 clear flags
2777 * from any ht related info since 2.4 does not 2838 * from any ht related info since 2.4 does not
2778 * support ht */ 2839 * support ht */
@@ -3102,6 +3163,7 @@ static void iwl4965_bss_info_changed(struct ieee80211_hw *hw,
3102 if (bss_conf->assoc) { 3163 if (bss_conf->assoc) {
3103 priv->assoc_id = bss_conf->aid; 3164 priv->assoc_id = bss_conf->aid;
3104 priv->beacon_int = bss_conf->beacon_int; 3165 priv->beacon_int = bss_conf->beacon_int;
3166 priv->power_data.dtim_period = bss_conf->dtim_period;
3105 priv->timestamp = bss_conf->timestamp; 3167 priv->timestamp = bss_conf->timestamp;
3106 priv->assoc_capability = bss_conf->assoc_capability; 3168 priv->assoc_capability = bss_conf->assoc_capability;
3107 priv->next_scan_jiffies = jiffies + 3169 priv->next_scan_jiffies = jiffies +
@@ -3345,6 +3407,39 @@ static int iwl4965_mac_conf_tx(struct ieee80211_hw *hw, u16 queue,
3345 return 0; 3407 return 0;
3346} 3408}
3347 3409
3410static int iwl4965_mac_ampdu_action(struct ieee80211_hw *hw,
3411 enum ieee80211_ampdu_mlme_action action,
3412 const u8 *addr, u16 tid, u16 *ssn)
3413{
3414 struct iwl_priv *priv = hw->priv;
3415 DECLARE_MAC_BUF(mac);
3416
3417 IWL_DEBUG_HT("A-MPDU action on addr %s tid %d\n",
3418 print_mac(mac, addr), tid);
3419
3420 if (!(priv->cfg->sku & IWL_SKU_N))
3421 return -EACCES;
3422
3423 switch (action) {
3424 case IEEE80211_AMPDU_RX_START:
3425 IWL_DEBUG_HT("start Rx\n");
3426 return iwl_rx_agg_start(priv, addr, tid, *ssn);
3427 case IEEE80211_AMPDU_RX_STOP:
3428 IWL_DEBUG_HT("stop Rx\n");
3429 return iwl_rx_agg_stop(priv, addr, tid);
3430 case IEEE80211_AMPDU_TX_START:
3431 IWL_DEBUG_HT("start Tx\n");
3432 return iwl_tx_agg_start(priv, addr, tid, ssn);
3433 case IEEE80211_AMPDU_TX_STOP:
3434 IWL_DEBUG_HT("stop Tx\n");
3435 return iwl_tx_agg_stop(priv, addr, tid);
3436 default:
3437 IWL_DEBUG_HT("unknown\n");
3438 return -EINVAL;
3439 break;
3440 }
3441 return 0;
3442}
3348static int iwl4965_mac_get_tx_stats(struct ieee80211_hw *hw, 3443static int iwl4965_mac_get_tx_stats(struct ieee80211_hw *hw,
3349 struct ieee80211_tx_queue_stats *stats) 3444 struct ieee80211_tx_queue_stats *stats)
3350{ 3445{
@@ -3592,15 +3687,6 @@ static ssize_t show_temperature(struct device *d,
3592 3687
3593static DEVICE_ATTR(temperature, S_IRUGO, show_temperature, NULL); 3688static DEVICE_ATTR(temperature, S_IRUGO, show_temperature, NULL);
3594 3689
3595static ssize_t show_rs_window(struct device *d,
3596 struct device_attribute *attr,
3597 char *buf)
3598{
3599 struct iwl_priv *priv = d->driver_data;
3600 return iwl4965_fill_rs_info(priv->hw, buf, IWL_AP_ID);
3601}
3602static DEVICE_ATTR(rs_window, S_IRUGO, show_rs_window, NULL);
3603
3604static ssize_t show_tx_power(struct device *d, 3690static ssize_t show_tx_power(struct device *d,
3605 struct device_attribute *attr, char *buf) 3691 struct device_attribute *attr, char *buf)
3606{ 3692{
@@ -3699,7 +3785,7 @@ static ssize_t store_filter_flags(struct device *d,
3699static DEVICE_ATTR(filter_flags, S_IWUSR | S_IRUGO, show_filter_flags, 3785static DEVICE_ATTR(filter_flags, S_IWUSR | S_IRUGO, show_filter_flags,
3700 store_filter_flags); 3786 store_filter_flags);
3701 3787
3702#ifdef CONFIG_IWL4965_SPECTRUM_MEASUREMENT 3788#ifdef CONFIG_IWLAGN_SPECTRUM_MEASUREMENT
3703 3789
3704static ssize_t show_measurement(struct device *d, 3790static ssize_t show_measurement(struct device *d,
3705 struct device_attribute *attr, char *buf) 3791 struct device_attribute *attr, char *buf)
@@ -3707,7 +3793,7 @@ static ssize_t show_measurement(struct device *d,
3707 struct iwl_priv *priv = dev_get_drvdata(d); 3793 struct iwl_priv *priv = dev_get_drvdata(d);
3708 struct iwl4965_spectrum_notification measure_report; 3794 struct iwl4965_spectrum_notification measure_report;
3709 u32 size = sizeof(measure_report), len = 0, ofs = 0; 3795 u32 size = sizeof(measure_report), len = 0, ofs = 0;
3710 u8 *data = (u8 *) & measure_report; 3796 u8 *data = (u8 *)&measure_report;
3711 unsigned long flags; 3797 unsigned long flags;
3712 3798
3713 spin_lock_irqsave(&priv->lock, flags); 3799 spin_lock_irqsave(&priv->lock, flags);
@@ -3770,7 +3856,7 @@ static ssize_t store_measurement(struct device *d,
3770 3856
3771static DEVICE_ATTR(measurement, S_IRUSR | S_IWUSR, 3857static DEVICE_ATTR(measurement, S_IRUSR | S_IWUSR,
3772 show_measurement, store_measurement); 3858 show_measurement, store_measurement);
3773#endif /* CONFIG_IWL4965_SPECTRUM_MEASUREMENT */ 3859#endif /* CONFIG_IWLAGN_SPECTRUM_MEASUREMENT */
3774 3860
3775static ssize_t store_retry_rate(struct device *d, 3861static ssize_t store_retry_rate(struct device *d,
3776 struct device_attribute *attr, 3862 struct device_attribute *attr,
@@ -3800,77 +3886,54 @@ static ssize_t store_power_level(struct device *d,
3800 const char *buf, size_t count) 3886 const char *buf, size_t count)
3801{ 3887{
3802 struct iwl_priv *priv = dev_get_drvdata(d); 3888 struct iwl_priv *priv = dev_get_drvdata(d);
3803 int rc; 3889 int ret;
3804 int mode; 3890 int mode;
3805 3891
3806 mode = simple_strtoul(buf, NULL, 0); 3892 mode = simple_strtoul(buf, NULL, 0);
3807 mutex_lock(&priv->mutex); 3893 mutex_lock(&priv->mutex);
3808 3894
3809 if (!iwl_is_ready(priv)) { 3895 if (!iwl_is_ready(priv)) {
3810 rc = -EAGAIN; 3896 ret = -EAGAIN;
3811 goto out; 3897 goto out;
3812 } 3898 }
3813 3899
3814 rc = iwl_power_set_user_mode(priv, mode); 3900 ret = iwl_power_set_user_mode(priv, mode);
3815 if (rc) { 3901 if (ret) {
3816 IWL_DEBUG_MAC80211("failed setting power mode.\n"); 3902 IWL_DEBUG_MAC80211("failed setting power mode.\n");
3817 goto out; 3903 goto out;
3818 } 3904 }
3819 rc = count; 3905 ret = count;
3820 3906
3821 out: 3907 out:
3822 mutex_unlock(&priv->mutex); 3908 mutex_unlock(&priv->mutex);
3823 return rc; 3909 return ret;
3824} 3910}
3825 3911
3826#define MAX_WX_STRING 80
3827
3828/* Values are in microsecond */
3829static const s32 timeout_duration[] = {
3830 350000,
3831 250000,
3832 75000,
3833 37000,
3834 25000,
3835};
3836static const s32 period_duration[] = {
3837 400000,
3838 700000,
3839 1000000,
3840 1000000,
3841 1000000
3842};
3843
3844static ssize_t show_power_level(struct device *d, 3912static ssize_t show_power_level(struct device *d,
3845 struct device_attribute *attr, char *buf) 3913 struct device_attribute *attr, char *buf)
3846{ 3914{
3847 struct iwl_priv *priv = dev_get_drvdata(d); 3915 struct iwl_priv *priv = dev_get_drvdata(d);
3916 int mode = priv->power_data.user_power_setting;
3917 int system = priv->power_data.system_power_setting;
3848 int level = priv->power_data.power_mode; 3918 int level = priv->power_data.power_mode;
3849 char *p = buf; 3919 char *p = buf;
3850 3920
3851 p += sprintf(p, "%d ", level); 3921 switch (system) {
3852 switch (level) { 3922 case IWL_POWER_SYS_AUTO:
3853 case IWL_POWER_MODE_CAM: 3923 p += sprintf(p, "SYSTEM:auto");
3854 case IWL_POWER_AC:
3855 p += sprintf(p, "(AC)");
3856 break; 3924 break;
3857 case IWL_POWER_BATTERY: 3925 case IWL_POWER_SYS_AC:
3858 p += sprintf(p, "(BATTERY)"); 3926 p += sprintf(p, "SYSTEM:ac");
3927 break;
3928 case IWL_POWER_SYS_BATTERY:
3929 p += sprintf(p, "SYSTEM:battery");
3859 break; 3930 break;
3860 default:
3861 p += sprintf(p,
3862 "(Timeout %dms, Period %dms)",
3863 timeout_duration[level - 1] / 1000,
3864 period_duration[level - 1] / 1000);
3865 } 3931 }
3866/* 3932
3867 if (!(priv->power_mode & IWL_POWER_ENABLED)) 3933 p += sprintf(p, "\tMODE:%s", (mode < IWL_POWER_AUTO)?"fixed":"auto");
3868 p += sprintf(p, " OFF\n"); 3934 p += sprintf(p, "\tINDEX:%d", level);
3869 else 3935 p += sprintf(p, "\n");
3870 p += sprintf(p, " \n"); 3936 return p - buf + 1;
3871*/
3872 p += sprintf(p, " \n");
3873 return (p - buf + 1);
3874} 3937}
3875 3938
3876static DEVICE_ATTR(power_level, S_IWUSR | S_IRUSR, show_power_level, 3939static DEVICE_ATTR(power_level, S_IWUSR | S_IRUSR, show_power_level,
@@ -3945,7 +4008,7 @@ static ssize_t show_statistics(struct device *d,
3945 struct iwl_priv *priv = dev_get_drvdata(d); 4008 struct iwl_priv *priv = dev_get_drvdata(d);
3946 u32 size = sizeof(struct iwl_notif_statistics); 4009 u32 size = sizeof(struct iwl_notif_statistics);
3947 u32 len = 0, ofs = 0; 4010 u32 len = 0, ofs = 0;
3948 u8 *data = (u8 *) & priv->statistics; 4011 u8 *data = (u8 *)&priv->statistics;
3949 int rc = 0; 4012 int rc = 0;
3950 4013
3951 if (!iwl_is_alive(priv)) 4014 if (!iwl_is_alive(priv))
@@ -4041,12 +4104,11 @@ static struct attribute *iwl4965_sysfs_entries[] = {
4041 &dev_attr_channels.attr, 4104 &dev_attr_channels.attr,
4042 &dev_attr_flags.attr, 4105 &dev_attr_flags.attr,
4043 &dev_attr_filter_flags.attr, 4106 &dev_attr_filter_flags.attr,
4044#ifdef CONFIG_IWL4965_SPECTRUM_MEASUREMENT 4107#ifdef CONFIG_IWLAGN_SPECTRUM_MEASUREMENT
4045 &dev_attr_measurement.attr, 4108 &dev_attr_measurement.attr,
4046#endif 4109#endif
4047 &dev_attr_power_level.attr, 4110 &dev_attr_power_level.attr,
4048 &dev_attr_retry_rate.attr, 4111 &dev_attr_retry_rate.attr,
4049 &dev_attr_rs_window.attr,
4050 &dev_attr_statistics.attr, 4112 &dev_attr_statistics.attr,
4051 &dev_attr_status.attr, 4113 &dev_attr_status.attr,
4052 &dev_attr_temperature.attr, 4114 &dev_attr_temperature.attr,
@@ -4394,8 +4456,10 @@ static int iwl4965_pci_resume(struct pci_dev *pdev)
4394 4456
4395/* Hardware specific file defines the PCI IDs table for that hardware module */ 4457/* Hardware specific file defines the PCI IDs table for that hardware module */
4396static struct pci_device_id iwl_hw_card_ids[] = { 4458static struct pci_device_id iwl_hw_card_ids[] = {
4459#ifdef CONFIG_IWL4965
4397 {IWL_PCI_DEVICE(0x4229, PCI_ANY_ID, iwl4965_agn_cfg)}, 4460 {IWL_PCI_DEVICE(0x4229, PCI_ANY_ID, iwl4965_agn_cfg)},
4398 {IWL_PCI_DEVICE(0x4230, PCI_ANY_ID, iwl4965_agn_cfg)}, 4461 {IWL_PCI_DEVICE(0x4230, PCI_ANY_ID, iwl4965_agn_cfg)},
4462#endif /* CONFIG_IWL4965 */
4399#ifdef CONFIG_IWL5000 4463#ifdef CONFIG_IWL5000
4400 {IWL_PCI_DEVICE(0x4232, 0x1205, iwl5100_bg_cfg)}, 4464 {IWL_PCI_DEVICE(0x4232, 0x1205, iwl5100_bg_cfg)},
4401 {IWL_PCI_DEVICE(0x4232, 0x1305, iwl5100_bg_cfg)}, 4465 {IWL_PCI_DEVICE(0x4232, 0x1305, iwl5100_bg_cfg)},
@@ -4431,7 +4495,7 @@ static int __init iwl4965_init(void)
4431 printk(KERN_INFO DRV_NAME ": " DRV_DESCRIPTION ", " DRV_VERSION "\n"); 4495 printk(KERN_INFO DRV_NAME ": " DRV_DESCRIPTION ", " DRV_VERSION "\n");
4432 printk(KERN_INFO DRV_NAME ": " DRV_COPYRIGHT "\n"); 4496 printk(KERN_INFO DRV_NAME ": " DRV_COPYRIGHT "\n");
4433 4497
4434 ret = iwl4965_rate_control_register(); 4498 ret = iwlagn_rate_control_register();
4435 if (ret) { 4499 if (ret) {
4436 IWL_ERROR("Unable to register rate control algorithm: %d\n", ret); 4500 IWL_ERROR("Unable to register rate control algorithm: %d\n", ret);
4437 return ret; 4501 return ret;
@@ -4446,14 +4510,14 @@ static int __init iwl4965_init(void)
4446 return ret; 4510 return ret;
4447 4511
4448error_register: 4512error_register:
4449 iwl4965_rate_control_unregister(); 4513 iwlagn_rate_control_unregister();
4450 return ret; 4514 return ret;
4451} 4515}
4452 4516
4453static void __exit iwl4965_exit(void) 4517static void __exit iwl4965_exit(void)
4454{ 4518{
4455 pci_unregister_driver(&iwl_driver); 4519 pci_unregister_driver(&iwl_driver);
4456 iwl4965_rate_control_unregister(); 4520 iwlagn_rate_control_unregister();
4457} 4521}
4458 4522
4459module_exit(iwl4965_exit); 4523module_exit(iwl4965_exit);
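Two behavioural points in the iwl-agn.c hunks above are easy to miss: __iwl4965_up() no longer fails with -ENODEV when the RF-kill switch is asserted (it enables interrupts and returns 0 so the interface can finish coming up with the radio off), and the driver now provides a mac80211 ampdu_action callback that dispatches aggregation start/stop requests per direction, refusing early on non-11n SKUs with -EACCES. The following is a self-contained sketch of that dispatch shape only; the enum, struct and stub functions are hypothetical placeholders, not the mac80211 or iwlwifi API.

#include <stdio.h>
#include <errno.h>

enum ampdu_action { AMPDU_RX_START, AMPDU_RX_STOP, AMPDU_TX_START, AMPDU_TX_STOP };

struct dev { int ht_capable; };

static int rx_agg_start(struct dev *d, int tid, unsigned ssn)  { printf("rx start tid %d ssn %u\n", tid, ssn); return 0; }
static int rx_agg_stop(struct dev *d, int tid)                 { printf("rx stop tid %d\n", tid); return 0; }
static int tx_agg_start(struct dev *d, int tid, unsigned *ssn) { printf("tx start tid %d\n", tid); return 0; }
static int tx_agg_stop(struct dev *d, int tid)                 { printf("tx stop tid %d\n", tid); return 0; }

/* Dispatch shape mirroring iwl4965_mac_ampdu_action(): refuse early if the
 * hardware has no 11n support, then fan out per action. */
static int ampdu_action(struct dev *d, enum ampdu_action action, int tid, unsigned *ssn)
{
	if (!d->ht_capable)
		return -EACCES;

	switch (action) {
	case AMPDU_RX_START: return rx_agg_start(d, tid, *ssn);
	case AMPDU_RX_STOP:  return rx_agg_stop(d, tid);
	case AMPDU_TX_START: return tx_agg_start(d, tid, ssn);
	case AMPDU_TX_STOP:  return tx_agg_stop(d, tid);
	default:             return -EINVAL;
	}
}

int main(void)
{
	struct dev d = { .ht_capable = 1 };
	unsigned ssn = 0;
	return ampdu_action(&d, AMPDU_TX_START, 5, &ssn);
}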
diff --git a/drivers/net/wireless/iwlwifi/iwl-commands.h b/drivers/net/wireless/iwlwifi/iwl-commands.h
index e9bb1de0ce3f..28b5b09996ed 100644
--- a/drivers/net/wireless/iwlwifi/iwl-commands.h
+++ b/drivers/net/wireless/iwlwifi/iwl-commands.h
@@ -666,8 +666,7 @@ struct iwl4965_rxon_assoc_cmd {
666 __le16 reserved; 666 __le16 reserved;
667} __attribute__ ((packed)); 667} __attribute__ ((packed));
668 668
669 669#define IWL_CONN_MAX_LISTEN_INTERVAL 10
670
671 670
672/* 671/*
673 * REPLY_RXON_TIMING = 0x14 (command, has simple generic response) 672 * REPLY_RXON_TIMING = 0x14 (command, has simple generic response)
@@ -1076,10 +1075,12 @@ struct iwl4965_rx_frame {
1076} __attribute__ ((packed)); 1075} __attribute__ ((packed));
1077 1076
1078/* Fixed (non-configurable) rx data from phy */ 1077/* Fixed (non-configurable) rx data from phy */
1079#define RX_PHY_FLAGS_ANTENNAE_OFFSET (4) 1078
1080#define RX_PHY_FLAGS_ANTENNAE_MASK (0x70) 1079#define IWL49_RX_RES_PHY_CNT 14
1081#define IWL_AGC_DB_MASK (0x3f80) /* MASK(7,13) */ 1080#define IWL49_RX_PHY_FLAGS_ANTENNAE_OFFSET (4)
1082#define IWL_AGC_DB_POS (7) 1081#define IWL49_RX_PHY_FLAGS_ANTENNAE_MASK (0x70)
1082#define IWL49_AGC_DB_MASK (0x3f80) /* MASK(7,13) */
1083#define IWL49_AGC_DB_POS (7)
1083struct iwl4965_rx_non_cfg_phy { 1084struct iwl4965_rx_non_cfg_phy {
1084 __le16 ant_selection; /* ant A bit 4, ant B bit 5, ant C bit 6 */ 1085 __le16 ant_selection; /* ant A bit 4, ant B bit 5, ant C bit 6 */
1085 __le16 agc_info; /* agc code 0:6, agc dB 7:13, reserved 14:15 */ 1086 __le16 agc_info; /* agc code 0:6, agc dB 7:13, reserved 14:15 */
@@ -1087,12 +1088,30 @@ struct iwl4965_rx_non_cfg_phy {
1087 u8 pad[0]; 1088 u8 pad[0];
1088} __attribute__ ((packed)); 1089} __attribute__ ((packed));
1089 1090
1091
1092#define IWL50_RX_RES_PHY_CNT 8
1093#define IWL50_RX_RES_AGC_IDX 1
1094#define IWL50_RX_RES_RSSI_AB_IDX 2
1095#define IWL50_RX_RES_RSSI_C_IDX 3
1096#define IWL50_OFDM_AGC_MSK 0xfe00
1097#define IWL50_OFDM_AGC_BIT_POS 9
1098#define IWL50_OFDM_RSSI_A_MSK 0x00ff
1099#define IWL50_OFDM_RSSI_A_BIT_POS 0
1100#define IWL50_OFDM_RSSI_B_MSK 0xff0000
1101#define IWL50_OFDM_RSSI_B_BIT_POS 16
1102#define IWL50_OFDM_RSSI_C_MSK 0x00ff
1103#define IWL50_OFDM_RSSI_C_BIT_POS 0
1104
1105struct iwl5000_non_cfg_phy {
1106 __le32 non_cfg_phy[IWL50_RX_RES_PHY_CNT]; /* upto 8 phy entries */
1107} __attribute__ ((packed));
1108
1109
1090/* 1110/*
1091 * REPLY_RX = 0xc3 (response only, not a command) 1111 * REPLY_RX = 0xc3 (response only, not a command)
1092 * Used only for legacy (non 11n) frames. 1112 * Used only for legacy (non 11n) frames.
1093 */ 1113 */
1094#define RX_RES_PHY_CNT 14 1114struct iwl_rx_phy_res {
1095struct iwl4965_rx_phy_res {
1096 u8 non_cfg_phy_cnt; /* non configurable DSP phy data byte count */ 1115 u8 non_cfg_phy_cnt; /* non configurable DSP phy data byte count */
1097 u8 cfg_phy_cnt; /* configurable DSP phy data byte count */ 1116 u8 cfg_phy_cnt; /* configurable DSP phy data byte count */
1098 u8 stat_id; /* configurable DSP phy data set ID */ 1117 u8 stat_id; /* configurable DSP phy data set ID */
@@ -1101,8 +1120,7 @@ struct iwl4965_rx_phy_res {
1101 __le32 beacon_time_stamp; /* beacon at on-air rise */ 1120 __le32 beacon_time_stamp; /* beacon at on-air rise */
1102 __le16 phy_flags; /* general phy flags: band, modulation, ... */ 1121 __le16 phy_flags; /* general phy flags: band, modulation, ... */
1103 __le16 channel; /* channel number */ 1122 __le16 channel; /* channel number */
1104 __le16 non_cfg_phy[RX_RES_PHY_CNT]; /* upto 14 phy entries */ 1123 u8 non_cfg_phy_buf[32]; /* for various implementations of non_cfg_phy */
1105 __le32 reserved2;
1106 __le32 rate_n_flags; /* RATE_MCS_* */ 1124 __le32 rate_n_flags; /* RATE_MCS_* */
1107 __le16 byte_count; /* frame's byte-count */ 1125 __le16 byte_count; /* frame's byte-count */
1108 __le16 reserved3; 1126 __le16 reserved3;
@@ -1993,7 +2011,7 @@ struct iwl4965_spectrum_notification {
1993 *****************************************************************************/ 2011 *****************************************************************************/
1994 2012
1995/** 2013/**
1996 * struct iwl4965_powertable_cmd - Power Table Command 2014 * struct iwl_powertable_cmd - Power Table Command
1997 * @flags: See below: 2015 * @flags: See below:
1998 * 2016 *
1999 * POWER_TABLE_CMD = 0x77 (command, has simple generic response) 2017 * POWER_TABLE_CMD = 0x77 (command, has simple generic response)
@@ -2027,7 +2045,7 @@ struct iwl4965_spectrum_notification {
2027#define IWL_POWER_PCI_PM_MSK __constant_cpu_to_le16(1 << 3) 2045#define IWL_POWER_PCI_PM_MSK __constant_cpu_to_le16(1 << 3)
2028#define IWL_POWER_FAST_PD __constant_cpu_to_le16(1 << 4) 2046#define IWL_POWER_FAST_PD __constant_cpu_to_le16(1 << 4)
2029 2047
2030struct iwl4965_powertable_cmd { 2048struct iwl_powertable_cmd {
2031 __le16 flags; 2049 __le16 flags;
2032 u8 keep_alive_seconds; 2050 u8 keep_alive_seconds;
2033 u8 debug_flags; 2051 u8 debug_flags;
@@ -2324,7 +2342,7 @@ struct iwl4965_beacon_notif {
2324/* 2342/*
2325 * REPLY_TX_BEACON = 0x91 (command, has simple generic response) 2343 * REPLY_TX_BEACON = 0x91 (command, has simple generic response)
2326 */ 2344 */
2327struct iwl4965_tx_beacon_cmd { 2345struct iwl_tx_beacon_cmd {
2328 struct iwl_tx_cmd tx; 2346 struct iwl_tx_cmd tx;
2329 __le16 tim_idx; 2347 __le16 tim_idx;
2330 u8 tim_size; 2348 u8 tim_size;
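The REPLY_RX changes above replace the fixed 14-entry 4965 non-config PHY array with an opaque 32-byte buffer in struct iwl_rx_phy_res plus per-family masks, so 4965 and 5000 hardware can each decode their own layout; this is also why iwl_hcmd_utils_ops grows a calc_rssi hook later in the patch. A minimal sketch of decoding the 5000-series packing with the new masks follows; the mask and shift values are taken from the hunk above, while the sample word contents are invented.

#include <stdio.h>
#include <stdint.h>

#define OFDM_AGC_MSK        0xfe00u
#define OFDM_AGC_BIT_POS    9
#define OFDM_RSSI_A_MSK     0x00ffu
#define OFDM_RSSI_A_BIT_POS 0
#define OFDM_RSSI_B_MSK     0xff0000u
#define OFDM_RSSI_B_BIT_POS 16

int main(void)
{
	/* Hypothetical words as they might appear at the RSSI_AB and AGC
	 * indices of non_cfg_phy[], already converted to CPU byte order. */
	uint32_t rssi_ab  = 0x002d0031;
	uint32_t agc_word = 0x00003a00;

	unsigned rssi_a = (rssi_ab & OFDM_RSSI_A_MSK) >> OFDM_RSSI_A_BIT_POS;
	unsigned rssi_b = (rssi_ab & OFDM_RSSI_B_MSK) >> OFDM_RSSI_B_BIT_POS;
	unsigned agc    = (agc_word & OFDM_AGC_MSK)   >> OFDM_AGC_BIT_POS;

	printf("rssi A=%u B=%u agc=%u\n", rssi_a, rssi_b, agc); /* 49 45 29 */
	return 0;
}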
diff --git a/drivers/net/wireless/iwlwifi/iwl-core.c b/drivers/net/wireless/iwlwifi/iwl-core.c
index a44188bf4459..9bd61809129f 100644
--- a/drivers/net/wireless/iwlwifi/iwl-core.c
+++ b/drivers/net/wireless/iwlwifi/iwl-core.c
@@ -383,8 +383,8 @@ void iwl_reset_qos(struct iwl_priv *priv)
383} 383}
384EXPORT_SYMBOL(iwl_reset_qos); 384EXPORT_SYMBOL(iwl_reset_qos);
385 385
386#define MAX_BIT_RATE_40_MHZ 0x96; /* 150 Mbps */ 386#define MAX_BIT_RATE_40_MHZ 0x96 /* 150 Mbps */
387#define MAX_BIT_RATE_20_MHZ 0x48; /* 72 Mbps */ 387#define MAX_BIT_RATE_20_MHZ 0x48 /* 72 Mbps */
388static void iwlcore_init_ht_hw_capab(const struct iwl_priv *priv, 388static void iwlcore_init_ht_hw_capab(const struct iwl_priv *priv,
389 struct ieee80211_ht_info *ht_info, 389 struct ieee80211_ht_info *ht_info,
390 enum ieee80211_band band) 390 enum ieee80211_band band)
@@ -815,11 +815,10 @@ int iwl_setup_mac(struct iwl_priv *priv)
815{ 815{
816 int ret; 816 int ret;
817 struct ieee80211_hw *hw = priv->hw; 817 struct ieee80211_hw *hw = priv->hw;
818 hw->rate_control_algorithm = "iwl-4965-rs"; 818 hw->rate_control_algorithm = "iwl-agn-rs";
819 819
820 /* Tell mac80211 our characteristics */ 820 /* Tell mac80211 our characteristics */
821 hw->flags = IEEE80211_HW_HOST_GEN_BEACON_TEMPLATE | 821 hw->flags = IEEE80211_HW_SIGNAL_DBM |
822 IEEE80211_HW_SIGNAL_DBM |
823 IEEE80211_HW_NOISE_DBM; 822 IEEE80211_HW_NOISE_DBM;
824 /* Default value; 4 EDCA QOS priorities */ 823 /* Default value; 4 EDCA QOS priorities */
825 hw->queues = 4; 824 hw->queues = 4;
@@ -828,6 +827,7 @@ int iwl_setup_mac(struct iwl_priv *priv)
828 hw->ampdu_queues = priv->cfg->mod_params->num_of_ampdu_queues; 827 hw->ampdu_queues = priv->cfg->mod_params->num_of_ampdu_queues;
829 828
830 hw->conf.beacon_int = 100; 829 hw->conf.beacon_int = 100;
830 hw->max_listen_interval = IWL_CONN_MAX_LISTEN_INTERVAL;
831 831
832 if (priv->bands[IEEE80211_BAND_2GHZ].n_channels) 832 if (priv->bands[IEEE80211_BAND_2GHZ].n_channels)
833 priv->hw->wiphy->bands[IEEE80211_BAND_2GHZ] = 833 priv->hw->wiphy->bands[IEEE80211_BAND_2GHZ] =
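Beside renaming the rate-control algorithm string to "iwl-agn-rs" and capping mac80211's max_listen_interval, the iwl-core.c hunk quietly removes trailing semicolons from the MAX_BIT_RATE_* macros. A tiny standalone sketch of why such semicolons are dangerous; the identifiers here are illustrative, only the pitfall itself is the point.

#include <stdio.h>

/* Old form, with a stray trailing semicolon: expands to "0x48;". It only
 * works by accident when the macro is the last token of a statement.     */
#define BAD_MAX_BIT_RATE_20_MHZ  0x48;
/* Corrected form from the hunk above: */
#define MAX_BIT_RATE_20_MHZ      0x48

int main(void)
{
	int rate;

	rate = MAX_BIT_RATE_20_MHZ;        /* fine: 72 Mbps */

	/* With the old macro the next line would expand to
	 *     rate = 0x48; * 2;
	 * a complete statement followed by a syntax error. */
	rate = MAX_BIT_RATE_20_MHZ * 2;

	printf("%d\n", rate);
	return 0;
}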
diff --git a/drivers/net/wireless/iwlwifi/iwl-core.h b/drivers/net/wireless/iwlwifi/iwl-core.h
index db66114f1e56..64f139e97444 100644
--- a/drivers/net/wireless/iwlwifi/iwl-core.h
+++ b/drivers/net/wireless/iwlwifi/iwl-core.h
@@ -95,6 +95,8 @@ struct iwl_hcmd_utils_ops {
95 void (*chain_noise_reset)(struct iwl_priv *priv); 95 void (*chain_noise_reset)(struct iwl_priv *priv);
96 void (*rts_tx_cmd_flag)(struct ieee80211_tx_info *info, 96 void (*rts_tx_cmd_flag)(struct ieee80211_tx_info *info,
97 __le32 *tx_flags); 97 __le32 *tx_flags);
98 int (*calc_rssi)(struct iwl_priv *priv,
99 struct iwl_rx_phy_res *rx_resp);
98}; 100};
99 101
100struct iwl_lib_ops { 102struct iwl_lib_ops {
@@ -139,7 +141,6 @@ struct iwl_lib_ops {
139 int (*set_pwr_src)(struct iwl_priv *priv, enum iwl_pwr_src src); 141 int (*set_pwr_src)(struct iwl_priv *priv, enum iwl_pwr_src src);
140 } apm_ops; 142 } apm_ops;
141 /* power */ 143 /* power */
142 int (*set_power)(struct iwl_priv *priv, void *cmd);
143 int (*send_tx_power) (struct iwl_priv *priv); 144 int (*send_tx_power) (struct iwl_priv *priv);
144 void (*update_chain_flags)(struct iwl_priv *priv); 145 void (*update_chain_flags)(struct iwl_priv *priv);
145 void (*temperature) (struct iwl_priv *priv); 146 void (*temperature) (struct iwl_priv *priv);
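The iwl-core.h hunk adds a calc_rssi hook to iwl_hcmd_utils_ops and drops the set_power hook, continuing the move toward per-family decoding behind an ops table. A stripped-down sketch of that pattern is below; the struct, the two implementations and the offset constants are hypothetical, the point is only that the core calls through the pointer without knowing which hardware family filled it in.

#include <stdio.h>

struct phy_res { int raw; };

struct hcmd_utils_ops {
	int (*calc_rssi)(const struct phy_res *res);
};

/* Per-family implementations; the offsets are invented for illustration. */
static int calc_rssi_4965(const struct phy_res *res) { return res->raw - 44; }
static int calc_rssi_5000(const struct phy_res *res) { return res->raw - 50; }

static const struct hcmd_utils_ops ops_4965 = { .calc_rssi = calc_rssi_4965 };
static const struct hcmd_utils_ops ops_5000 = { .calc_rssi = calc_rssi_5000 };

int main(void)
{
	struct phy_res res = { .raw = 90 };
	const struct hcmd_utils_ops *ops = &ops_5000;   /* chosen at probe time */

	printf("rssi = %d\n", ops->calc_rssi(&res));
	(void)ops_4965;
	return 0;
}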
diff --git a/drivers/net/wireless/iwlwifi/iwl-csr.h b/drivers/net/wireless/iwlwifi/iwl-csr.h
index 545ed692d889..52629fbd835a 100644
--- a/drivers/net/wireless/iwlwifi/iwl-csr.h
+++ b/drivers/net/wireless/iwlwifi/iwl-csr.h
@@ -104,6 +104,7 @@
104 * 3-2: 0 = A, 1 = B, 2 = C, 3 = D step 104 * 3-2: 0 = A, 1 = B, 2 = C, 3 = D step
105 */ 105 */
106#define CSR_HW_REV_WA_REG (CSR_BASE+0x22C) 106#define CSR_HW_REV_WA_REG (CSR_BASE+0x22C)
107#define CSR_DBG_HPET_MEM_REG (CSR_BASE+0x240)
107 108
108/* Bits for CSR_HW_IF_CONFIG_REG */ 109/* Bits for CSR_HW_IF_CONFIG_REG */
109#define CSR49_HW_IF_CONFIG_REG_BIT_4965_R (0x00000010) 110#define CSR49_HW_IF_CONFIG_REG_BIT_4965_R (0x00000010)
@@ -118,7 +119,12 @@
118#define CSR39_HW_IF_CONFIG_REG_BITS_SILICON_TYPE_A (0x00000000) 119#define CSR39_HW_IF_CONFIG_REG_BITS_SILICON_TYPE_A (0x00000000)
119#define CSR39_HW_IF_CONFIG_REG_BITS_SILICON_TYPE_B (0x00001000) 120#define CSR39_HW_IF_CONFIG_REG_BITS_SILICON_TYPE_B (0x00001000)
120 121
121#define CSR_HW_IF_CONFIG_REG_BIT_EEPROM_OWN_SEM (0x00200000) 122#define CSR_HW_IF_CONFIG_REG_BIT_HAP_WAKE_L1A (0x00080000)
123#define CSR_HW_IF_CONFIG_REG_BIT_EEPROM_OWN_SEM (0x00200000)
124#define CSR_HW_IF_CONFIG_REG_BIT_PCI_OWN_SEM (0x00400000)
125#define CSR_HW_IF_CONFIG_REG_BIT_ME_OWN (0x02000000)
126#define CSR_HW_IF_CONFIG_REG_BIT_WAKE_ME (0x08000000)
127
122 128
123/* interrupt flags in INTA, set by uCode or hardware (e.g. dma), 129/* interrupt flags in INTA, set by uCode or hardware (e.g. dma),
124 * acknowledged (reset) by host writing "1" to flagged bits. */ 130 * acknowledged (reset) by host writing "1" to flagged bits. */
@@ -236,6 +242,8 @@
236#define CSR39_ANA_PLL_CFG_VAL (0x01000000) 242#define CSR39_ANA_PLL_CFG_VAL (0x01000000)
237#define CSR50_ANA_PLL_CFG_VAL (0x00880300) 243#define CSR50_ANA_PLL_CFG_VAL (0x00880300)
238 244
245/* HPET MEM debug */
246#define CSR_DBG_HPET_MEM_REG_VAL (0xFFFF0000)
239/*=== HBUS (Host-side Bus) ===*/ 247/*=== HBUS (Host-side Bus) ===*/
240#define HBUS_BASE (0x400) 248#define HBUS_BASE (0x400)
241/* 249/*
diff --git a/drivers/net/wireless/iwlwifi/iwl-debug.h b/drivers/net/wireless/iwlwifi/iwl-debug.h
index 58384805a494..d2daa174df22 100644
--- a/drivers/net/wireless/iwlwifi/iwl-debug.h
+++ b/drivers/net/wireless/iwlwifi/iwl-debug.h
@@ -33,12 +33,12 @@
33#define IWL_DEBUG(level, fmt, args...) \ 33#define IWL_DEBUG(level, fmt, args...) \
34do { if (priv->debug_level & (level)) \ 34do { if (priv->debug_level & (level)) \
35 dev_printk(KERN_ERR, &(priv->hw->wiphy->dev), "%c %s " fmt, \ 35 dev_printk(KERN_ERR, &(priv->hw->wiphy->dev), "%c %s " fmt, \
36 in_interrupt() ? 'I' : 'U', __FUNCTION__ , ## args); } while (0) 36 in_interrupt() ? 'I' : 'U', __func__ , ## args); } while (0)
37 37
38#define IWL_DEBUG_LIMIT(level, fmt, args...) \ 38#define IWL_DEBUG_LIMIT(level, fmt, args...) \
39do { if ((priv->debug_level & (level)) && net_ratelimit()) \ 39do { if ((priv->debug_level & (level)) && net_ratelimit()) \
40 dev_printk(KERN_ERR, &(priv->hw->wiphy->dev), "%c %s " fmt, \ 40 dev_printk(KERN_ERR, &(priv->hw->wiphy->dev), "%c %s " fmt, \
41 in_interrupt() ? 'I' : 'U', __FUNCTION__ , ## args); } while (0) 41 in_interrupt() ? 'I' : 'U', __func__ , ## args); } while (0)
42 42
43#ifdef CONFIG_IWLWIFI_DEBUGFS 43#ifdef CONFIG_IWLWIFI_DEBUGFS
44struct iwl_debugfs { 44struct iwl_debugfs {
@@ -68,12 +68,8 @@ void iwl_dbgfs_unregister(struct iwl_priv *priv);
68#endif 68#endif
69 69
70#else 70#else
71static inline void IWL_DEBUG(int level, const char *fmt, ...) 71#define IWL_DEBUG(level, fmt, args...)
72{ 72#define IWL_DEBUG_LIMIT(level, fmt, args...)
73}
74static inline void IWL_DEBUG_LIMIT(int level, const char *fmt, ...)
75{
76}
77#endif /* CONFIG_IWLWIFI_DEBUG */ 73#endif /* CONFIG_IWLWIFI_DEBUG */
78 74
79 75
@@ -118,7 +114,7 @@ static inline void iwl_dbgfs_unregister(struct iwl_priv *priv)
118#define IWL_DL_MAC80211 (1 << 1) 114#define IWL_DL_MAC80211 (1 << 1)
119#define IWL_DL_HOST_COMMAND (1 << 2) 115#define IWL_DL_HOST_COMMAND (1 << 2)
120#define IWL_DL_STATE (1 << 3) 116#define IWL_DL_STATE (1 << 3)
121 117#define IWL_DL_MACDUMP (1 << 4)
122#define IWL_DL_RADIO (1 << 7) 118#define IWL_DL_RADIO (1 << 7)
123#define IWL_DL_POWER (1 << 8) 119#define IWL_DL_POWER (1 << 8)
124#define IWL_DL_TEMP (1 << 9) 120#define IWL_DL_TEMP (1 << 9)
@@ -158,6 +154,7 @@ static inline void iwl_dbgfs_unregister(struct iwl_priv *priv)
158#define IWL_DEBUG_INFO(f, a...) IWL_DEBUG(IWL_DL_INFO, f, ## a) 154#define IWL_DEBUG_INFO(f, a...) IWL_DEBUG(IWL_DL_INFO, f, ## a)
159 155
160#define IWL_DEBUG_MAC80211(f, a...) IWL_DEBUG(IWL_DL_MAC80211, f, ## a) 156#define IWL_DEBUG_MAC80211(f, a...) IWL_DEBUG(IWL_DL_MAC80211, f, ## a)
157#define IWL_DEBUG_MACDUMP(f, a...) IWL_DEBUG(IWL_DL_MACDUMP, f, ## a)
161#define IWL_DEBUG_TEMP(f, a...) IWL_DEBUG(IWL_DL_TEMP, f, ## a) 158#define IWL_DEBUG_TEMP(f, a...) IWL_DEBUG(IWL_DL_TEMP, f, ## a)
162#define IWL_DEBUG_SCAN(f, a...) IWL_DEBUG(IWL_DL_SCAN, f, ## a) 159#define IWL_DEBUG_SCAN(f, a...) IWL_DEBUG(IWL_DL_SCAN, f, ## a)
163#define IWL_DEBUG_RX(f, a...) IWL_DEBUG(IWL_DL_RX, f, ## a) 160#define IWL_DEBUG_RX(f, a...) IWL_DEBUG(IWL_DL_RX, f, ## a)
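Note in the iwl-debug.h hunk that the non-debug stubs change from empty static inline functions to empty variadic macros, so in builds without CONFIG_IWLWIFI_DEBUG the format arguments are not evaluated at all, and a new IWL_DL_MACDUMP level carries the very noisy per-frame tx path messages. A small sketch of the compile-out pattern, using invented macro and variable names rather than the driver's:

#include <stdio.h>

#define DL_MAC80211  (1u << 1)
#define DL_MACDUMP   (1u << 4)     /* the new, noisier per-frame level */

#ifdef DEBUG
static unsigned debug_level = DL_MAC80211;     /* runtime-selectable mask */
#define DBG(level, fmt, args...) \
	do { if (debug_level & (level)) printf(fmt, ## args); } while (0)
#else
/* Compiled out entirely: the arguments are discarded by the preprocessor,
 * so they are never evaluated and no object code is emitted. */
#define DBG(level, fmt, args...)
#endif

int main(void)
{
	DBG(DL_MAC80211, "config change on queue %d\n", 3);
	DBG(DL_MACDUMP, "tx frame enter\n");   /* filtered unless MACDUMP is set */
	return 0;
}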
diff --git a/drivers/net/wireless/iwlwifi/iwl-debugfs.c b/drivers/net/wireless/iwlwifi/iwl-debugfs.c
index ed948dc59b3d..20db0eb636a8 100644
--- a/drivers/net/wireless/iwlwifi/iwl-debugfs.c
+++ b/drivers/net/wireless/iwlwifi/iwl-debugfs.c
@@ -231,7 +231,7 @@ static ssize_t iwl_dbgfs_stations_read(struct file *file, char __user *user_buf,
231 DECLARE_MAC_BUF(mac); 231 DECLARE_MAC_BUF(mac);
232 232
233 buf = kmalloc(bufsz, GFP_KERNEL); 233 buf = kmalloc(bufsz, GFP_KERNEL);
234 if(!buf) 234 if (!buf)
235 return -ENOMEM; 235 return -ENOMEM;
236 236
237 pos += scnprintf(buf + pos, bufsz - pos, "num of stations: %d\n\n", 237 pos += scnprintf(buf + pos, bufsz - pos, "num of stations: %d\n\n",
@@ -364,16 +364,19 @@ int iwl_dbgfs_register(struct iwl_priv *priv, const char *name)
364{ 364{
365 struct iwl_debugfs *dbgfs; 365 struct iwl_debugfs *dbgfs;
366 struct dentry *phyd = priv->hw->wiphy->debugfsdir; 366 struct dentry *phyd = priv->hw->wiphy->debugfsdir;
367 int ret = 0;
367 368
368 dbgfs = kzalloc(sizeof(struct iwl_debugfs), GFP_KERNEL); 369 dbgfs = kzalloc(sizeof(struct iwl_debugfs), GFP_KERNEL);
369 if (!dbgfs) { 370 if (!dbgfs) {
371 ret = -ENOMEM;
370 goto err; 372 goto err;
371 } 373 }
372 374
373 priv->dbgfs = dbgfs; 375 priv->dbgfs = dbgfs;
374 dbgfs->name = name; 376 dbgfs->name = name;
375 dbgfs->dir_drv = debugfs_create_dir(name, phyd); 377 dbgfs->dir_drv = debugfs_create_dir(name, phyd);
376 if (!dbgfs->dir_drv || IS_ERR(dbgfs->dir_drv)){ 378 if (!dbgfs->dir_drv || IS_ERR(dbgfs->dir_drv)) {
379 ret = -ENOENT;
377 goto err; 380 goto err;
378 } 381 }
379 382
@@ -394,7 +397,7 @@ int iwl_dbgfs_register(struct iwl_priv *priv, const char *name)
394err: 397err:
395 IWL_ERROR("Can't open the debugfs directory\n"); 398 IWL_ERROR("Can't open the debugfs directory\n");
396 iwl_dbgfs_unregister(priv); 399 iwl_dbgfs_unregister(priv);
397 return -ENOENT; 400 return ret;
398} 401}
399EXPORT_SYMBOL(iwl_dbgfs_register); 402EXPORT_SYMBOL(iwl_dbgfs_register);
400 403
@@ -404,7 +407,7 @@ EXPORT_SYMBOL(iwl_dbgfs_register);
404 */ 407 */
405void iwl_dbgfs_unregister(struct iwl_priv *priv) 408void iwl_dbgfs_unregister(struct iwl_priv *priv)
406{ 409{
407 if (!(priv->dbgfs)) 410 if (!priv->dbgfs)
408 return; 411 return;
409 412
410 DEBUGFS_REMOVE(priv->dbgfs->dbgfs_data_files.file_eeprom); 413 DEBUGFS_REMOVE(priv->dbgfs->dbgfs_data_files.file_eeprom);
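The iwl-debugfs.c change above makes iwl_dbgfs_register() report the real failure (-ENOMEM for the allocation, -ENOENT for the directory) instead of a blanket -ENOENT. A minimal user-space analogue of that error-path shape is sketched below; the names and helpers are invented stand-ins, only the goto-plus-distinct-errno structure reflects the hunk.

#include <stdio.h>
#include <stdlib.h>
#include <errno.h>

struct dbgfs { const char *name; void *dir; };

static void *create_dir(const char *name) { (void)name; return malloc(1); }
static void  cleanup(struct dbgfs *d)     { if (d) free(d->dir); free(d); }

static int dbgfs_register(struct dbgfs **out, const char *name)
{
	struct dbgfs *d;
	int ret = 0;

	d = calloc(1, sizeof(*d));
	if (!d) {
		ret = -ENOMEM;     /* report the real reason */
		goto err;
	}

	d->name = name;
	d->dir = create_dir(name);
	if (!d->dir) {
		ret = -ENOENT;
		goto err;
	}

	*out = d;
	return 0;

err:
	cleanup(d);
	return ret;
}

int main(void)
{
	struct dbgfs *d = NULL;
	int ret = dbgfs_register(&d, "iwlagn");

	printf("register: %d\n", ret);
	if (!ret)
		cleanup(d);
	return 0;
}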
diff --git a/drivers/net/wireless/iwlwifi/iwl-dev.h b/drivers/net/wireless/iwlwifi/iwl-dev.h
index 4d789e353e3a..c19db438306c 100644
--- a/drivers/net/wireless/iwlwifi/iwl-dev.h
+++ b/drivers/net/wireless/iwlwifi/iwl-dev.h
@@ -36,7 +36,7 @@
36#include <linux/kernel.h> 36#include <linux/kernel.h>
37#include <net/ieee80211_radiotap.h> 37#include <net/ieee80211_radiotap.h>
38 38
39#define DRV_NAME "iwl4965" 39#define DRV_NAME "iwlagn"
40#include "iwl-rfkill.h" 40#include "iwl-rfkill.h"
41#include "iwl-eeprom.h" 41#include "iwl-eeprom.h"
42#include "iwl-4965-hw.h" 42#include "iwl-4965-hw.h"
@@ -45,6 +45,7 @@
45#include "iwl-debug.h" 45#include "iwl-debug.h"
46#include "iwl-led.h" 46#include "iwl-led.h"
47#include "iwl-power.h" 47#include "iwl-power.h"
48#include "iwl-agn-rs.h"
48 49
49/* configuration for the iwl4965 */ 50/* configuration for the iwl4965 */
50extern struct iwl_cfg iwl4965_agn_cfg; 51extern struct iwl_cfg iwl4965_agn_cfg;
@@ -134,8 +135,7 @@ struct iwl_tx_info {
134struct iwl_tx_queue { 135struct iwl_tx_queue {
135 struct iwl_queue q; 136 struct iwl_queue q;
136 struct iwl_tfd_frame *bd; 137 struct iwl_tfd_frame *bd;
137 struct iwl_cmd *cmd; 138 struct iwl_cmd *cmd[TFD_TX_CMD_SLOTS];
138 dma_addr_t dma_addr_cmd;
139 struct iwl_tx_info *txb; 139 struct iwl_tx_info *txb;
140 int need_update; 140 int need_update;
141 int sched_retry; 141 int sched_retry;
@@ -191,7 +191,6 @@ struct iwl4965_clip_group {
191 const s8 clip_powers[IWL_MAX_RATES]; 191 const s8 clip_powers[IWL_MAX_RATES];
192}; 192};
193 193
194#include "iwl-4965-rs.h"
195 194
196#define IWL_TX_FIFO_AC0 0 195#define IWL_TX_FIFO_AC0 0
197#define IWL_TX_FIFO_AC1 1 196#define IWL_TX_FIFO_AC1 1
@@ -219,7 +218,7 @@ enum iwl_pwr_src {
219struct iwl_frame { 218struct iwl_frame {
220 union { 219 union {
221 struct ieee80211_hdr frame; 220 struct ieee80211_hdr frame;
222 struct iwl4965_tx_beacon_cmd beacon; 221 struct iwl_tx_beacon_cmd beacon;
223 u8 raw[IEEE80211_FRAME_LEN]; 222 u8 raw[IEEE80211_FRAME_LEN];
224 u8 cmd[360]; 223 u8 cmd[360];
225 } u; 224 } u;
@@ -283,10 +282,9 @@ struct iwl_cmd {
283 u32 val32; 282 u32 val32;
284 struct iwl4965_bt_cmd bt; 283 struct iwl4965_bt_cmd bt;
285 struct iwl4965_rxon_time_cmd rxon_time; 284 struct iwl4965_rxon_time_cmd rxon_time;
286 struct iwl4965_powertable_cmd powertable; 285 struct iwl_powertable_cmd powertable;
287 struct iwl_qosparam_cmd qosparam; 286 struct iwl_qosparam_cmd qosparam;
288 struct iwl_tx_cmd tx; 287 struct iwl_tx_cmd tx;
289 struct iwl4965_tx_beacon_cmd tx_beacon;
290 struct iwl4965_rxon_assoc_cmd rxon_assoc; 288 struct iwl4965_rxon_assoc_cmd rxon_assoc;
291 struct iwl_rem_sta_cmd rm_sta; 289 struct iwl_rem_sta_cmd rm_sta;
292 u8 *indirect; 290 u8 *indirect;
@@ -590,6 +588,7 @@ extern unsigned int iwl4965_fill_beacon_frame(struct iwl_priv *priv,
590 const u8 *dest, int left); 588 const u8 *dest, int left);
591extern void iwl4965_update_chain_flags(struct iwl_priv *priv); 589extern void iwl4965_update_chain_flags(struct iwl_priv *priv);
592int iwl4965_set_pwr_src(struct iwl_priv *priv, enum iwl_pwr_src src); 590int iwl4965_set_pwr_src(struct iwl_priv *priv, enum iwl_pwr_src src);
591extern int iwl4965_set_power(struct iwl_priv *priv, void *cmd);
593 592
594extern const u8 iwl_bcast_addr[ETH_ALEN]; 593extern const u8 iwl_bcast_addr[ETH_ALEN];
595 594
@@ -642,10 +641,6 @@ struct iwl_priv;
642 * Forward declare iwl-4965.c functions for iwl-base.c 641 * Forward declare iwl-4965.c functions for iwl-base.c
643 */ 642 */
644extern void iwl4965_rf_kill_ct_config(struct iwl_priv *priv); 643extern void iwl4965_rf_kill_ct_config(struct iwl_priv *priv);
645
646int iwl4965_mac_ampdu_action(struct ieee80211_hw *hw,
647 enum ieee80211_ampdu_mlme_action action,
648 const u8 *addr, u16 tid, u16 *ssn);
649int iwl4965_check_empty_hw_queue(struct iwl_priv *priv, int sta_id, 644int iwl4965_check_empty_hw_queue(struct iwl_priv *priv, int sta_id,
650 u8 tid, int txq_id); 645 u8 tid, int txq_id);
651 646
@@ -812,14 +807,11 @@ struct iwl_chain_noise_data {
812#define EEPROM_SEM_RETRY_LIMIT 1000 /* number of attempts (not time) */ 807#define EEPROM_SEM_RETRY_LIMIT 1000 /* number of attempts (not time) */
813 808
814 809
815#ifdef CONFIG_IWL4965_SPECTRUM_MEASUREMENT
816
817enum { 810enum {
818 MEASUREMENT_READY = (1 << 0), 811 MEASUREMENT_READY = (1 << 0),
819 MEASUREMENT_ACTIVE = (1 << 1), 812 MEASUREMENT_ACTIVE = (1 << 1),
820}; 813};
821 814
822#endif
823 815
824#define IWL_MAX_NUM_QUEUES 20 /* FIXME: do dynamic allocation */ 816#define IWL_MAX_NUM_QUEUES 20 /* FIXME: do dynamic allocation */
825 817
@@ -844,7 +836,7 @@ struct iwl_priv {
844 836
845 struct ieee80211_supported_band bands[IEEE80211_NUM_BANDS]; 837 struct ieee80211_supported_band bands[IEEE80211_NUM_BANDS];
846 838
847#ifdef CONFIG_IWL4965_SPECTRUM_MEASUREMENT 839#ifdef CONFIG_IWLAGN_SPECTRUM_MEASUREMENT
848 /* spectrum measurement report caching */ 840 /* spectrum measurement report caching */
849 struct iwl4965_spectrum_notification measure_report; 841 struct iwl4965_spectrum_notification measure_report;
850 u8 measurement_status; 842 u8 measurement_status;
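In the iwl-dev.h hunk, struct iwl_tx_queue's single contiguous command buffer (cmd plus dma_addr_cmd) becomes an array of per-slot pointers, cmd[TFD_TX_CMD_SLOTS], which is why the iwl-hcmd.c hunk below indexes the slot directly instead of taking its address. A rough user-space sketch of the per-slot allocation idea, with an invented slot count and payload size:

#include <stdlib.h>

#define TX_CMD_SLOTS 256               /* hypothetical slot count */

struct cmd { unsigned char payload[360]; };

/* Old shape: one contiguous block indexed as cmd[i].
 * New shape: an array of per-slot pointers, each slot allocated
 * (and, in the driver, DMA-mapped) on its own. */
struct txq {
	struct cmd *cmd[TX_CMD_SLOTS];
};

static int txq_alloc_cmds(struct txq *q)
{
	for (int i = 0; i < TX_CMD_SLOTS; i++) {
		q->cmd[i] = calloc(1, sizeof(struct cmd));
		if (!q->cmd[i]) {
			while (i--)            /* roll back what was allocated */
				free(q->cmd[i]);
			return -1;
		}
	}
	return 0;
}

int main(void)
{
	struct txq q;

	if (txq_alloc_cmds(&q))
		return 1;
	for (int i = 0; i < TX_CMD_SLOTS; i++)
		free(q.cmd[i]);
	return 0;
}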
diff --git a/drivers/net/wireless/iwlwifi/iwl-eeprom.c b/drivers/net/wireless/iwlwifi/iwl-eeprom.c
index 4a08a1b50979..bce53830b301 100644
--- a/drivers/net/wireless/iwlwifi/iwl-eeprom.c
+++ b/drivers/net/wireless/iwlwifi/iwl-eeprom.c
@@ -273,8 +273,7 @@ EXPORT_SYMBOL(iwl_eeprom_init);
273 273
274void iwl_eeprom_free(struct iwl_priv *priv) 274void iwl_eeprom_free(struct iwl_priv *priv)
275{ 275{
276 if(priv->eeprom) 276 kfree(priv->eeprom);
277 kfree(priv->eeprom);
278 priv->eeprom = NULL; 277 priv->eeprom = NULL;
279} 278}
280EXPORT_SYMBOL(iwl_eeprom_free); 279EXPORT_SYMBOL(iwl_eeprom_free);
diff --git a/drivers/net/wireless/iwlwifi/iwl-hcmd.c b/drivers/net/wireless/iwlwifi/iwl-hcmd.c
index 8fa991b7202a..6512834bb916 100644
--- a/drivers/net/wireless/iwlwifi/iwl-hcmd.c
+++ b/drivers/net/wireless/iwlwifi/iwl-hcmd.c
@@ -228,7 +228,7 @@ cancel:
228 * TX cmd queue. Otherwise in case the cmd comes 228 * TX cmd queue. Otherwise in case the cmd comes
229 * in later, it will possibly set an invalid 229 * in later, it will possibly set an invalid
230 * address (cmd->meta.source). */ 230 * address (cmd->meta.source). */
231 qcmd = &priv->txq[IWL_CMD_QUEUE_NUM].cmd[cmd_idx]; 231 qcmd = priv->txq[IWL_CMD_QUEUE_NUM].cmd[cmd_idx];
232 qcmd->meta.flags &= ~CMD_WANT_SKB; 232 qcmd->meta.flags &= ~CMD_WANT_SKB;
233 } 233 }
234fail: 234fail:
diff --git a/drivers/net/wireless/iwlwifi/iwl-led.c b/drivers/net/wireless/iwlwifi/iwl-led.c
index 899d7a2567a8..cb11c4a4d691 100644
--- a/drivers/net/wireless/iwlwifi/iwl-led.c
+++ b/drivers/net/wireless/iwlwifi/iwl-led.c
@@ -161,12 +161,32 @@ int iwl4965_led_off(struct iwl_priv *priv, int led_id)
161/* Set led register off */ 161/* Set led register off */
162static int iwl4965_led_off_reg(struct iwl_priv *priv, int led_id) 162static int iwl4965_led_off_reg(struct iwl_priv *priv, int led_id)
163{ 163{
164 IWL_DEBUG_LED("radio off\n"); 164 IWL_DEBUG_LED("LED Reg off\n");
165 iwl_write32(priv, CSR_LED_REG, CSR_LED_REG_TRUN_OFF); 165 iwl_write32(priv, CSR_LED_REG, CSR_LED_REG_TRUN_OFF);
166 return 0; 166 return 0;
167} 167}
168 168
169/* 169/*
170 * Set led register in case of disassociation according to rfkill state
171 */
172static int iwl_led_associate(struct iwl_priv *priv, int led_id)
173{
174 IWL_DEBUG_LED("Associated\n");
175 priv->allow_blinking = 1;
176 return iwl4965_led_on_reg(priv, led_id);
177}
178static int iwl_led_disassociate(struct iwl_priv *priv, int led_id)
179{
180 priv->allow_blinking = 0;
181 if (iwl_is_rfkill(priv))
182 iwl4965_led_off_reg(priv, led_id);
183 else
184 iwl4965_led_on_reg(priv, led_id);
185
186 return 0;
187}
188
189/*
170 * brightness call back function for Tx/Rx LED 190 * brightness call back function for Tx/Rx LED
171 */ 191 */
172static int iwl_led_associated(struct iwl_priv *priv, int led_id) 192static int iwl_led_associated(struct iwl_priv *priv, int led_id)
@@ -199,16 +219,10 @@ static void iwl_led_brightness_set(struct led_classdev *led_cdev,
199 led_type_str[led->type], brightness); 219 led_type_str[led->type], brightness);
200 switch (brightness) { 220 switch (brightness) {
201 case LED_FULL: 221 case LED_FULL:
202 if (led->type == IWL_LED_TRG_ASSOC)
203 priv->allow_blinking = 1;
204
205 if (led->led_on) 222 if (led->led_on)
206 led->led_on(priv, IWL_LED_LINK); 223 led->led_on(priv, IWL_LED_LINK);
207 break; 224 break;
208 case LED_OFF: 225 case LED_OFF:
209 if (led->type == IWL_LED_TRG_ASSOC)
210 priv->allow_blinking = 0;
211
212 if (led->led_off) 226 if (led->led_off)
213 led->led_off(priv, IWL_LED_LINK); 227 led->led_off(priv, IWL_LED_LINK);
214 break; 228 break;
@@ -228,12 +242,12 @@ static void iwl_led_brightness_set(struct led_classdev *led_cdev,
228 */ 242 */
229static int iwl_leds_register_led(struct iwl_priv *priv, struct iwl_led *led, 243static int iwl_leds_register_led(struct iwl_priv *priv, struct iwl_led *led,
230 enum led_type type, u8 set_led, 244 enum led_type type, u8 set_led,
231 const char *name, char *trigger) 245 char *trigger)
232{ 246{
233 struct device *device = wiphy_dev(priv->hw->wiphy); 247 struct device *device = wiphy_dev(priv->hw->wiphy);
234 int ret; 248 int ret;
235 249
236 led->led_dev.name = name; 250 led->led_dev.name = led->name;
237 led->led_dev.brightness_set = iwl_led_brightness_set; 251 led->led_dev.brightness_set = iwl_led_brightness_set;
238 led->led_dev.default_trigger = trigger; 252 led->led_dev.default_trigger = trigger;
239 253
@@ -268,7 +282,9 @@ static int iwl_get_blink_rate(struct iwl_priv *priv)
268 if (tpt < 0) /* wrapparound */ 282 if (tpt < 0) /* wrapparound */
269 tpt = -tpt; 283 tpt = -tpt;
270 284
271 IWL_DEBUG_LED("tpt %lld current_tpt %lld\n", tpt, current_tpt); 285 IWL_DEBUG_LED("tpt %lld current_tpt %llu\n",
286 (long long)tpt,
287 (unsigned long long)current_tpt);
272 priv->led_tpt = current_tpt; 288 priv->led_tpt = current_tpt;
273 289
274 if (!priv->allow_blinking) 290 if (!priv->allow_blinking)
@@ -282,12 +298,6 @@ static int iwl_get_blink_rate(struct iwl_priv *priv)
282 return i; 298 return i;
283} 299}
284 300
285static inline int is_rf_kill(struct iwl_priv *priv)
286{
287 return test_bit(STATUS_RF_KILL_HW, &priv->status) ||
288 test_bit(STATUS_RF_KILL_SW, &priv->status);
289}
290
291/* 301/*
292 * this function called from handler. Since setting Led command can 302 * this function called from handler. Since setting Led command can
293 * happen very frequent we postpone led command to be called from 303 * happen very frequent we postpone led command to be called from
@@ -301,7 +311,7 @@ void iwl_leds_background(struct iwl_priv *priv)
301 priv->last_blink_time = 0; 311 priv->last_blink_time = 0;
302 return; 312 return;
303 } 313 }
304 if (is_rf_kill(priv)) { 314 if (iwl_is_rfkill(priv)) {
305 priv->last_blink_time = 0; 315 priv->last_blink_time = 0;
306 return; 316 return;
307 } 317 }
@@ -335,7 +345,6 @@ EXPORT_SYMBOL(iwl_leds_background);
335int iwl_leds_register(struct iwl_priv *priv) 345int iwl_leds_register(struct iwl_priv *priv)
336{ 346{
337 char *trigger; 347 char *trigger;
338 char name[32];
339 int ret; 348 int ret;
340 349
341 priv->last_blink_rate = 0; 350 priv->last_blink_rate = 0;
@@ -344,7 +353,8 @@ int iwl_leds_register(struct iwl_priv *priv)
344 priv->allow_blinking = 0; 353 priv->allow_blinking = 0;
345 354
346 trigger = ieee80211_get_radio_led_name(priv->hw); 355 trigger = ieee80211_get_radio_led_name(priv->hw);
347 snprintf(name, sizeof(name), "iwl-%s:radio", 356 snprintf(priv->led[IWL_LED_TRG_RADIO].name,
357 sizeof(priv->led[IWL_LED_TRG_RADIO].name), "iwl-%s:radio",
348 wiphy_name(priv->hw->wiphy)); 358 wiphy_name(priv->hw->wiphy));
349 359
350 priv->led[IWL_LED_TRG_RADIO].led_on = iwl4965_led_on_reg; 360 priv->led[IWL_LED_TRG_RADIO].led_on = iwl4965_led_on_reg;
@@ -352,31 +362,33 @@ int iwl_leds_register(struct iwl_priv *priv)
352 priv->led[IWL_LED_TRG_RADIO].led_pattern = NULL; 362 priv->led[IWL_LED_TRG_RADIO].led_pattern = NULL;
353 363
354 ret = iwl_leds_register_led(priv, &priv->led[IWL_LED_TRG_RADIO], 364 ret = iwl_leds_register_led(priv, &priv->led[IWL_LED_TRG_RADIO],
355 IWL_LED_TRG_RADIO, 1, name, trigger); 365 IWL_LED_TRG_RADIO, 1, trigger);
356 if (ret) 366 if (ret)
357 goto exit_fail; 367 goto exit_fail;
358 368
359 trigger = ieee80211_get_assoc_led_name(priv->hw); 369 trigger = ieee80211_get_assoc_led_name(priv->hw);
360 snprintf(name, sizeof(name), "iwl-%s:assoc", 370 snprintf(priv->led[IWL_LED_TRG_ASSOC].name,
371 sizeof(priv->led[IWL_LED_TRG_ASSOC].name), "iwl-%s:assoc",
361 wiphy_name(priv->hw->wiphy)); 372 wiphy_name(priv->hw->wiphy));
362 373
363 ret = iwl_leds_register_led(priv, &priv->led[IWL_LED_TRG_ASSOC], 374 ret = iwl_leds_register_led(priv, &priv->led[IWL_LED_TRG_ASSOC],
364 IWL_LED_TRG_ASSOC, 0, name, trigger); 375 IWL_LED_TRG_ASSOC, 0, trigger);
365 376
366 /* for assoc always turn led on */ 377 /* for assoc always turn led on */
367 priv->led[IWL_LED_TRG_ASSOC].led_on = iwl4965_led_on_reg; 378 priv->led[IWL_LED_TRG_ASSOC].led_on = iwl_led_associate;
368 priv->led[IWL_LED_TRG_ASSOC].led_off = iwl4965_led_on_reg; 379 priv->led[IWL_LED_TRG_ASSOC].led_off = iwl_led_disassociate;
369 priv->led[IWL_LED_TRG_ASSOC].led_pattern = NULL; 380 priv->led[IWL_LED_TRG_ASSOC].led_pattern = NULL;
370 381
371 if (ret) 382 if (ret)
372 goto exit_fail; 383 goto exit_fail;
373 384
374 trigger = ieee80211_get_rx_led_name(priv->hw); 385 trigger = ieee80211_get_rx_led_name(priv->hw);
375 snprintf(name, sizeof(name), "iwl-%s:RX", wiphy_name(priv->hw->wiphy)); 386 snprintf(priv->led[IWL_LED_TRG_RX].name,
376 387 sizeof(priv->led[IWL_LED_TRG_RX].name), "iwl-%s:RX",
388 wiphy_name(priv->hw->wiphy));
377 389
378 ret = iwl_leds_register_led(priv, &priv->led[IWL_LED_TRG_RX], 390 ret = iwl_leds_register_led(priv, &priv->led[IWL_LED_TRG_RX],
379 IWL_LED_TRG_RX, 0, name, trigger); 391 IWL_LED_TRG_RX, 0, trigger);
380 392
381 priv->led[IWL_LED_TRG_RX].led_on = iwl_led_associated; 393 priv->led[IWL_LED_TRG_RX].led_on = iwl_led_associated;
382 priv->led[IWL_LED_TRG_RX].led_off = iwl_led_associated; 394 priv->led[IWL_LED_TRG_RX].led_off = iwl_led_associated;
@@ -386,9 +398,12 @@ int iwl_leds_register(struct iwl_priv *priv)
386 goto exit_fail; 398 goto exit_fail;
387 399
388 trigger = ieee80211_get_tx_led_name(priv->hw); 400 trigger = ieee80211_get_tx_led_name(priv->hw);
389 snprintf(name, sizeof(name), "iwl-%s:TX", wiphy_name(priv->hw->wiphy)); 401 snprintf(priv->led[IWL_LED_TRG_TX].name,
402 sizeof(priv->led[IWL_LED_TRG_TX].name), "iwl-%s:TX",
403 wiphy_name(priv->hw->wiphy));
404
390 ret = iwl_leds_register_led(priv, &priv->led[IWL_LED_TRG_TX], 405 ret = iwl_leds_register_led(priv, &priv->led[IWL_LED_TRG_TX],
391 IWL_LED_TRG_TX, 0, name, trigger); 406 IWL_LED_TRG_TX, 0, trigger);
392 407
393 priv->led[IWL_LED_TRG_TX].led_on = iwl_led_associated; 408 priv->led[IWL_LED_TRG_TX].led_on = iwl_led_associated;
394 priv->led[IWL_LED_TRG_TX].led_off = iwl_led_associated; 409 priv->led[IWL_LED_TRG_TX].led_off = iwl_led_associated;
diff --git a/drivers/net/wireless/iwlwifi/iwl-led.h b/drivers/net/wireless/iwlwifi/iwl-led.h
index 1980ae5a7e82..588c9ad20e83 100644
--- a/drivers/net/wireless/iwlwifi/iwl-led.h
+++ b/drivers/net/wireless/iwlwifi/iwl-led.h
@@ -52,6 +52,7 @@ enum led_type {
52struct iwl_led { 52struct iwl_led {
53 struct iwl_priv *priv; 53 struct iwl_priv *priv;
54 struct led_classdev led_dev; 54 struct led_classdev led_dev;
55 char name[32];
55 56
56 int (*led_on) (struct iwl_priv *priv, int led_id); 57 int (*led_on) (struct iwl_priv *priv, int led_id);
57 int (*led_off) (struct iwl_priv *priv, int led_id); 58 int (*led_off) (struct iwl_priv *priv, int led_id);
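The new name[32] member above exists because struct led_classdev keeps only the name pointer it is handed; the old code built the name in a stack-local buffer inside iwl_leds_register(), so the stored pointer went stale as soon as registration returned. A minimal sketch of the pattern the hunk adopts, assuming <linux/leds.h>; the example_led type and helper are illustrative, not the driver's exact code:

    #include <linux/device.h>
    #include <linux/leds.h>

    struct example_led {
            struct led_classdev led_dev;
            char name[32];                  /* lives as long as the LED itself */
    };

    static int example_led_register(struct device *parent,
                                    struct example_led *led,
                                    const char *wiphy_name)
    {
            snprintf(led->name, sizeof(led->name), "iwl-%s:radio", wiphy_name);
            led->led_dev.name = led->name;  /* classdev stores only this pointer */
            return led_classdev_register(parent, &led->led_dev);
    }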
diff --git a/drivers/net/wireless/iwlwifi/iwl-power.c b/drivers/net/wireless/iwlwifi/iwl-power.c
index 2e71803e09ba..028e3053c0ca 100644
--- a/drivers/net/wireless/iwlwifi/iwl-power.c
+++ b/drivers/net/wireless/iwlwifi/iwl-power.c
@@ -82,7 +82,7 @@
82 82
83/* default power management (not Tx power) table values */ 83/* default power management (not Tx power) table values */
84/* for tim 0-10 */ 84/* for tim 0-10 */
85static struct iwl_power_vec_entry range_0[IWL_POWER_AC] = { 85static struct iwl_power_vec_entry range_0[IWL_POWER_MAX] = {
86 {{NOSLP, SLP_TOUT(0), SLP_TOUT(0), SLP_VEC(0, 0, 0, 0, 0)}, 0}, 86 {{NOSLP, SLP_TOUT(0), SLP_TOUT(0), SLP_VEC(0, 0, 0, 0, 0)}, 0},
87 {{SLP, SLP_TOUT(200), SLP_TOUT(500), SLP_VEC(1, 2, 2, 2, 0xFF)}, 0}, 87 {{SLP, SLP_TOUT(200), SLP_TOUT(500), SLP_VEC(1, 2, 2, 2, 0xFF)}, 0},
88 {{SLP, SLP_TOUT(200), SLP_TOUT(300), SLP_VEC(1, 2, 2, 2, 0xFF)}, 0}, 88 {{SLP, SLP_TOUT(200), SLP_TOUT(300), SLP_VEC(1, 2, 2, 2, 0xFF)}, 0},
@@ -93,7 +93,7 @@ static struct iwl_power_vec_entry range_0[IWL_POWER_AC] = {
93 93
94 94
95/* for tim = 3-10 */ 95/* for tim = 3-10 */
96static struct iwl_power_vec_entry range_1[IWL_POWER_AC] = { 96static struct iwl_power_vec_entry range_1[IWL_POWER_MAX] = {
97 {{NOSLP, SLP_TOUT(0), SLP_TOUT(0), SLP_VEC(0, 0, 0, 0, 0)}, 0}, 97 {{NOSLP, SLP_TOUT(0), SLP_TOUT(0), SLP_VEC(0, 0, 0, 0, 0)}, 0},
98 {{SLP, SLP_TOUT(200), SLP_TOUT(500), SLP_VEC(1, 2, 3, 4, 4)}, 0}, 98 {{SLP, SLP_TOUT(200), SLP_TOUT(500), SLP_VEC(1, 2, 3, 4, 4)}, 0},
99 {{SLP, SLP_TOUT(200), SLP_TOUT(300), SLP_VEC(1, 2, 3, 4, 7)}, 0}, 99 {{SLP, SLP_TOUT(200), SLP_TOUT(300), SLP_VEC(1, 2, 3, 4, 7)}, 0},
@@ -103,7 +103,7 @@ static struct iwl_power_vec_entry range_1[IWL_POWER_AC] = {
103}; 103};
104 104
105/* for tim > 11 */ 105/* for tim > 11 */
106static struct iwl_power_vec_entry range_2[IWL_POWER_AC] = { 106static struct iwl_power_vec_entry range_2[IWL_POWER_MAX] = {
107 {{NOSLP, SLP_TOUT(0), SLP_TOUT(0), SLP_VEC(0, 0, 0, 0, 0)}, 0}, 107 {{NOSLP, SLP_TOUT(0), SLP_TOUT(0), SLP_VEC(0, 0, 0, 0, 0)}, 0},
108 {{SLP, SLP_TOUT(200), SLP_TOUT(500), SLP_VEC(1, 2, 3, 4, 0xFF)}, 0}, 108 {{SLP, SLP_TOUT(200), SLP_TOUT(500), SLP_VEC(1, 2, 3, 4, 0xFF)}, 0},
109 {{SLP, SLP_TOUT(200), SLP_TOUT(300), SLP_VEC(2, 4, 6, 7, 0xFF)}, 0}, 109 {{SLP, SLP_TOUT(200), SLP_TOUT(300), SLP_VEC(2, 4, 6, 7, 0xFF)}, 0},
@@ -112,12 +112,19 @@ static struct iwl_power_vec_entry range_2[IWL_POWER_AC] = {
112 {{SLP, SLP_TOUT(25), SLP_TOUT(25), SLP_VEC(4, 7, 10, 10, 0xFF)}, 0} 112 {{SLP, SLP_TOUT(25), SLP_TOUT(25), SLP_VEC(4, 7, 10, 10, 0xFF)}, 0}
113}; 113};
114 114
115/* set card power command */
116static int iwl_set_power(struct iwl_priv *priv, void *cmd)
117{
118 return iwl_send_cmd_pdu_async(priv, POWER_TABLE_CMD,
119 sizeof(struct iwl_powertable_cmd),
120 cmd, NULL);
121}
115/* decide the right power level according to association status 122/* decide the right power level according to association status
116 * and battery status 123 * and battery status
117 */ 124 */
118static u16 iwl_get_auto_power_mode(struct iwl_priv *priv) 125static u16 iwl_get_auto_power_mode(struct iwl_priv *priv)
119{ 126{
120 u16 mode = priv->power_data.user_power_setting; 127 u16 mode;
121 128
122 switch (priv->power_data.user_power_setting) { 129 switch (priv->power_data.user_power_setting) {
123 case IWL_POWER_AUTO: 130 case IWL_POWER_AUTO:
@@ -129,12 +136,16 @@ static u16 iwl_get_auto_power_mode(struct iwl_priv *priv)
129 else 136 else
130 mode = IWL_POWER_ON_AC_DISASSOC; 137 mode = IWL_POWER_ON_AC_DISASSOC;
131 break; 138 break;
139 /* FIXME: remove battery and ac from here */
132 case IWL_POWER_BATTERY: 140 case IWL_POWER_BATTERY:
133 mode = IWL_POWER_INDEX_3; 141 mode = IWL_POWER_INDEX_3;
134 break; 142 break;
135 case IWL_POWER_AC: 143 case IWL_POWER_AC:
136 mode = IWL_POWER_MODE_CAM; 144 mode = IWL_POWER_MODE_CAM;
137 break; 145 break;
146 default:
147 mode = priv->power_data.user_power_setting;
148 break;
138 } 149 }
139 return mode; 150 return mode;
140} 151}
@@ -144,7 +155,7 @@ static int iwl_power_init_handle(struct iwl_priv *priv)
144{ 155{
145 int ret = 0, i; 156 int ret = 0, i;
146 struct iwl_power_mgr *pow_data; 157 struct iwl_power_mgr *pow_data;
147 int size = sizeof(struct iwl_power_vec_entry) * IWL_POWER_AC; 158 int size = sizeof(struct iwl_power_vec_entry) * IWL_POWER_MAX;
148 u16 pci_pm; 159 u16 pci_pm;
149 160
150 IWL_DEBUG_POWER("Initialize power \n"); 161 IWL_DEBUG_POWER("Initialize power \n");
@@ -162,11 +173,11 @@ static int iwl_power_init_handle(struct iwl_priv *priv)
162 if (ret != 0) 173 if (ret != 0)
163 return 0; 174 return 0;
164 else { 175 else {
165 struct iwl4965_powertable_cmd *cmd; 176 struct iwl_powertable_cmd *cmd;
166 177
167 IWL_DEBUG_POWER("adjust power command flags\n"); 178 IWL_DEBUG_POWER("adjust power command flags\n");
168 179
169 for (i = 0; i < IWL_POWER_AC; i++) { 180 for (i = 0; i < IWL_POWER_MAX; i++) {
170 cmd = &pow_data->pwr_range_0[i].cmd; 181 cmd = &pow_data->pwr_range_0[i].cmd;
171 182
172 if (pci_pm & 0x1) 183 if (pci_pm & 0x1)
@@ -180,7 +191,7 @@ static int iwl_power_init_handle(struct iwl_priv *priv)
180 191
181/* adjust power command according to dtim period and power level*/ 192/* adjust power command according to dtim period and power level*/
182static int iwl_update_power_command(struct iwl_priv *priv, 193static int iwl_update_power_command(struct iwl_priv *priv,
183 struct iwl4965_powertable_cmd *cmd, 194 struct iwl_powertable_cmd *cmd,
184 u16 mode) 195 u16 mode)
185{ 196{
186 int ret = 0, i; 197 int ret = 0, i;
@@ -204,7 +215,7 @@ static int iwl_update_power_command(struct iwl_priv *priv,
204 range = &pow_data->pwr_range_2[0]; 215 range = &pow_data->pwr_range_2[0];
205 216
206 period = pow_data->dtim_period; 217 period = pow_data->dtim_period;
207 memcpy(cmd, &range[mode].cmd, sizeof(struct iwl4965_powertable_cmd)); 218 memcpy(cmd, &range[mode].cmd, sizeof(struct iwl_powertable_cmd));
208 219
209 if (period == 0) { 220 if (period == 0) {
210 period = 1; 221 period = 1;
@@ -258,17 +269,18 @@ int iwl_power_update_mode(struct iwl_priv *priv, u8 refresh)
258 * else user level */ 269 * else user level */
259 270
260 switch (setting->system_power_setting) { 271 switch (setting->system_power_setting) {
261 case IWL_POWER_AUTO: 272 case IWL_POWER_SYS_AUTO:
262 final_mode = iwl_get_auto_power_mode(priv); 273 final_mode = iwl_get_auto_power_mode(priv);
263 break; 274 break;
264 case IWL_POWER_BATTERY: 275 case IWL_POWER_SYS_BATTERY:
265 final_mode = IWL_POWER_INDEX_3; 276 final_mode = IWL_POWER_INDEX_3;
266 break; 277 break;
267 case IWL_POWER_AC: 278 case IWL_POWER_SYS_AC:
268 final_mode = IWL_POWER_MODE_CAM; 279 final_mode = IWL_POWER_MODE_CAM;
269 break; 280 break;
270 default: 281 default:
271 final_mode = setting->system_power_setting; 282 final_mode = IWL_POWER_INDEX_3;
283 WARN_ON(1);
272 } 284 }
273 285
274 if (setting->critical_power_setting > final_mode) 286 if (setting->critical_power_setting > final_mode)
@@ -280,7 +292,7 @@ int iwl_power_update_mode(struct iwl_priv *priv, u8 refresh)
280 292
281 if (!iwl_is_rfkill(priv) && !setting->power_disabled && 293 if (!iwl_is_rfkill(priv) && !setting->power_disabled &&
282 ((setting->power_mode != final_mode) || refresh)) { 294 ((setting->power_mode != final_mode) || refresh)) {
283 struct iwl4965_powertable_cmd cmd; 295 struct iwl_powertable_cmd cmd;
284 296
285 if (final_mode != IWL_POWER_MODE_CAM) 297 if (final_mode != IWL_POWER_MODE_CAM)
286 set_bit(STATUS_POWER_PMI, &priv->status); 298 set_bit(STATUS_POWER_PMI, &priv->status);
@@ -291,8 +303,7 @@ int iwl_power_update_mode(struct iwl_priv *priv, u8 refresh)
291 if (final_mode == IWL_POWER_INDEX_5) 303 if (final_mode == IWL_POWER_INDEX_5)
292 cmd.flags |= IWL_POWER_FAST_PD; 304 cmd.flags |= IWL_POWER_FAST_PD;
293 305
294 if (priv->cfg->ops->lib->set_power) 306 ret = iwl_set_power(priv, &cmd);
295 ret = priv->cfg->ops->lib->set_power(priv, &cmd);
296 307
297 if (final_mode == IWL_POWER_MODE_CAM) 308 if (final_mode == IWL_POWER_MODE_CAM)
298 clear_bit(STATUS_POWER_PMI, &priv->status); 309 clear_bit(STATUS_POWER_PMI, &priv->status);
@@ -388,7 +399,7 @@ void iwl_power_initialize(struct iwl_priv *priv)
388 iwl_power_init_handle(priv); 399 iwl_power_init_handle(priv);
389 priv->power_data.user_power_setting = IWL_POWER_AUTO; 400 priv->power_data.user_power_setting = IWL_POWER_AUTO;
390 priv->power_data.power_disabled = 0; 401 priv->power_data.power_disabled = 0;
391 priv->power_data.system_power_setting = IWL_POWER_AUTO; 402 priv->power_data.system_power_setting = IWL_POWER_SYS_AUTO;
392 priv->power_data.is_battery_active = 0; 403 priv->power_data.is_battery_active = 0;
393 priv->power_data.power_disabled = 0; 404 priv->power_data.power_disabled = 0;
394 priv->power_data.critical_power_setting = 0; 405 priv->power_data.critical_power_setting = 0;
diff --git a/drivers/net/wireless/iwlwifi/iwl-power.h b/drivers/net/wireless/iwlwifi/iwl-power.h
index b066724a1c2b..abcbbf96a84e 100644
--- a/drivers/net/wireless/iwlwifi/iwl-power.h
+++ b/drivers/net/wireless/iwlwifi/iwl-power.h
@@ -33,12 +33,25 @@
33 33
34struct iwl_priv; 34struct iwl_priv;
35 35
36#define IWL_POWER_MODE_CAM 0x00 /* Continuously Aware Mode, always on */ 36enum {
37#define IWL_POWER_INDEX_3 0x03 37 IWL_POWER_MODE_CAM, /* Continuously Aware Mode, always on */
38#define IWL_POWER_INDEX_5 0x05 38 IWL_POWER_INDEX_1,
39#define IWL_POWER_AC 0x06 39 IWL_POWER_INDEX_2,
40#define IWL_POWER_BATTERY 0x07 40 IWL_POWER_INDEX_3,
41#define IWL_POWER_AUTO 0x08 41 IWL_POWER_INDEX_4,
42 IWL_POWER_INDEX_5,
43 IWL_POWER_AUTO,
44 IWL_POWER_MAX = IWL_POWER_AUTO,
45 IWL_POWER_AC,
46 IWL_POWER_BATTERY,
47};
48
49enum {
50 IWL_POWER_SYS_AUTO,
51 IWL_POWER_SYS_AC,
52 IWL_POWER_SYS_BATTERY,
53};
54
42#define IWL_POWER_LIMIT 0x08 55#define IWL_POWER_LIMIT 0x08
43#define IWL_POWER_MASK 0x0F 56#define IWL_POWER_MASK 0x0F
44#define IWL_POWER_ENABLED 0x10 57#define IWL_POWER_ENABLED 0x10
@@ -46,15 +59,15 @@ struct iwl_priv;
46/* Power management (not Tx power) structures */ 59/* Power management (not Tx power) structures */
47 60
48struct iwl_power_vec_entry { 61struct iwl_power_vec_entry {
49 struct iwl4965_powertable_cmd cmd; 62 struct iwl_powertable_cmd cmd;
50 u8 no_dtim; 63 u8 no_dtim;
51}; 64};
52 65
53struct iwl_power_mgr { 66struct iwl_power_mgr {
54 spinlock_t lock; 67 spinlock_t lock;
55 struct iwl_power_vec_entry pwr_range_0[IWL_POWER_AC]; 68 struct iwl_power_vec_entry pwr_range_0[IWL_POWER_MAX];
56 struct iwl_power_vec_entry pwr_range_1[IWL_POWER_AC]; 69 struct iwl_power_vec_entry pwr_range_1[IWL_POWER_MAX];
57 struct iwl_power_vec_entry pwr_range_2[IWL_POWER_AC]; 70 struct iwl_power_vec_entry pwr_range_2[IWL_POWER_MAX];
58 u32 dtim_period; 71 u32 dtim_period;
59 /* final power level that used to calculate final power command */ 72 /* final power level that used to calculate final power command */
60 u8 power_mode; 73 u8 power_mode;
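Replacing the loose #defines with two enums separates the per-level power indices from the system-wide setting, and the IWL_POWER_MAX sentinel (equal to IWL_POWER_AUTO) now sizes the pwr_range_* tables instead of the unrelated IWL_POWER_AC value. A small sketch of the sizing idiom, with hypothetical ex_ names standing in for the real symbols:

    /* The sentinel tracks the number of real levels automatically, so the
     * table length never drifts from the enum when levels are added. */
    enum {
            EX_POWER_MODE_CAM,              /* 0: continuously aware, no sleep */
            EX_POWER_INDEX_1,
            EX_POWER_INDEX_2,
            EX_POWER_INDEX_3,
            EX_POWER_INDEX_4,
            EX_POWER_INDEX_5,
            EX_POWER_AUTO,
            EX_POWER_MAX = EX_POWER_AUTO,   /* 6 table entries: CAM + five indices */
    };

    static int ex_pwr_range[EX_POWER_MAX];  /* sized by the sentinel, not a magic 6 */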
diff --git a/drivers/net/wireless/iwlwifi/iwl-prph.h b/drivers/net/wireless/iwlwifi/iwl-prph.h
index 70d9c7568b98..ee5afd48d3af 100644
--- a/drivers/net/wireless/iwlwifi/iwl-prph.h
+++ b/drivers/net/wireless/iwlwifi/iwl-prph.h
@@ -84,14 +84,16 @@
84#define APMG_CLK_VAL_DMA_CLK_RQT (0x00000200) 84#define APMG_CLK_VAL_DMA_CLK_RQT (0x00000200)
85#define APMG_CLK_VAL_BSM_CLK_RQT (0x00000800) 85#define APMG_CLK_VAL_BSM_CLK_RQT (0x00000800)
86 86
87#define APMG_PS_CTRL_VAL_RESET_REQ (0x04000000)
88 87
89#define APMG_PCIDEV_STT_VAL_L1_ACT_DIS (0x00000800) 88#define APMG_PS_CTRL_EARLY_PWR_OFF_RESET_DIS (0x00400000)
89#define APMG_PS_CTRL_VAL_RESET_REQ (0x04000000)
90#define APMG_PS_CTRL_MSK_PWR_SRC (0x03000000)
91#define APMG_PS_CTRL_VAL_PWR_SRC_VMAIN (0x00000000)
92#define APMG_PS_CTRL_VAL_PWR_SRC_MAX (0x01000000) /* 3945 only */
93#define APMG_PS_CTRL_VAL_PWR_SRC_VAUX (0x02000000)
90 94
91#define APMG_PS_CTRL_MSK_PWR_SRC (0x03000000)
92#define APMG_PS_CTRL_VAL_PWR_SRC_VMAIN (0x00000000)
93#define APMG_PS_CTRL_VAL_PWR_SRC_VAUX (0x01000000)
94 95
96#define APMG_PCIDEV_STT_VAL_L1_ACT_DIS (0x00000800)
95 97
96/** 98/**
97 * BSM (Bootstrap State Machine) 99 * BSM (Bootstrap State Machine)
diff --git a/drivers/net/wireless/iwlwifi/iwl-rx.c b/drivers/net/wireless/iwlwifi/iwl-rx.c
index e2d9afba38a5..f3f6ea49fdd2 100644
--- a/drivers/net/wireless/iwlwifi/iwl-rx.c
+++ b/drivers/net/wireless/iwlwifi/iwl-rx.c
@@ -791,7 +791,7 @@ static inline void iwl_dbg_report_frame(struct iwl_priv *priv,
791 791
792static void iwl_add_radiotap(struct iwl_priv *priv, 792static void iwl_add_radiotap(struct iwl_priv *priv,
793 struct sk_buff *skb, 793 struct sk_buff *skb,
794 struct iwl4965_rx_phy_res *rx_start, 794 struct iwl_rx_phy_res *rx_start,
795 struct ieee80211_rx_status *stats, 795 struct ieee80211_rx_status *stats,
796 u32 ampdu_status) 796 u32 ampdu_status)
797{ 797{
@@ -1010,8 +1010,8 @@ static void iwl_pass_packet_to_mac80211(struct iwl_priv *priv,
1010 struct ieee80211_rx_status *stats) 1010 struct ieee80211_rx_status *stats)
1011{ 1011{
1012 struct iwl_rx_packet *pkt = (struct iwl_rx_packet *)rxb->skb->data; 1012 struct iwl_rx_packet *pkt = (struct iwl_rx_packet *)rxb->skb->data;
1013 struct iwl4965_rx_phy_res *rx_start = (include_phy) ? 1013 struct iwl_rx_phy_res *rx_start = (include_phy) ?
1014 (struct iwl4965_rx_phy_res *)&(pkt->u.raw[0]) : NULL; 1014 (struct iwl_rx_phy_res *)&(pkt->u.raw[0]) : NULL;
1015 struct ieee80211_hdr *hdr; 1015 struct ieee80211_hdr *hdr;
1016 u16 len; 1016 u16 len;
1017 __le32 *rx_end; 1017 __le32 *rx_end;
@@ -1020,7 +1020,7 @@ static void iwl_pass_packet_to_mac80211(struct iwl_priv *priv,
1020 u32 ampdu_status_legacy; 1020 u32 ampdu_status_legacy;
1021 1021
1022 if (!include_phy && priv->last_phy_res[0]) 1022 if (!include_phy && priv->last_phy_res[0])
1023 rx_start = (struct iwl4965_rx_phy_res *)&priv->last_phy_res[1]; 1023 rx_start = (struct iwl_rx_phy_res *)&priv->last_phy_res[1];
1024 1024
1025 if (!rx_start) { 1025 if (!rx_start) {
1026 IWL_ERROR("MPDU frame without a PHY data\n"); 1026 IWL_ERROR("MPDU frame without a PHY data\n");
@@ -1032,8 +1032,8 @@ static void iwl_pass_packet_to_mac80211(struct iwl_priv *priv,
1032 1032
1033 len = le16_to_cpu(rx_start->byte_count); 1033 len = le16_to_cpu(rx_start->byte_count);
1034 1034
1035 rx_end = (__le32 *) ((u8 *) &pkt->u.raw[0] + 1035 rx_end = (__le32 *)((u8 *) &pkt->u.raw[0] +
1036 sizeof(struct iwl4965_rx_phy_res) + 1036 sizeof(struct iwl_rx_phy_res) +
1037 rx_start->cfg_phy_cnt + len); 1037 rx_start->cfg_phy_cnt + len);
1038 1038
1039 } else { 1039 } else {
@@ -1084,40 +1084,13 @@ static void iwl_pass_packet_to_mac80211(struct iwl_priv *priv,
1084} 1084}
1085 1085
1086/* Calc max signal level (dBm) among 3 possible receivers */ 1086/* Calc max signal level (dBm) among 3 possible receivers */
1087static int iwl_calc_rssi(struct iwl_priv *priv, 1087static inline int iwl_calc_rssi(struct iwl_priv *priv,
1088 struct iwl4965_rx_phy_res *rx_resp) 1088 struct iwl_rx_phy_res *rx_resp)
1089{ 1089{
1090 /* data from PHY/DSP regarding signal strength, etc., 1090 return priv->cfg->ops->utils->calc_rssi(priv, rx_resp);
1091 * contents are always there, not configurable by host. */
1092 struct iwl4965_rx_non_cfg_phy *ncphy =
1093 (struct iwl4965_rx_non_cfg_phy *)rx_resp->non_cfg_phy;
1094 u32 agc = (le16_to_cpu(ncphy->agc_info) & IWL_AGC_DB_MASK)
1095 >> IWL_AGC_DB_POS;
1096
1097 u32 valid_antennae =
1098 (le16_to_cpu(rx_resp->phy_flags) & RX_PHY_FLAGS_ANTENNAE_MASK)
1099 >> RX_PHY_FLAGS_ANTENNAE_OFFSET;
1100 u8 max_rssi = 0;
1101 u32 i;
1102
1103 /* Find max rssi among 3 possible receivers.
1104 * These values are measured by the digital signal processor (DSP).
1105 * They should stay fairly constant even as the signal strength varies,
1106 * if the radio's automatic gain control (AGC) is working right.
1107 * AGC value (see below) will provide the "interesting" info. */
1108 for (i = 0; i < 3; i++)
1109 if (valid_antennae & (1 << i))
1110 max_rssi = max(ncphy->rssi_info[i << 1], max_rssi);
1111
1112 IWL_DEBUG_STATS("Rssi In A %d B %d C %d Max %d AGC dB %d\n",
1113 ncphy->rssi_info[0], ncphy->rssi_info[2], ncphy->rssi_info[4],
1114 max_rssi, agc);
1115
1116 /* dBm = max_rssi dB - agc dB - constant.
1117 * Higher AGC (higher radio gain) means lower signal. */
1118 return max_rssi - agc - IWL_RSSI_OFFSET;
1119} 1091}
1120 1092
1093
1121static void iwl_sta_modify_ps_wake(struct iwl_priv *priv, int sta_id) 1094static void iwl_sta_modify_ps_wake(struct iwl_priv *priv, int sta_id)
1122{ 1095{
1123 unsigned long flags; 1096 unsigned long flags;
@@ -1180,9 +1153,9 @@ void iwl_rx_reply_rx(struct iwl_priv *priv,
1180 * this rx packet for legacy frames, 1153 * this rx packet for legacy frames,
1181 * or phy data cached from REPLY_RX_PHY_CMD for HT frames. */ 1154 * or phy data cached from REPLY_RX_PHY_CMD for HT frames. */
1182 int include_phy = (pkt->hdr.cmd == REPLY_RX); 1155 int include_phy = (pkt->hdr.cmd == REPLY_RX);
1183 struct iwl4965_rx_phy_res *rx_start = (include_phy) ? 1156 struct iwl_rx_phy_res *rx_start = (include_phy) ?
1184 (struct iwl4965_rx_phy_res *)&(pkt->u.raw[0]) : 1157 (struct iwl_rx_phy_res *)&(pkt->u.raw[0]) :
1185 (struct iwl4965_rx_phy_res *)&priv->last_phy_res[1]; 1158 (struct iwl_rx_phy_res *)&priv->last_phy_res[1];
1186 __le32 *rx_end; 1159 __le32 *rx_end;
1187 unsigned int len = 0; 1160 unsigned int len = 0;
1188 u16 fc; 1161 u16 fc;
@@ -1210,7 +1183,7 @@ void iwl_rx_reply_rx(struct iwl_priv *priv,
1210 1183
1211 if (!include_phy) { 1184 if (!include_phy) {
1212 if (priv->last_phy_res[0]) 1185 if (priv->last_phy_res[0])
1213 rx_start = (struct iwl4965_rx_phy_res *) 1186 rx_start = (struct iwl_rx_phy_res *)
1214 &priv->last_phy_res[1]; 1187 &priv->last_phy_res[1];
1215 else 1188 else
1216 rx_start = NULL; 1189 rx_start = NULL;
@@ -1227,7 +1200,7 @@ void iwl_rx_reply_rx(struct iwl_priv *priv,
1227 1200
1228 len = le16_to_cpu(rx_start->byte_count); 1201 len = le16_to_cpu(rx_start->byte_count);
1229 rx_end = (__le32 *)(pkt->u.raw + rx_start->cfg_phy_cnt + 1202 rx_end = (__le32 *)(pkt->u.raw + rx_start->cfg_phy_cnt +
1230 sizeof(struct iwl4965_rx_phy_res) + len); 1203 sizeof(struct iwl_rx_phy_res) + len);
1231 } else { 1204 } else {
1232 struct iwl4965_rx_mpdu_res_start *amsdu = 1205 struct iwl4965_rx_mpdu_res_start *amsdu =
1233 (struct iwl4965_rx_mpdu_res_start *)pkt->u.raw; 1206 (struct iwl4965_rx_mpdu_res_start *)pkt->u.raw;
@@ -1316,6 +1289,6 @@ void iwl_rx_reply_rx_phy(struct iwl_priv *priv,
1316 struct iwl_rx_packet *pkt = (struct iwl_rx_packet *)rxb->skb->data; 1289 struct iwl_rx_packet *pkt = (struct iwl_rx_packet *)rxb->skb->data;
1317 priv->last_phy_res[0] = 1; 1290 priv->last_phy_res[0] = 1;
1318 memcpy(&priv->last_phy_res[1], &(pkt->u.raw[0]), 1291 memcpy(&priv->last_phy_res[1], &(pkt->u.raw[0]),
1319 sizeof(struct iwl4965_rx_phy_res)); 1292 sizeof(struct iwl_rx_phy_res));
1320} 1293}
1321EXPORT_SYMBOL(iwl_rx_reply_rx_phy); 1294EXPORT_SYMBOL(iwl_rx_reply_rx_phy);
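Dropping the open-coded 4965 RSSI math from iwl-rx.c in favour of priv->cfg->ops->utils->calc_rssi() keeps the shared RX path hardware-agnostic: each front end registers its own dBm conversion through the ops table. A self-contained sketch of that hook, using hypothetical ex_ types and an illustrative offset; the real field layout and constant stay in the 4965-specific code:

    struct ex_priv;
    struct ex_phy_res { int rssi_a, rssi_b, rssi_c, agc_db; };

    struct ex_utils_ops {
            int (*calc_rssi)(struct ex_priv *priv, struct ex_phy_res *res);
    };

    #define EX_RSSI_OFFSET 44               /* illustrative constant */

    /* 4965-flavour conversion: strongest antenna minus AGC gain minus offset */
    static int ex4965_calc_rssi(struct ex_priv *priv, struct ex_phy_res *res)
    {
            int max = res->rssi_a;

            if (res->rssi_b > max)
                    max = res->rssi_b;
            if (res->rssi_c > max)
                    max = res->rssi_c;
            return max - res->agc_db - EX_RSSI_OFFSET;
    }

    static const struct ex_utils_ops ex4965_utils = {
            .calc_rssi = ex4965_calc_rssi,
    };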
diff --git a/drivers/net/wireless/iwlwifi/iwl-scan.c b/drivers/net/wireless/iwlwifi/iwl-scan.c
index efc750d2fc5c..9bb6adb28b73 100644
--- a/drivers/net/wireless/iwlwifi/iwl-scan.c
+++ b/drivers/net/wireless/iwlwifi/iwl-scan.c
@@ -202,6 +202,7 @@ static int iwl_send_scan_abort(struct iwl_priv *priv)
202 clear_bit(STATUS_SCAN_HW, &priv->status); 202 clear_bit(STATUS_SCAN_HW, &priv->status);
203 } 203 }
204 204
205 priv->alloc_rxb_skb--;
205 dev_kfree_skb_any(cmd.meta.u.skb); 206 dev_kfree_skb_any(cmd.meta.u.skb);
206 207
207 return ret; 208 return ret;
@@ -270,6 +271,7 @@ static void iwl_rx_scan_results_notif(struct iwl_priv *priv,
270static void iwl_rx_scan_complete_notif(struct iwl_priv *priv, 271static void iwl_rx_scan_complete_notif(struct iwl_priv *priv,
271 struct iwl_rx_mem_buffer *rxb) 272 struct iwl_rx_mem_buffer *rxb)
272{ 273{
274#ifdef CONFIG_IWLWIFI_DEBUG
273 struct iwl_rx_packet *pkt = (struct iwl_rx_packet *)rxb->skb->data; 275 struct iwl_rx_packet *pkt = (struct iwl_rx_packet *)rxb->skb->data;
274 struct iwl_scancomplete_notification *scan_notif = (void *)pkt->u.raw; 276 struct iwl_scancomplete_notification *scan_notif = (void *)pkt->u.raw;
275 277
@@ -277,6 +279,7 @@ static void iwl_rx_scan_complete_notif(struct iwl_priv *priv,
277 scan_notif->scanned_channels, 279 scan_notif->scanned_channels,
278 scan_notif->tsf_low, 280 scan_notif->tsf_low,
279 scan_notif->tsf_high, scan_notif->status); 281 scan_notif->tsf_high, scan_notif->status);
282#endif
280 283
281 /* The HW is no longer scanning */ 284 /* The HW is no longer scanning */
282 clear_bit(STATUS_SCAN_HW, &priv->status); 285 clear_bit(STATUS_SCAN_HW, &priv->status);
diff --git a/drivers/net/wireless/iwlwifi/iwl-sta.c b/drivers/net/wireless/iwlwifi/iwl-sta.c
index 6d1467d0bd9d..60a6e0106036 100644
--- a/drivers/net/wireless/iwlwifi/iwl-sta.c
+++ b/drivers/net/wireless/iwlwifi/iwl-sta.c
@@ -823,7 +823,7 @@ int iwl_send_lq_cmd(struct iwl_priv *priv,
823 if (lq->sta_id == 0xFF) 823 if (lq->sta_id == 0xFF)
824 lq->sta_id = IWL_AP_ID; 824 lq->sta_id = IWL_AP_ID;
825 825
826 iwl_dump_lq_cmd(priv,lq); 826 iwl_dump_lq_cmd(priv, lq);
827 827
828 if (iwl_is_associated(priv) && priv->assoc_station_added) 828 if (iwl_is_associated(priv) && priv->assoc_station_added)
829 return iwl_send_cmd(priv, &cmd); 829 return iwl_send_cmd(priv, &cmd);
@@ -839,7 +839,7 @@ EXPORT_SYMBOL(iwl_send_lq_cmd);
839 * for automatic fallback during transmission. 839 * for automatic fallback during transmission.
840 * 840 *
841 * NOTE: This sets up a default set of values. These will be replaced later 841 * NOTE: This sets up a default set of values. These will be replaced later
842 * if the driver's iwl-4965-rs rate scaling algorithm is used, instead of 842 * if the driver's iwl-agn-rs rate scaling algorithm is used, instead of
843 * rc80211_simple. 843 * rc80211_simple.
844 * 844 *
845 * NOTE: Run REPLY_ADD_STA command to set up station table entry, before 845 * NOTE: Run REPLY_ADD_STA command to set up station table entry, before
diff --git a/drivers/net/wireless/iwlwifi/iwl-tx.c b/drivers/net/wireless/iwlwifi/iwl-tx.c
index 9b50b1052b09..4108c7c8f00f 100644
--- a/drivers/net/wireless/iwlwifi/iwl-tx.c
+++ b/drivers/net/wireless/iwlwifi/iwl-tx.c
@@ -208,11 +208,12 @@ EXPORT_SYMBOL(iwl_txq_update_write_ptr);
208 * Free all buffers. 208 * Free all buffers.
209 * 0-fill, but do not free "txq" descriptor structure. 209 * 0-fill, but do not free "txq" descriptor structure.
210 */ 210 */
211static void iwl_tx_queue_free(struct iwl_priv *priv, struct iwl_tx_queue *txq) 211static void iwl_tx_queue_free(struct iwl_priv *priv, int txq_id)
212{ 212{
213 struct iwl_tx_queue *txq = &priv->txq[txq_id];
213 struct iwl_queue *q = &txq->q; 214 struct iwl_queue *q = &txq->q;
214 struct pci_dev *dev = priv->pci_dev; 215 struct pci_dev *dev = priv->pci_dev;
215 int len; 216 int i, slots_num, len;
216 217
217 if (q->n_bd == 0) 218 if (q->n_bd == 0)
218 return; 219 return;
@@ -227,7 +228,12 @@ static void iwl_tx_queue_free(struct iwl_priv *priv, struct iwl_tx_queue *txq)
227 len += IWL_MAX_SCAN_SIZE; 228 len += IWL_MAX_SCAN_SIZE;
228 229
229 /* De-alloc array of command/tx buffers */ 230 /* De-alloc array of command/tx buffers */
230 pci_free_consistent(dev, len, txq->cmd, txq->dma_addr_cmd); 231 slots_num = (txq_id == IWL_CMD_QUEUE_NUM) ?
232 TFD_CMD_SLOTS : TFD_TX_CMD_SLOTS;
233 for (i = 0; i < slots_num; i++)
234 kfree(txq->cmd[i]);
235 if (txq_id == IWL_CMD_QUEUE_NUM)
236 kfree(txq->cmd[slots_num]);
231 237
232 /* De-alloc circular buffer of TFDs */ 238 /* De-alloc circular buffer of TFDs */
233 if (txq->q.n_bd) 239 if (txq->q.n_bd)
@@ -400,8 +406,7 @@ static int iwl_tx_queue_init(struct iwl_priv *priv,
400 struct iwl_tx_queue *txq, 406 struct iwl_tx_queue *txq,
401 int slots_num, u32 txq_id) 407 int slots_num, u32 txq_id)
402{ 408{
403 struct pci_dev *dev = priv->pci_dev; 409 int i, len;
404 int len;
405 int rc = 0; 410 int rc = 0;
406 411
407 /* 412 /*
@@ -412,17 +417,25 @@ static int iwl_tx_queue_init(struct iwl_priv *priv,
412 * For normal Tx queues (all other queues), no super-size command 417 * For normal Tx queues (all other queues), no super-size command
413 * space is needed. 418 * space is needed.
414 */ 419 */
415 len = sizeof(struct iwl_cmd) * slots_num; 420 len = sizeof(struct iwl_cmd);
416 if (txq_id == IWL_CMD_QUEUE_NUM) 421 for (i = 0; i <= slots_num; i++) {
417 len += IWL_MAX_SCAN_SIZE; 422 if (i == slots_num) {
418 txq->cmd = pci_alloc_consistent(dev, len, &txq->dma_addr_cmd); 423 if (txq_id == IWL_CMD_QUEUE_NUM)
419 if (!txq->cmd) 424 len += IWL_MAX_SCAN_SIZE;
420 return -ENOMEM; 425 else
426 continue;
427 }
428
429 txq->cmd[i] = kmalloc(len, GFP_KERNEL | GFP_DMA);
430 if (!txq->cmd[i])
431 return -ENOMEM;
432 }
421 433
422 /* Alloc driver data array and TFD circular buffer */ 434 /* Alloc driver data array and TFD circular buffer */
423 rc = iwl_tx_queue_alloc(priv, txq, txq_id); 435 rc = iwl_tx_queue_alloc(priv, txq, txq_id);
424 if (rc) { 436 if (rc) {
425 pci_free_consistent(dev, len, txq->cmd, txq->dma_addr_cmd); 437 for (i = 0; i < slots_num; i++)
438 kfree(txq->cmd[i]);
426 439
427 return -ENOMEM; 440 return -ENOMEM;
428 } 441 }
@@ -451,7 +464,7 @@ void iwl_hw_txq_ctx_free(struct iwl_priv *priv)
451 464
452 /* Tx queues */ 465 /* Tx queues */
453 for (txq_id = 0; txq_id < priv->hw_params.max_txq_num; txq_id++) 466 for (txq_id = 0; txq_id < priv->hw_params.max_txq_num; txq_id++)
454 iwl_tx_queue_free(priv, &priv->txq[txq_id]); 467 iwl_tx_queue_free(priv, txq_id);
455 468
456 /* Keep-warm buffer */ 469 /* Keep-warm buffer */
457 iwl_kw_free(priv); 470 iwl_kw_free(priv);
@@ -751,20 +764,19 @@ int iwl_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
751 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; 764 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
752 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); 765 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
753 struct iwl_tfd_frame *tfd; 766 struct iwl_tfd_frame *tfd;
754 u32 *control_flags; 767 struct iwl_tx_queue *txq;
755 int txq_id = skb_get_queue_mapping(skb); 768 struct iwl_queue *q;
756 struct iwl_tx_queue *txq = NULL; 769 struct iwl_cmd *out_cmd;
757 struct iwl_queue *q = NULL; 770 struct iwl_tx_cmd *tx_cmd;
771 int swq_id, txq_id;
758 dma_addr_t phys_addr; 772 dma_addr_t phys_addr;
759 dma_addr_t txcmd_phys; 773 dma_addr_t txcmd_phys;
760 dma_addr_t scratch_phys; 774 dma_addr_t scratch_phys;
761 struct iwl_cmd *out_cmd = NULL;
762 struct iwl_tx_cmd *tx_cmd;
763 u16 len, idx, len_org; 775 u16 len, idx, len_org;
764 u16 seq_number = 0; 776 u16 seq_number = 0;
765 u8 id, hdr_len, unicast;
766 u8 sta_id;
767 __le16 fc; 777 __le16 fc;
778 u8 hdr_len, unicast;
779 u8 sta_id;
768 u8 wait_write_ptr = 0; 780 u8 wait_write_ptr = 0;
769 u8 tid = 0; 781 u8 tid = 0;
770 u8 *qc = NULL; 782 u8 *qc = NULL;
@@ -789,7 +801,6 @@ int iwl_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
789 } 801 }
790 802
791 unicast = !is_multicast_ether_addr(hdr->addr1); 803 unicast = !is_multicast_ether_addr(hdr->addr1);
792 id = 0;
793 804
794 fc = hdr->frame_control; 805 fc = hdr->frame_control;
795 806
@@ -827,14 +838,16 @@ int iwl_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
827 838
828 IWL_DEBUG_TX("station Id %d\n", sta_id); 839 IWL_DEBUG_TX("station Id %d\n", sta_id);
829 840
841 swq_id = skb_get_queue_mapping(skb);
842 txq_id = swq_id;
830 if (ieee80211_is_data_qos(fc)) { 843 if (ieee80211_is_data_qos(fc)) {
831 qc = ieee80211_get_qos_ctl(hdr); 844 qc = ieee80211_get_qos_ctl(hdr);
832 tid = qc[0] & 0xf; 845 tid = qc[0] & 0xf;
833 seq_number = priv->stations[sta_id].tid[tid].seq_number & 846 seq_number = priv->stations[sta_id].tid[tid].seq_number;
834 IEEE80211_SCTL_SEQ; 847 seq_number &= IEEE80211_SCTL_SEQ;
835 hdr->seq_ctrl = cpu_to_le16(seq_number) | 848 hdr->seq_ctrl = hdr->seq_ctrl &
836 (hdr->seq_ctrl & 849 __constant_cpu_to_le16(IEEE80211_SCTL_FRAG);
837 __constant_cpu_to_le16(IEEE80211_SCTL_FRAG)); 850 hdr->seq_ctrl |= cpu_to_le16(seq_number);
838 seq_number += 0x10; 851 seq_number += 0x10;
839 /* aggregation is on for this <sta,tid> */ 852 /* aggregation is on for this <sta,tid> */
840 if (info->flags & IEEE80211_TX_CTL_AMPDU) 853 if (info->flags & IEEE80211_TX_CTL_AMPDU)
@@ -851,7 +864,6 @@ int iwl_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
851 /* Set up first empty TFD within this queue's circular TFD buffer */ 864 /* Set up first empty TFD within this queue's circular TFD buffer */
852 tfd = &txq->bd[q->write_ptr]; 865 tfd = &txq->bd[q->write_ptr];
853 memset(tfd, 0, sizeof(*tfd)); 866 memset(tfd, 0, sizeof(*tfd));
854 control_flags = (u32 *) tfd;
855 idx = get_cmd_index(q, q->write_ptr, 0); 867 idx = get_cmd_index(q, q->write_ptr, 0);
856 868
857 /* Set up driver data for this TFD */ 869 /* Set up driver data for this TFD */
@@ -859,7 +871,7 @@ int iwl_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
859 txq->txb[q->write_ptr].skb[0] = skb; 871 txq->txb[q->write_ptr].skb[0] = skb;
860 872
861 /* Set up first empty entry in queue's array of Tx/cmd buffers */ 873 /* Set up first empty entry in queue's array of Tx/cmd buffers */
862 out_cmd = &txq->cmd[idx]; 874 out_cmd = txq->cmd[idx];
863 tx_cmd = &out_cmd->cmd.tx; 875 tx_cmd = &out_cmd->cmd.tx;
864 memset(&out_cmd->hdr, 0, sizeof(out_cmd->hdr)); 876 memset(&out_cmd->hdr, 0, sizeof(out_cmd->hdr));
865 memset(tx_cmd, 0, sizeof(struct iwl_tx_cmd)); 877 memset(tx_cmd, 0, sizeof(struct iwl_tx_cmd));
@@ -899,14 +911,15 @@ int iwl_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
899 911
900 /* Physical address of this Tx command's header (not MAC header!), 912 /* Physical address of this Tx command's header (not MAC header!),
901 * within command buffer array. */ 913 * within command buffer array. */
902 txcmd_phys = txq->dma_addr_cmd + sizeof(struct iwl_cmd) * idx + 914 txcmd_phys = pci_map_single(priv->pci_dev, out_cmd,
903 offsetof(struct iwl_cmd, hdr); 915 sizeof(struct iwl_cmd), PCI_DMA_TODEVICE);
916 txcmd_phys += offsetof(struct iwl_cmd, hdr);
904 917
905 /* Add buffer containing Tx command and MAC(!) header to TFD's 918 /* Add buffer containing Tx command and MAC(!) header to TFD's
906 * first entry */ 919 * first entry */
907 iwl_hw_txq_attach_buf_to_tfd(priv, tfd, txcmd_phys, len); 920 iwl_hw_txq_attach_buf_to_tfd(priv, tfd, txcmd_phys, len);
908 921
909 if (!(info->flags & IEEE80211_TX_CTL_DO_NOT_ENCRYPT)) 922 if (info->control.hw_key)
910 iwl_tx_cmd_build_hwcrypto(priv, info, tx_cmd, skb, sta_id); 923 iwl_tx_cmd_build_hwcrypto(priv, info, tx_cmd, skb, sta_id);
911 924
912 /* Set up TFD's 2nd entry to point directly to remainder of skb, 925 /* Set up TFD's 2nd entry to point directly to remainder of skb,
@@ -962,16 +975,15 @@ int iwl_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
962 if (ret) 975 if (ret)
963 return ret; 976 return ret;
964 977
965 if ((iwl_queue_space(q) < q->high_mark) 978 if ((iwl_queue_space(q) < q->high_mark) && priv->mac80211_registered) {
966 && priv->mac80211_registered) {
967 if (wait_write_ptr) { 979 if (wait_write_ptr) {
968 spin_lock_irqsave(&priv->lock, flags); 980 spin_lock_irqsave(&priv->lock, flags);
969 txq->need_update = 1; 981 txq->need_update = 1;
970 iwl_txq_update_write_ptr(priv, txq); 982 iwl_txq_update_write_ptr(priv, txq);
971 spin_unlock_irqrestore(&priv->lock, flags); 983 spin_unlock_irqrestore(&priv->lock, flags);
984 } else {
985 ieee80211_stop_queue(priv->hw, swq_id);
972 } 986 }
973
974 ieee80211_stop_queue(priv->hw, skb_get_queue_mapping(skb));
975 } 987 }
976 988
977 return 0; 989 return 0;
@@ -999,13 +1011,12 @@ int iwl_enqueue_hcmd(struct iwl_priv *priv, struct iwl_host_cmd *cmd)
999 struct iwl_tx_queue *txq = &priv->txq[IWL_CMD_QUEUE_NUM]; 1011 struct iwl_tx_queue *txq = &priv->txq[IWL_CMD_QUEUE_NUM];
1000 struct iwl_queue *q = &txq->q; 1012 struct iwl_queue *q = &txq->q;
1001 struct iwl_tfd_frame *tfd; 1013 struct iwl_tfd_frame *tfd;
1002 u32 *control_flags;
1003 struct iwl_cmd *out_cmd; 1014 struct iwl_cmd *out_cmd;
1004 u32 idx;
1005 u16 fix_size;
1006 dma_addr_t phys_addr; 1015 dma_addr_t phys_addr;
1007 int ret;
1008 unsigned long flags; 1016 unsigned long flags;
1017 int len, ret;
1018 u32 idx;
1019 u16 fix_size;
1009 1020
1010 cmd->len = priv->cfg->ops->utils->get_hcmd_size(cmd->id, cmd->len); 1021 cmd->len = priv->cfg->ops->utils->get_hcmd_size(cmd->id, cmd->len);
1011 fix_size = (u16)(cmd->len + sizeof(out_cmd->hdr)); 1022 fix_size = (u16)(cmd->len + sizeof(out_cmd->hdr));
@@ -1031,10 +1042,9 @@ int iwl_enqueue_hcmd(struct iwl_priv *priv, struct iwl_host_cmd *cmd)
1031 tfd = &txq->bd[q->write_ptr]; 1042 tfd = &txq->bd[q->write_ptr];
1032 memset(tfd, 0, sizeof(*tfd)); 1043 memset(tfd, 0, sizeof(*tfd));
1033 1044
1034 control_flags = (u32 *) tfd;
1035 1045
1036 idx = get_cmd_index(q, q->write_ptr, cmd->meta.flags & CMD_SIZE_HUGE); 1046 idx = get_cmd_index(q, q->write_ptr, cmd->meta.flags & CMD_SIZE_HUGE);
1037 out_cmd = &txq->cmd[idx]; 1047 out_cmd = txq->cmd[idx];
1038 1048
1039 out_cmd->hdr.cmd = cmd->id; 1049 out_cmd->hdr.cmd = cmd->id;
1040 memcpy(&out_cmd->meta, &cmd->meta, sizeof(cmd->meta)); 1050 memcpy(&out_cmd->meta, &cmd->meta, sizeof(cmd->meta));
@@ -1048,9 +1058,11 @@ int iwl_enqueue_hcmd(struct iwl_priv *priv, struct iwl_host_cmd *cmd)
1048 INDEX_TO_SEQ(q->write_ptr)); 1058 INDEX_TO_SEQ(q->write_ptr));
1049 if (out_cmd->meta.flags & CMD_SIZE_HUGE) 1059 if (out_cmd->meta.flags & CMD_SIZE_HUGE)
1050 out_cmd->hdr.sequence |= cpu_to_le16(SEQ_HUGE_FRAME); 1060 out_cmd->hdr.sequence |= cpu_to_le16(SEQ_HUGE_FRAME);
1051 1061 len = (idx == TFD_CMD_SLOTS) ?
1052 phys_addr = txq->dma_addr_cmd + sizeof(txq->cmd[0]) * idx + 1062 IWL_MAX_SCAN_SIZE : sizeof(struct iwl_cmd);
1053 offsetof(struct iwl_cmd, hdr); 1063 phys_addr = pci_map_single(priv->pci_dev, out_cmd, len,
1064 PCI_DMA_TODEVICE);
1065 phys_addr += offsetof(struct iwl_cmd, hdr);
1054 iwl_hw_txq_attach_buf_to_tfd(priv, tfd, phys_addr, fix_size); 1066 iwl_hw_txq_attach_buf_to_tfd(priv, tfd, phys_addr, fix_size);
1055 1067
1056 IWL_DEBUG_HC("Sending command %s (#%x), seq: 0x%04X, " 1068 IWL_DEBUG_HC("Sending command %s (#%x), seq: 0x%04X, "
@@ -1115,6 +1127,9 @@ static void iwl_hcmd_queue_reclaim(struct iwl_priv *priv, int txq_id, int index)
1115{ 1127{
1116 struct iwl_tx_queue *txq = &priv->txq[txq_id]; 1128 struct iwl_tx_queue *txq = &priv->txq[txq_id];
1117 struct iwl_queue *q = &txq->q; 1129 struct iwl_queue *q = &txq->q;
1130 struct iwl_tfd_frame *bd = &txq->bd[index];
1131 dma_addr_t dma_addr;
1132 int is_odd, buf_len;
1118 int nfreed = 0; 1133 int nfreed = 0;
1119 1134
1120 if ((index >= q->n_bd) || (iwl_queue_used(q, index) == 0)) { 1135 if ((index >= q->n_bd) || (iwl_queue_used(q, index) == 0)) {
@@ -1132,6 +1147,19 @@ static void iwl_hcmd_queue_reclaim(struct iwl_priv *priv, int txq_id, int index)
1132 q->write_ptr, q->read_ptr); 1147 q->write_ptr, q->read_ptr);
1133 queue_work(priv->workqueue, &priv->restart); 1148 queue_work(priv->workqueue, &priv->restart);
1134 } 1149 }
1150 is_odd = (index/2) & 0x1;
1151 if (is_odd) {
1152 dma_addr = IWL_GET_BITS(bd->pa[index], tb2_addr_lo16) |
1153 (IWL_GET_BITS(bd->pa[index],
1154 tb2_addr_hi20) << 16);
1155 buf_len = IWL_GET_BITS(bd->pa[index], tb2_len);
1156 } else {
1157 dma_addr = le32_to_cpu(bd->pa[index].tb1_addr);
1158 buf_len = IWL_GET_BITS(bd->pa[index], tb1_len);
1159 }
1160
1161 pci_unmap_single(priv->pci_dev, dma_addr, buf_len,
1162 PCI_DMA_TODEVICE);
1135 nfreed++; 1163 nfreed++;
1136 } 1164 }
1137} 1165}
@@ -1163,7 +1191,7 @@ void iwl_tx_cmd_complete(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb)
1163 BUG_ON(txq_id != IWL_CMD_QUEUE_NUM); 1191 BUG_ON(txq_id != IWL_CMD_QUEUE_NUM);
1164 1192
1165 cmd_index = get_cmd_index(&priv->txq[IWL_CMD_QUEUE_NUM].q, index, huge); 1193 cmd_index = get_cmd_index(&priv->txq[IWL_CMD_QUEUE_NUM].q, index, huge);
1166 cmd = &priv->txq[IWL_CMD_QUEUE_NUM].cmd[cmd_index]; 1194 cmd = priv->txq[IWL_CMD_QUEUE_NUM].cmd[cmd_index];
1167 1195
1168 /* Input error checking is done when commands are added to queue. */ 1196 /* Input error checking is done when commands are added to queue. */
1169 if (cmd->meta.flags & CMD_WANT_SKB) { 1197 if (cmd->meta.flags & CMD_WANT_SKB) {
@@ -1391,7 +1419,7 @@ static int iwl_tx_status_reply_compressed_ba(struct iwl_priv *priv,
1391 /* For each frame attempted in aggregation, 1419 /* For each frame attempted in aggregation,
1392 * update driver's record of tx frame's status. */ 1420 * update driver's record of tx frame's status. */
1393 for (i = 0; i < agg->frame_count ; i++) { 1421 for (i = 0; i < agg->frame_count ; i++) {
1394 ack = bitmap & (1 << i); 1422 ack = bitmap & (1ULL << i);
1395 successes += !!ack; 1423 successes += !!ack;
1396 IWL_DEBUG_TX_REPLY("%s ON i=%d idx=%d raw=%d\n", 1424 IWL_DEBUG_TX_REPLY("%s ON i=%d idx=%d raw=%d\n",
1397 ack? "ACK":"NACK", i, (agg->start_idx + i) & 0xff, 1425 ack? "ACK":"NACK", i, (agg->start_idx + i) & 0xff,
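The host-command changes above replace the single pci_alloc_consistent() slab (addressed through dma_addr_cmd) with per-slot kmalloc(GFP_KERNEL | GFP_DMA) buffers that are streaming-mapped with pci_map_single() only while the hardware owns them and unmapped again in the reclaim path (note the odd/even TFD entry decoding there, and the 1ULL shift that keeps the 64-bit block-ack bitmap test correct past bit 31). A minimal sketch of that allocate/map/unmap life cycle, assuming the 2.6.26-era PCI DMA API; the ex_cmd type and helpers are illustrative:

    #include <linux/pci.h>
    #include <linux/slab.h>

    struct ex_cmd { u8 payload[128]; };

    static struct ex_cmd *ex_cmd_alloc(void)
    {
            /* DMA-able kernel memory, one buffer per command slot */
            return kmalloc(sizeof(struct ex_cmd), GFP_KERNEL | GFP_DMA);
    }

    static dma_addr_t ex_cmd_map(struct pci_dev *pdev, struct ex_cmd *cmd)
    {
            /* streaming mapping: valid only until ex_cmd_unmap() */
            return pci_map_single(pdev, cmd, sizeof(*cmd), PCI_DMA_TODEVICE);
    }

    static void ex_cmd_unmap(struct pci_dev *pdev, dma_addr_t addr)
    {
            pci_unmap_single(pdev, addr, sizeof(struct ex_cmd), PCI_DMA_TODEVICE);
    }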
diff --git a/drivers/net/wireless/iwlwifi/iwl3945-base.c b/drivers/net/wireless/iwlwifi/iwl3945-base.c
index 4a22d3fba75b..444847ab1b5a 100644
--- a/drivers/net/wireless/iwlwifi/iwl3945-base.c
+++ b/drivers/net/wireless/iwlwifi/iwl3945-base.c
@@ -275,10 +275,8 @@ static int iwl3945_tx_queue_alloc(struct iwl3945_priv *priv,
275 return 0; 275 return 0;
276 276
277 error: 277 error:
278 if (txq->txb) { 278 kfree(txq->txb);
279 kfree(txq->txb); 279 txq->txb = NULL;
280 txq->txb = NULL;
281 }
282 280
283 return -ENOMEM; 281 return -ENOMEM;
284} 282}
@@ -365,10 +363,8 @@ void iwl3945_tx_queue_free(struct iwl3945_priv *priv, struct iwl3945_tx_queue *t
365 txq->q.n_bd, txq->bd, txq->q.dma_addr); 363 txq->q.n_bd, txq->bd, txq->q.dma_addr);
366 364
367 /* De-alloc array of per-TFD driver data */ 365 /* De-alloc array of per-TFD driver data */
368 if (txq->txb) { 366 kfree(txq->txb);
369 kfree(txq->txb); 367 txq->txb = NULL;
370 txq->txb = NULL;
371 }
372 368
373 /* 0-fill queue descriptor structure */ 369 /* 0-fill queue descriptor structure */
374 memset(txq, 0, sizeof(*txq)); 370 memset(txq, 0, sizeof(*txq));
@@ -2667,7 +2663,7 @@ static int iwl3945_tx_skb(struct iwl3945_priv *priv, struct sk_buff *skb)
2667 * first entry */ 2663 * first entry */
2668 iwl3945_hw_txq_attach_buf_to_tfd(priv, tfd, txcmd_phys, len); 2664 iwl3945_hw_txq_attach_buf_to_tfd(priv, tfd, txcmd_phys, len);
2669 2665
2670 if (!(info->flags & IEEE80211_TX_CTL_DO_NOT_ENCRYPT)) 2666 if (info->control.hw_key)
2671 iwl3945_build_tx_cmd_hwcrypto(priv, info, out_cmd, skb, 0); 2667 iwl3945_build_tx_cmd_hwcrypto(priv, info, out_cmd, skb, 0);
2672 2668
2673 /* Set up TFD's 2nd entry to point directly to remainder of skb, 2669 /* Set up TFD's 2nd entry to point directly to remainder of skb,
@@ -2703,9 +2699,8 @@ static int iwl3945_tx_skb(struct iwl3945_priv *priv, struct sk_buff *skb)
2703 2699
2704 if (!ieee80211_has_morefrags(hdr->frame_control)) { 2700 if (!ieee80211_has_morefrags(hdr->frame_control)) {
2705 txq->need_update = 1; 2701 txq->need_update = 1;
2706 if (qc) { 2702 if (qc)
2707 priv->stations[sta_id].tid[tid].seq_number = seq_number; 2703 priv->stations[sta_id].tid[tid].seq_number = seq_number;
2708 }
2709 } else { 2704 } else {
2710 wait_write_ptr = 1; 2705 wait_write_ptr = 1;
2711 txq->need_update = 0; 2706 txq->need_update = 0;
@@ -3813,7 +3808,7 @@ int iwl3945_calc_db_from_ratio(int sig_ratio)
3813 /* 100:1 or higher, divide by 10 and use table, 3808 /* 100:1 or higher, divide by 10 and use table,
3814 * add 20 dB to make up for divide by 10 */ 3809 * add 20 dB to make up for divide by 10 */
3815 if (sig_ratio >= 100) 3810 if (sig_ratio >= 100)
3816 return (20 + (int)ratio2dB[sig_ratio/10]); 3811 return 20 + (int)ratio2dB[sig_ratio/10];
3817 3812
3818 /* We shouldn't see this */ 3813 /* We shouldn't see this */
3819 if (sig_ratio < 1) 3814 if (sig_ratio < 1)
@@ -5088,7 +5083,7 @@ static void iwl3945_dealloc_ucode_pci(struct iwl3945_priv *priv)
5088 * iwl3945_verify_inst_full - verify runtime uCode image in card vs. host, 5083 * iwl3945_verify_inst_full - verify runtime uCode image in card vs. host,
5089 * looking at all data. 5084 * looking at all data.
5090 */ 5085 */
5091static int iwl3945_verify_inst_full(struct iwl3945_priv *priv, __le32 * image, u32 len) 5086static int iwl3945_verify_inst_full(struct iwl3945_priv *priv, __le32 *image, u32 len)
5092{ 5087{
5093 u32 val; 5088 u32 val;
5094 u32 save_len = len; 5089 u32 save_len = len;
@@ -5237,7 +5232,7 @@ static int iwl3945_verify_bsm(struct iwl3945_priv *priv)
5237 val = iwl3945_read_prph(priv, BSM_WR_DWCOUNT_REG); 5232 val = iwl3945_read_prph(priv, BSM_WR_DWCOUNT_REG);
5238 for (reg = BSM_SRAM_LOWER_BOUND; 5233 for (reg = BSM_SRAM_LOWER_BOUND;
5239 reg < BSM_SRAM_LOWER_BOUND + len; 5234 reg < BSM_SRAM_LOWER_BOUND + len;
5240 reg += sizeof(u32), image ++) { 5235 reg += sizeof(u32), image++) {
5241 val = iwl3945_read_prph(priv, reg); 5236 val = iwl3945_read_prph(priv, reg);
5242 if (val != le32_to_cpu(*image)) { 5237 if (val != le32_to_cpu(*image)) {
5243 IWL_ERROR("BSM uCode verification failed at " 5238 IWL_ERROR("BSM uCode verification failed at "
@@ -6336,7 +6331,7 @@ static void iwl3945_bg_post_associate(struct work_struct *data)
6336 DECLARE_MAC_BUF(mac); 6331 DECLARE_MAC_BUF(mac);
6337 6332
6338 if (priv->iw_mode == IEEE80211_IF_TYPE_AP) { 6333 if (priv->iw_mode == IEEE80211_IF_TYPE_AP) {
6339 IWL_ERROR("%s Should not be called in AP mode\n", __FUNCTION__); 6334 IWL_ERROR("%s Should not be called in AP mode\n", __func__);
6340 return; 6335 return;
6341 } 6336 }
6342 6337
@@ -6417,7 +6412,7 @@ static void iwl3945_bg_post_associate(struct work_struct *data)
6417 6412
6418 default: 6413 default:
6419 IWL_ERROR("%s Should not be called in %d mode\n", 6414 IWL_ERROR("%s Should not be called in %d mode\n",
6420 __FUNCTION__, priv->iw_mode); 6415 __func__, priv->iw_mode);
6421 break; 6416 break;
6422 } 6417 }
6423 6418
@@ -6594,12 +6589,6 @@ static int iwl3945_mac_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
6594 6589
6595 IWL_DEBUG_MAC80211("enter\n"); 6590 IWL_DEBUG_MAC80211("enter\n");
6596 6591
6597 if (priv->iw_mode == IEEE80211_IF_TYPE_MNTR) {
6598 IWL_DEBUG_MAC80211("leave - monitor\n");
6599 dev_kfree_skb_any(skb);
6600 return 0;
6601 }
6602
6603 IWL_DEBUG_TX("dev->xmit(%d bytes) at rate 0x%02x\n", skb->len, 6592 IWL_DEBUG_TX("dev->xmit(%d bytes) at rate 0x%02x\n", skb->len,
6604 ieee80211_get_tx_rate(hw, IEEE80211_SKB_CB(skb))->bitrate); 6593 ieee80211_get_tx_rate(hw, IEEE80211_SKB_CB(skb))->bitrate);
6605 6594
@@ -7456,7 +7445,7 @@ static ssize_t show_measurement(struct device *d,
7456 struct iwl3945_priv *priv = dev_get_drvdata(d); 7445 struct iwl3945_priv *priv = dev_get_drvdata(d);
7457 struct iwl3945_spectrum_notification measure_report; 7446 struct iwl3945_spectrum_notification measure_report;
7458 u32 size = sizeof(measure_report), len = 0, ofs = 0; 7447 u32 size = sizeof(measure_report), len = 0, ofs = 0;
7459 u8 *data = (u8 *) & measure_report; 7448 u8 *data = (u8 *)&measure_report;
7460 unsigned long flags; 7449 unsigned long flags;
7461 7450
7462 spin_lock_irqsave(&priv->lock, flags); 7451 spin_lock_irqsave(&priv->lock, flags);
@@ -7627,7 +7616,7 @@ static ssize_t show_power_level(struct device *d,
7627 else 7616 else
7628 p += sprintf(p, " \n"); 7617 p += sprintf(p, " \n");
7629 7618
7630 return (p - buf + 1); 7619 return p - buf + 1;
7631 7620
7632} 7621}
7633 7622
@@ -7649,7 +7638,7 @@ static ssize_t show_statistics(struct device *d,
7649 struct iwl3945_priv *priv = dev_get_drvdata(d); 7638 struct iwl3945_priv *priv = dev_get_drvdata(d);
7650 u32 size = sizeof(struct iwl3945_notif_statistics); 7639 u32 size = sizeof(struct iwl3945_notif_statistics);
7651 u32 len = 0, ofs = 0; 7640 u32 len = 0, ofs = 0;
7652 u8 *data = (u8 *) & priv->statistics; 7641 u8 *data = (u8 *)&priv->statistics;
7653 int rc = 0; 7642 int rc = 0;
7654 7643
7655 if (!iwl3945_is_alive(priv)) 7644 if (!iwl3945_is_alive(priv))
@@ -7899,8 +7888,7 @@ static int iwl3945_pci_probe(struct pci_dev *pdev, const struct pci_device_id *e
7899 priv->ibss_beacon = NULL; 7888 priv->ibss_beacon = NULL;
7900 7889
7901 /* Tell mac80211 our characteristics */ 7890 /* Tell mac80211 our characteristics */
7902 hw->flags = IEEE80211_HW_HOST_GEN_BEACON_TEMPLATE | 7891 hw->flags = IEEE80211_HW_SIGNAL_DBM |
7903 IEEE80211_HW_SIGNAL_DBM |
7904 IEEE80211_HW_NOISE_DBM; 7892 IEEE80211_HW_NOISE_DBM;
7905 7893
7906 /* 4 EDCA QOS priorities */ 7894 /* 4 EDCA QOS priorities */
@@ -8004,16 +7992,16 @@ static int iwl3945_pci_probe(struct pci_dev *pdev, const struct pci_device_id *e
8004 7992
8005 /* nic init */ 7993 /* nic init */
8006 iwl3945_set_bit(priv, CSR_GIO_CHICKEN_BITS, 7994 iwl3945_set_bit(priv, CSR_GIO_CHICKEN_BITS,
8007 CSR_GIO_CHICKEN_BITS_REG_BIT_DIS_L0S_EXIT_TIMER); 7995 CSR_GIO_CHICKEN_BITS_REG_BIT_DIS_L0S_EXIT_TIMER);
8008 7996
8009 iwl3945_set_bit(priv, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_INIT_DONE); 7997 iwl3945_set_bit(priv, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
8010 err = iwl3945_poll_bit(priv, CSR_GP_CNTRL, 7998 err = iwl3945_poll_bit(priv, CSR_GP_CNTRL,
8011 CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY, 7999 CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
8012 CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY, 25000); 8000 CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY, 25000);
8013 if (err < 0) { 8001 if (err < 0) {
8014 IWL_DEBUG_INFO("Failed to init the card\n"); 8002 IWL_DEBUG_INFO("Failed to init the card\n");
8015 goto out_remove_sysfs; 8003 goto out_remove_sysfs;
8016 } 8004 }
8017 /* Read the EEPROM */ 8005 /* Read the EEPROM */
8018 err = iwl3945_eeprom_init(priv); 8006 err = iwl3945_eeprom_init(priv);
8019 if (err) { 8007 if (err) {
@@ -8115,9 +8103,8 @@ static void __devexit iwl3945_pci_remove(struct pci_dev *pdev)
8115 iwl3945_unset_hw_setting(priv); 8103 iwl3945_unset_hw_setting(priv);
8116 iwl3945_clear_stations_table(priv); 8104 iwl3945_clear_stations_table(priv);
8117 8105
8118 if (priv->mac80211_registered) { 8106 if (priv->mac80211_registered)
8119 ieee80211_unregister_hw(priv->hw); 8107 ieee80211_unregister_hw(priv->hw);
8120 }
8121 8108
8122 /*netif_stop_queue(dev); */ 8109 /*netif_stop_queue(dev); */
8123 flush_workqueue(priv->workqueue); 8110 flush_workqueue(priv->workqueue);
diff --git a/drivers/net/wireless/libertas/main.c b/drivers/net/wireless/libertas/main.c
index 14d5d61cec4c..bd32ac0b4e07 100644
--- a/drivers/net/wireless/libertas/main.c
+++ b/drivers/net/wireless/libertas/main.c
@@ -297,9 +297,7 @@ static ssize_t lbs_rtap_set(struct device *dev,
297 lbs_add_rtap(priv); 297 lbs_add_rtap(priv);
298 } 298 }
299 priv->monitormode = monitor_mode; 299 priv->monitormode = monitor_mode;
300 } 300 } else {
301
302 else {
303 if (!priv->monitormode) 301 if (!priv->monitormode)
304 return strlen(buf); 302 return strlen(buf);
305 priv->monitormode = 0; 303 priv->monitormode = 0;
@@ -1242,8 +1240,6 @@ int lbs_start_card(struct lbs_private *priv)
1242 lbs_pr_err("cannot register ethX device\n"); 1240 lbs_pr_err("cannot register ethX device\n");
1243 goto done; 1241 goto done;
1244 } 1242 }
1245 if (device_create_file(&dev->dev, &dev_attr_lbs_rtap))
1246 lbs_pr_err("cannot register lbs_rtap attribute\n");
1247 1243
1248 lbs_update_channel(priv); 1244 lbs_update_channel(priv);
1249 1245
@@ -1275,6 +1271,13 @@ int lbs_start_card(struct lbs_private *priv)
1275 1271
1276 if (device_create_file(&dev->dev, &dev_attr_lbs_mesh)) 1272 if (device_create_file(&dev->dev, &dev_attr_lbs_mesh))
1277 lbs_pr_err("cannot register lbs_mesh attribute\n"); 1273 lbs_pr_err("cannot register lbs_mesh attribute\n");
1274
1275 /* While rtap isn't related to mesh, only mesh-enabled
1276 * firmware implements the rtap functionality via
1277 * CMD_802_11_MONITOR_MODE.
1278 */
1279 if (device_create_file(&dev->dev, &dev_attr_lbs_rtap))
1280 lbs_pr_err("cannot register lbs_rtap attribute\n");
1278 } 1281 }
1279 } 1282 }
1280 1283
@@ -1306,9 +1309,9 @@ void lbs_stop_card(struct lbs_private *priv)
1306 netif_carrier_off(priv->dev); 1309 netif_carrier_off(priv->dev);
1307 1310
1308 lbs_debugfs_remove_one(priv); 1311 lbs_debugfs_remove_one(priv);
1309 device_remove_file(&dev->dev, &dev_attr_lbs_rtap);
1310 if (priv->mesh_tlv) { 1312 if (priv->mesh_tlv) {
1311 device_remove_file(&dev->dev, &dev_attr_lbs_mesh); 1313 device_remove_file(&dev->dev, &dev_attr_lbs_mesh);
1314 device_remove_file(&dev->dev, &dev_attr_lbs_rtap);
1312 } 1315 }
1313 1316
1314 /* Flush pending command nodes */ 1317 /* Flush pending command nodes */
diff --git a/drivers/net/wireless/libertas/persistcfg.c b/drivers/net/wireless/libertas/persistcfg.c
index 6d0ff8decaf7..3309a9c3cfef 100644
--- a/drivers/net/wireless/libertas/persistcfg.c
+++ b/drivers/net/wireless/libertas/persistcfg.c
@@ -48,7 +48,7 @@ static ssize_t bootflag_get(struct device *dev,
48 if (ret) 48 if (ret)
49 return ret; 49 return ret;
50 50
51 return snprintf(buf, 12, "0x%x\n", le32_to_cpu(defs.bootflag)); 51 return snprintf(buf, 12, "%d\n", le32_to_cpu(defs.bootflag));
52} 52}
53 53
54/** 54/**
@@ -63,8 +63,8 @@ static ssize_t bootflag_set(struct device *dev, struct device_attribute *attr,
63 int ret; 63 int ret;
64 64
65 memset(&cmd, 0, sizeof(cmd)); 65 memset(&cmd, 0, sizeof(cmd));
66 ret = sscanf(buf, "%x", &datum); 66 ret = sscanf(buf, "%d", &datum);
67 if (ret != 1) 67 if ((ret != 1) || (datum > 1))
68 return -EINVAL; 68 return -EINVAL;
69 69
70 *((__le32 *)&cmd.data[0]) = cpu_to_le32(!!datum); 70 *((__le32 *)&cmd.data[0]) = cpu_to_le32(!!datum);
@@ -91,7 +91,7 @@ static ssize_t boottime_get(struct device *dev,
91 if (ret) 91 if (ret)
92 return ret; 92 return ret;
93 93
94 return snprintf(buf, 12, "0x%x\n", defs.boottime); 94 return snprintf(buf, 12, "%d\n", defs.boottime);
95} 95}
96 96
97/** 97/**
@@ -106,8 +106,8 @@ static ssize_t boottime_set(struct device *dev,
106 int ret; 106 int ret;
107 107
108 memset(&cmd, 0, sizeof(cmd)); 108 memset(&cmd, 0, sizeof(cmd));
109 ret = sscanf(buf, "%x", &datum); 109 ret = sscanf(buf, "%d", &datum);
110 if (ret != 1) 110 if ((ret != 1) || (datum > 255))
111 return -EINVAL; 111 return -EINVAL;
112 112
113 /* A too small boot time will result in the device booting into 113 /* A too small boot time will result in the device booting into
@@ -143,7 +143,7 @@ static ssize_t channel_get(struct device *dev,
143 if (ret) 143 if (ret)
144 return ret; 144 return ret;
145 145
146 return snprintf(buf, 12, "0x%x\n", le16_to_cpu(defs.channel)); 146 return snprintf(buf, 12, "%d\n", le16_to_cpu(defs.channel));
147} 147}
148 148
149/** 149/**
@@ -154,11 +154,11 @@ static ssize_t channel_set(struct device *dev, struct device_attribute *attr,
154{ 154{
155 struct lbs_private *priv = to_net_dev(dev)->priv; 155 struct lbs_private *priv = to_net_dev(dev)->priv;
156 struct cmd_ds_mesh_config cmd; 156 struct cmd_ds_mesh_config cmd;
157 uint16_t datum; 157 uint32_t datum;
158 int ret; 158 int ret;
159 159
160 memset(&cmd, 0, sizeof(cmd)); 160 memset(&cmd, 0, sizeof(cmd));
161 ret = sscanf(buf, "%hx", &datum); 161 ret = sscanf(buf, "%d", &datum);
162 if (ret != 1 || datum < 1 || datum > 11) 162 if (ret != 1 || datum < 1 || datum > 11)
163 return -EINVAL; 163 return -EINVAL;
164 164
@@ -274,8 +274,8 @@ static ssize_t protocol_id_set(struct device *dev,
274 int ret; 274 int ret;
275 275
276 memset(&cmd, 0, sizeof(cmd)); 276 memset(&cmd, 0, sizeof(cmd));
277 ret = sscanf(buf, "%x", &datum); 277 ret = sscanf(buf, "%d", &datum);
278 if (ret != 1) 278 if ((ret != 1) || (datum > 255))
279 return -EINVAL; 279 return -EINVAL;
280 280
281 /* fetch all other Information Element parameters */ 281 /* fetch all other Information Element parameters */
@@ -328,8 +328,8 @@ static ssize_t metric_id_set(struct device *dev, struct device_attribute *attr,
328 int ret; 328 int ret;
329 329
330 memset(&cmd, 0, sizeof(cmd)); 330 memset(&cmd, 0, sizeof(cmd));
331 ret = sscanf(buf, "%x", &datum); 331 ret = sscanf(buf, "%d", &datum);
332 if (ret != 1) 332 if ((ret != 1) || (datum > 255))
333 return -EINVAL; 333 return -EINVAL;
334 334
335 /* fetch all other Information Element parameters */ 335 /* fetch all other Information Element parameters */
@@ -382,8 +382,8 @@ static ssize_t capability_set(struct device *dev, struct device_attribute *attr,
382 int ret; 382 int ret;
383 383
384 memset(&cmd, 0, sizeof(cmd)); 384 memset(&cmd, 0, sizeof(cmd));
385 ret = sscanf(buf, "%x", &datum); 385 ret = sscanf(buf, "%d", &datum);
386 if (ret != 1) 386 if ((ret != 1) || (datum > 255))
387 return -EINVAL; 387 return -EINVAL;
388 388
389 /* fetch all other Information Element parameters */ 389 /* fetch all other Information Element parameters */
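
The persistcfg.c hunks above switch the mesh sysfs attributes from hex to plain decimal parsing and add explicit range checks before the value is passed to firmware. A minimal user-space sketch of that parse-then-validate pattern follows; the function name and bounds are illustrative, not the driver's API.

#include <stdio.h>

/* Illustrative stand-in for the sysfs store handlers above: parse a
 * decimal value and reject anything outside the allowed range, the way
 * channel_set() now enforces 1..11 and the byte-sized fields reject
 * values above 255. */
static int parse_bounded(const char *buf, unsigned int min, unsigned int max,
                         unsigned int *out)
{
	unsigned int datum;

	if (sscanf(buf, "%u", &datum) != 1 || datum < min || datum > max)
		return -1;	/* the drivers return -EINVAL here */

	*out = datum;
	return 0;
}

int main(void)
{
	unsigned int channel;

	if (parse_bounded("12", 1, 11, &channel))
		printf("rejected out-of-range channel\n");
	if (!parse_bounded("6", 1, 11, &channel))
		printf("accepted channel %u\n", channel);
	return 0;
}
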
diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c
index 5816230d58f8..248d31a7aa33 100644
--- a/drivers/net/wireless/mac80211_hwsim.c
+++ b/drivers/net/wireless/mac80211_hwsim.c
@@ -500,7 +500,7 @@ failed_hw:
500 device_unregister(data->dev); 500 device_unregister(data->dev);
501failed_drvdata: 501failed_drvdata:
502 ieee80211_free_hw(hw); 502 ieee80211_free_hw(hw);
503 hwsim_radios[i] = 0; 503 hwsim_radios[i] = NULL;
504failed: 504failed:
505 mac80211_hwsim_free(); 505 mac80211_hwsim_free();
506 return err; 506 return err;
diff --git a/drivers/net/wireless/orinoco.c b/drivers/net/wireless/orinoco.c
index b047306bf386..1ebcafe7ca5f 100644
--- a/drivers/net/wireless/orinoco.c
+++ b/drivers/net/wireless/orinoco.c
@@ -1998,13 +1998,6 @@ __orinoco_set_multicast_list(struct net_device *dev)
1998 else 1998 else
1999 priv->mc_count = mc_count; 1999 priv->mc_count = mc_count;
2000 } 2000 }
2001
2002 /* Since we can set the promiscuous flag when it wasn't asked
2003 for, make sure the net_device knows about it. */
2004 if (priv->promiscuous)
2005 dev->flags |= IFF_PROMISC;
2006 else
2007 dev->flags &= ~IFF_PROMISC;
2008} 2001}
2009 2002
2010/* This must be called from user context, without locks held - use 2003/* This must be called from user context, without locks held - use
diff --git a/drivers/net/wireless/p54/p54.h b/drivers/net/wireless/p54/p54.h
index c6f27b9022f9..4801a363507b 100644
--- a/drivers/net/wireless/p54/p54.h
+++ b/drivers/net/wireless/p54/p54.h
@@ -52,6 +52,8 @@ struct p54_common {
52 int (*open)(struct ieee80211_hw *dev); 52 int (*open)(struct ieee80211_hw *dev);
53 void (*stop)(struct ieee80211_hw *dev); 53 void (*stop)(struct ieee80211_hw *dev);
54 int mode; 54 int mode;
55 u16 seqno;
56 struct mutex conf_mutex;
55 u8 mac_addr[ETH_ALEN]; 57 u8 mac_addr[ETH_ALEN];
56 u8 bssid[ETH_ALEN]; 58 u8 bssid[ETH_ALEN];
57 struct pda_iq_autocal_entry *iq_autocal; 59 struct pda_iq_autocal_entry *iq_autocal;
diff --git a/drivers/net/wireless/p54/p54common.c b/drivers/net/wireless/p54/p54common.c
index ffaf7a6b6810..83cd85e1f847 100644
--- a/drivers/net/wireless/p54/p54common.c
+++ b/drivers/net/wireless/p54/p54common.c
@@ -553,6 +553,7 @@ static int p54_tx(struct ieee80211_hw *dev, struct sk_buff *skb)
553 struct ieee80211_tx_queue_stats *current_queue; 553 struct ieee80211_tx_queue_stats *current_queue;
554 struct p54_common *priv = dev->priv; 554 struct p54_common *priv = dev->priv;
555 struct p54_control_hdr *hdr; 555 struct p54_control_hdr *hdr;
556 struct ieee80211_hdr *ieee80211hdr = (struct ieee80211_hdr *)skb->data;
556 struct p54_tx_control_allocdata *txhdr; 557 struct p54_tx_control_allocdata *txhdr;
557 size_t padding, len; 558 size_t padding, len;
558 u8 rate; 559 u8 rate;
@@ -605,6 +606,19 @@ static int p54_tx(struct ieee80211_hw *dev, struct sk_buff *skb)
605 if (padding) 606 if (padding)
606 txhdr->align[0] = padding; 607 txhdr->align[0] = padding;
607 608
609 /* FIXME: The sequence that follows is needed for this driver to
610 * work with mac80211 since "mac80211: fix TX sequence numbers".
611 * As with the temporary code in rt2x00, changes will be needed
612 * to get proper sequence numbers on beacons. In addition, this
613 * patch places the sequence number in the hardware state, which
614 * limits us to a single virtual state.
615 */
616 if (info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ) {
617 if (info->flags & IEEE80211_TX_CTL_FIRST_FRAGMENT)
618 priv->seqno += 0x10;
619 ieee80211hdr->seq_ctrl &= cpu_to_le16(IEEE80211_SCTL_FRAG);
620 ieee80211hdr->seq_ctrl |= cpu_to_le16(priv->seqno);
621 }
608 /* modifies skb->cb and with it info, so must be last! */ 622 /* modifies skb->cb and with it info, so must be last! */
609 p54_assign_address(dev, skb, hdr, skb->len); 623 p54_assign_address(dev, skb, hdr, skb->len);
610 624
@@ -803,8 +817,8 @@ static void p54_set_vdcf(struct ieee80211_hw *dev)
803 817
804 if (dev->conf.flags & IEEE80211_CONF_SHORT_SLOT_TIME) { 818 if (dev->conf.flags & IEEE80211_CONF_SHORT_SLOT_TIME) {
805 vdcf->slottime = 9; 819 vdcf->slottime = 9;
806 vdcf->magic1 = 0x00; 820 vdcf->magic1 = 0x10;
807 vdcf->magic2 = 0x10; 821 vdcf->magic2 = 0x00;
808 } else { 822 } else {
809 vdcf->slottime = 20; 823 vdcf->slottime = 20;
810 vdcf->magic1 = 0x0a; 824 vdcf->magic1 = 0x0a;
@@ -886,9 +900,12 @@ static void p54_remove_interface(struct ieee80211_hw *dev,
886static int p54_config(struct ieee80211_hw *dev, struct ieee80211_conf *conf) 900static int p54_config(struct ieee80211_hw *dev, struct ieee80211_conf *conf)
887{ 901{
888 int ret; 902 int ret;
903 struct p54_common *priv = dev->priv;
889 904
905 mutex_lock(&priv->conf_mutex);
890 ret = p54_set_freq(dev, cpu_to_le16(conf->channel->center_freq)); 906 ret = p54_set_freq(dev, cpu_to_le16(conf->channel->center_freq));
891 p54_set_vdcf(dev); 907 p54_set_vdcf(dev);
908 mutex_unlock(&priv->conf_mutex);
892 return ret; 909 return ret;
893} 910}
894 911
@@ -898,10 +915,12 @@ static int p54_config_interface(struct ieee80211_hw *dev,
898{ 915{
899 struct p54_common *priv = dev->priv; 916 struct p54_common *priv = dev->priv;
900 917
918 mutex_lock(&priv->conf_mutex);
901 p54_set_filter(dev, 0, priv->mac_addr, conf->bssid, 0, 1, 0, 0xF642); 919 p54_set_filter(dev, 0, priv->mac_addr, conf->bssid, 0, 1, 0, 0xF642);
902 p54_set_filter(dev, 0, priv->mac_addr, conf->bssid, 2, 0, 0, 0); 920 p54_set_filter(dev, 0, priv->mac_addr, conf->bssid, 2, 0, 0, 0);
903 p54_set_leds(dev, 1, !is_multicast_ether_addr(conf->bssid), 0); 921 p54_set_leds(dev, 1, !is_multicast_ether_addr(conf->bssid), 0);
904 memcpy(priv->bssid, conf->bssid, ETH_ALEN); 922 memcpy(priv->bssid, conf->bssid, ETH_ALEN);
923 mutex_unlock(&priv->conf_mutex);
905 return 0; 924 return 0;
906} 925}
907 926
@@ -1009,6 +1028,7 @@ struct ieee80211_hw *p54_init_common(size_t priv_data_len)
1009 } 1028 }
1010 1029
1011 p54_init_vdcf(dev); 1030 p54_init_vdcf(dev);
1031 mutex_init(&priv->conf_mutex);
1012 1032
1013 return dev; 1033 return dev;
1014} 1034}
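
The FIXME added to p54_tx() above (and the matching code in rt2x00 and rtl8187 further down) assigns sequence numbers in software: the 16-bit sequence-control field keeps the fragment number in its low four bits, so the counter advances in steps of 0x10 and is OR-ed over the preserved fragment bits. A small host-side sketch of that arithmetic, assuming the usual IEEE80211_SCTL_FRAG layout (0x000F):

#include <stdint.h>
#include <stdio.h>

#define SCTL_FRAG 0x000F	/* low 4 bits: fragment number */

/* Mirrors the driver logic: bump the counter once per MSDU (on the
 * first fragment) and splice it into seq_ctrl without touching the
 * fragment number.  Byte order is ignored here; the drivers apply
 * cpu_to_le16() on the wire. */
static uint16_t assign_seq(uint16_t seq_ctrl, uint16_t *seqno,
                           int first_fragment)
{
	if (first_fragment)
		*seqno += 0x10;		/* advance the 12-bit sequence number */

	seq_ctrl &= SCTL_FRAG;		/* keep the fragment number */
	seq_ctrl |= *seqno;		/* insert the sequence number */
	return seq_ctrl;
}

int main(void)
{
	uint16_t seqno = 0;
	uint16_t hdr = 0x0003;		/* fragment 3 of some frame */

	hdr = assign_seq(hdr, &seqno, 1);
	printf("seq_ctrl = 0x%04x (seq %u, frag %u)\n",
	       hdr, hdr >> 4, hdr & SCTL_FRAG);
	return 0;
}
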
diff --git a/drivers/net/wireless/prism54/isl_ioctl.c b/drivers/net/wireless/prism54/isl_ioctl.c
index 97fa14e0a479..3d75a7137d3c 100644
--- a/drivers/net/wireless/prism54/isl_ioctl.c
+++ b/drivers/net/wireless/prism54/isl_ioctl.c
@@ -2518,7 +2518,7 @@ enum {
2518 2518
2519#define PRISM2_HOSTAPD_MAX_BUF_SIZE 1024 2519#define PRISM2_HOSTAPD_MAX_BUF_SIZE 1024
2520#define PRISM2_HOSTAPD_GENERIC_ELEMENT_HDR_LEN \ 2520#define PRISM2_HOSTAPD_GENERIC_ELEMENT_HDR_LEN \
2521((int) (&((struct prism2_hostapd_param *) 0)->u.generic_elem.data)) 2521 offsetof(struct prism2_hostapd_param, u.generic_elem.data)
2522 2522
2523/* Maximum length for algorithm names (-1 for nul termination) 2523/* Maximum length for algorithm names (-1 for nul termination)
2524 * used in ioctl() */ 2524 * used in ioctl() */
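
The prism54 change above replaces a hand-rolled null-pointer cast with offsetof(), which yields the same header length without relying on undefined behaviour. A tiny sketch of the two forms, using a hypothetical struct in place of prism2_hostapd_param:

#include <stddef.h>
#include <stdio.h>

/* Hypothetical stand-in for struct prism2_hostapd_param. */
struct example_param {
	unsigned short cmd;
	unsigned short sta_addr[3];
	union {
		struct {
			unsigned char len;
			unsigned char data[8];
		} generic_elem;
	} u;
};

int main(void)
{
	/* Old style: pointer arithmetic on address 0.  It usually
	 * "works" but is undefined behaviour in C. */
	size_t old_way =
		(size_t)&((struct example_param *)0)->u.generic_elem.data;

	/* New style: the standard, well-defined way to get the offset. */
	size_t new_way = offsetof(struct example_param, u.generic_elem.data);

	printf("old=%zu new=%zu\n", old_way, new_way);
	return 0;
}
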
diff --git a/drivers/net/wireless/rt2x00/rt2500pci.c b/drivers/net/wireless/rt2x00/rt2500pci.c
index aa6dfb811c71..181a146b4768 100644
--- a/drivers/net/wireless/rt2x00/rt2500pci.c
+++ b/drivers/net/wireless/rt2x00/rt2500pci.c
@@ -1220,6 +1220,7 @@ static void rt2500pci_write_tx_desc(struct rt2x00_dev *rt2x00dev,
1220 rt2x00_set_field32(&word, TXD_W0_IFS, txdesc->ifs); 1220 rt2x00_set_field32(&word, TXD_W0_IFS, txdesc->ifs);
1221 rt2x00_set_field32(&word, TXD_W0_RETRY_MODE, 1221 rt2x00_set_field32(&word, TXD_W0_RETRY_MODE,
1222 test_bit(ENTRY_TXD_RETRY_MODE, &txdesc->flags)); 1222 test_bit(ENTRY_TXD_RETRY_MODE, &txdesc->flags));
1223 rt2x00_set_field32(&word, TXD_W0_DATABYTE_COUNT, skb->len);
1223 rt2x00_set_field32(&word, TXD_W0_CIPHER_ALG, CIPHER_NONE); 1224 rt2x00_set_field32(&word, TXD_W0_CIPHER_ALG, CIPHER_NONE);
1224 rt2x00_desc_write(txd, 0, word); 1225 rt2x00_desc_write(txd, 0, word);
1225} 1226}
diff --git a/drivers/net/wireless/rt2x00/rt2500usb.c b/drivers/net/wireless/rt2x00/rt2500usb.c
index 3558cb210747..cd5af656932d 100644
--- a/drivers/net/wireless/rt2x00/rt2500usb.c
+++ b/drivers/net/wireless/rt2x00/rt2500usb.c
@@ -633,6 +633,16 @@ static void rt2500usb_reset_tuner(struct rt2x00_dev *rt2x00dev)
633 rt2x00dev->link.vgc_level = value; 633 rt2x00dev->link.vgc_level = value;
634} 634}
635 635
636/*
637 * NOTE: This function is directly ported from the legacy driver, but 
638 * despite being declared it was never called. Although link tuning 
639 * sounds like a good idea, and usually works well for the other drivers,
640 * it does _not_ work with rt2500usb. Enabling this function leaves TX 
641 * working only until association kicks in. Immediately 
642 * after the successful association all TX frames will be kept in the
643 * hardware queue and never transmitted.
644 */
645#if 0
636static void rt2500usb_link_tuner(struct rt2x00_dev *rt2x00dev) 646static void rt2500usb_link_tuner(struct rt2x00_dev *rt2x00dev)
637{ 647{
638 int rssi = rt2x00_get_link_rssi(&rt2x00dev->link); 648 int rssi = rt2x00_get_link_rssi(&rt2x00dev->link);
@@ -752,6 +762,9 @@ dynamic_cca_tune:
752 rt2x00dev->link.vgc_level = r17; 762 rt2x00dev->link.vgc_level = r17;
753 } 763 }
754} 764}
765#else
766#define rt2500usb_link_tuner NULL
767#endif
755 768
756/* 769/*
757 * Initialization functions. 770 * Initialization functions.
@@ -1121,6 +1134,7 @@ static void rt2500usb_write_beacon(struct queue_entry *entry)
1121 int pipe = usb_sndbulkpipe(usb_dev, 1); 1134 int pipe = usb_sndbulkpipe(usb_dev, 1);
1122 int length; 1135 int length;
1123 u16 reg; 1136 u16 reg;
1137 u32 word, len;
1124 1138
1125 /* 1139 /*
1126 * Add the descriptor in front of the skb. 1140 * Add the descriptor in front of the skb.
@@ -1130,6 +1144,17 @@ static void rt2500usb_write_beacon(struct queue_entry *entry)
1130 skbdesc->desc = entry->skb->data; 1144 skbdesc->desc = entry->skb->data;
1131 1145
1132 /* 1146 /*
1147 * Adjust the beacon databyte count. The current number is
1148 * calculated before this function gets called, but falsely
1149 * assumes that the descriptor was already present in the SKB.
1150 */
1151 rt2x00_desc_read(skbdesc->desc, 0, &word);
1152 len = rt2x00_get_field32(word, TXD_W0_DATABYTE_COUNT);
1153 len += skbdesc->desc_len;
1154 rt2x00_set_field32(&word, TXD_W0_DATABYTE_COUNT, len);
1155 rt2x00_desc_write(skbdesc->desc, 0, word);
1156
1157 /*
1133 * Disable beaconing while we are reloading the beacon data, 1158 * Disable beaconing while we are reloading the beacon data,
1134 * otherwise we might be sending out invalid data. 1159 * otherwise we might be sending out invalid data.
1135 */ 1160 */
@@ -1364,6 +1389,9 @@ static int rt2500usb_validate_eeprom(struct rt2x00_dev *rt2x00dev)
1364 rt2x00_set_field16(&word, EEPROM_BBPTUNE_VGCLOWER, bbp); 1389 rt2x00_set_field16(&word, EEPROM_BBPTUNE_VGCLOWER, bbp);
1365 rt2x00_eeprom_write(rt2x00dev, EEPROM_BBPTUNE_VGC, word); 1390 rt2x00_eeprom_write(rt2x00dev, EEPROM_BBPTUNE_VGC, word);
1366 EEPROM(rt2x00dev, "BBPtune vgc: 0x%04x\n", word); 1391 EEPROM(rt2x00dev, "BBPtune vgc: 0x%04x\n", word);
1392 } else {
1393 rt2x00_set_field16(&word, EEPROM_BBPTUNE_VGCLOWER, bbp);
1394 rt2x00_eeprom_write(rt2x00dev, EEPROM_BBPTUNE_VGC, word);
1367 } 1395 }
1368 1396
1369 rt2x00_eeprom_read(rt2x00dev, EEPROM_BBPTUNE_R17, &word); 1397 rt2x00_eeprom_read(rt2x00dev, EEPROM_BBPTUNE_R17, &word);
@@ -1372,9 +1400,6 @@ static int rt2500usb_validate_eeprom(struct rt2x00_dev *rt2x00dev)
1372 rt2x00_set_field16(&word, EEPROM_BBPTUNE_R17_HIGH, 0x41); 1400 rt2x00_set_field16(&word, EEPROM_BBPTUNE_R17_HIGH, 0x41);
1373 rt2x00_eeprom_write(rt2x00dev, EEPROM_BBPTUNE_R17, word); 1401 rt2x00_eeprom_write(rt2x00dev, EEPROM_BBPTUNE_R17, word);
1374 EEPROM(rt2x00dev, "BBPtune r17: 0x%04x\n", word); 1402 EEPROM(rt2x00dev, "BBPtune r17: 0x%04x\n", word);
1375 } else {
1376 rt2x00_set_field16(&word, EEPROM_BBPTUNE_VGCLOWER, bbp);
1377 rt2x00_eeprom_write(rt2x00dev, EEPROM_BBPTUNE_VGC, word);
1378 } 1403 }
1379 1404
1380 rt2x00_eeprom_read(rt2x00dev, EEPROM_BBPTUNE_R24, &word); 1405 rt2x00_eeprom_read(rt2x00dev, EEPROM_BBPTUNE_R24, &word);
@@ -1650,7 +1675,6 @@ static void rt2500usb_probe_hw_mode(struct rt2x00_dev *rt2x00dev)
1650 * Initialize all hw fields. 1675 * Initialize all hw fields.
1651 */ 1676 */
1652 rt2x00dev->hw->flags = 1677 rt2x00dev->hw->flags =
1653 IEEE80211_HW_HOST_GEN_BEACON_TEMPLATE |
1654 IEEE80211_HW_RX_INCLUDES_FCS | 1678 IEEE80211_HW_RX_INCLUDES_FCS |
1655 IEEE80211_HW_HOST_BROADCAST_PS_BUFFERING | 1679 IEEE80211_HW_HOST_BROADCAST_PS_BUFFERING |
1656 IEEE80211_HW_SIGNAL_DBM; 1680 IEEE80211_HW_SIGNAL_DBM;
@@ -1726,6 +1750,7 @@ static int rt2500usb_probe_hw(struct rt2x00_dev *rt2x00dev)
1726 __set_bit(DRIVER_REQUIRE_ATIM_QUEUE, &rt2x00dev->flags); 1750 __set_bit(DRIVER_REQUIRE_ATIM_QUEUE, &rt2x00dev->flags);
1727 __set_bit(DRIVER_REQUIRE_BEACON_GUARD, &rt2x00dev->flags); 1751 __set_bit(DRIVER_REQUIRE_BEACON_GUARD, &rt2x00dev->flags);
1728 __set_bit(DRIVER_REQUIRE_SCHEDULED, &rt2x00dev->flags); 1752 __set_bit(DRIVER_REQUIRE_SCHEDULED, &rt2x00dev->flags);
1753 __set_bit(CONFIG_DISABLE_LINK_TUNING, &rt2x00dev->flags);
1729 1754
1730 /* 1755 /*
1731 * Set the rssi offset. 1756 * Set the rssi offset.
diff --git a/drivers/net/wireless/rt2x00/rt2x00.h b/drivers/net/wireless/rt2x00/rt2x00.h
index 07b03b3c7ef1..8b10ea41b204 100644
--- a/drivers/net/wireless/rt2x00/rt2x00.h
+++ b/drivers/net/wireless/rt2x00/rt2x00.h
@@ -108,7 +108,10 @@
108#define SHORT_PIFS ( SIFS + SHORT_SLOT_TIME ) 108#define SHORT_PIFS ( SIFS + SHORT_SLOT_TIME )
109#define DIFS ( PIFS + SLOT_TIME ) 109#define DIFS ( PIFS + SLOT_TIME )
110#define SHORT_DIFS ( SHORT_PIFS + SHORT_SLOT_TIME ) 110#define SHORT_DIFS ( SHORT_PIFS + SHORT_SLOT_TIME )
111#define EIFS ( SIFS + (8 * (IEEE80211_HEADER + ACK_SIZE)) ) 111#define EIFS ( SIFS + DIFS + \
112 (8 * (IEEE80211_HEADER + ACK_SIZE)) )
113#define SHORT_EIFS ( SIFS + SHORT_DIFS + \
114 (8 * (IEEE80211_HEADER + ACK_SIZE)) )
112 115
113/* 116/*
114 * Chipset identification 117 * Chipset identification
@@ -365,6 +368,12 @@ struct rt2x00_intf {
365#define DELAYED_CONFIG_ERP 0x00000002 368#define DELAYED_CONFIG_ERP 0x00000002
366#define DELAYED_LED_ASSOC 0x00000004 369#define DELAYED_LED_ASSOC 0x00000004
367 370
371 /*
372 * Software sequence counter, this is only required
373 * for hardware which doesn't support hardware
374 * sequence counting.
375 */
376 spinlock_t seqlock;
368 u16 seqno; 377 u16 seqno;
369}; 378};
370 379
@@ -597,6 +606,7 @@ enum rt2x00_flags {
597 DEVICE_STARTED_SUSPEND, 606 DEVICE_STARTED_SUSPEND,
598 DEVICE_ENABLED_RADIO, 607 DEVICE_ENABLED_RADIO,
599 DEVICE_DISABLED_RADIO_HW, 608 DEVICE_DISABLED_RADIO_HW,
609 DEVICE_DIRTY_CONFIG,
600 610
601 /* 611 /*
602 * Driver features 612 * Driver features
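
The rt2x00.h hunk above redefines EIFS to include DIFS and adds a short-slot variant. As a worked example, assuming the values this header is commonly built from (SIFS = 10 us, SLOT_TIME = 20 us, SHORT_SLOT_TIME = 9 us, IEEE80211_HEADER = 24 bytes, ACK_SIZE = 14 bytes; treat these constants as assumptions, not quotations from the file):

#include <stdio.h>

/* Assumed values; check rt2x00.h for the authoritative definitions. */
#define SIFS			10
#define SLOT_TIME		20
#define SHORT_SLOT_TIME		9
#define IEEE80211_HEADER	24
#define ACK_SIZE		14

#define PIFS		(SIFS + SLOT_TIME)
#define SHORT_PIFS	(SIFS + SHORT_SLOT_TIME)
#define DIFS		(PIFS + SLOT_TIME)
#define SHORT_DIFS	(SHORT_PIFS + SHORT_SLOT_TIME)

/* 8 * bytes equals microseconds at the 1 Mbit/s lowest basic rate. */
#define EIFS		(SIFS + DIFS + 8 * (IEEE80211_HEADER + ACK_SIZE))
#define SHORT_EIFS	(SIFS + SHORT_DIFS + 8 * (IEEE80211_HEADER + ACK_SIZE))

int main(void)
{
	printf("DIFS=%d us, EIFS=%d us\n", DIFS, EIFS);		/* 50, 364 */
	printf("SHORT_DIFS=%d us, SHORT_EIFS=%d us\n",
	       SHORT_DIFS, SHORT_EIFS);				/* 28, 342 */
	return 0;
}
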
diff --git a/drivers/net/wireless/rt2x00/rt2x00config.c b/drivers/net/wireless/rt2x00/rt2x00config.c
index f20ca712504f..d134c3be539a 100644
--- a/drivers/net/wireless/rt2x00/rt2x00config.c
+++ b/drivers/net/wireless/rt2x00/rt2x00config.c
@@ -254,6 +254,8 @@ config:
254 libconf.ant.rx = default_ant->rx; 254 libconf.ant.rx = default_ant->rx;
255 else if (active_ant->rx == ANTENNA_SW_DIVERSITY) 255 else if (active_ant->rx == ANTENNA_SW_DIVERSITY)
256 libconf.ant.rx = ANTENNA_B; 256 libconf.ant.rx = ANTENNA_B;
257 else
258 libconf.ant.rx = active_ant->rx;
257 259
258 if (conf->antenna_sel_tx) 260 if (conf->antenna_sel_tx)
259 libconf.ant.tx = conf->antenna_sel_tx; 261 libconf.ant.tx = conf->antenna_sel_tx;
@@ -261,6 +263,8 @@ config:
261 libconf.ant.tx = default_ant->tx; 263 libconf.ant.tx = default_ant->tx;
262 else if (active_ant->tx == ANTENNA_SW_DIVERSITY) 264 else if (active_ant->tx == ANTENNA_SW_DIVERSITY)
263 libconf.ant.tx = ANTENNA_B; 265 libconf.ant.tx = ANTENNA_B;
266 else
267 libconf.ant.tx = active_ant->tx;
264 } 268 }
265 269
266 if (flags & CONFIG_UPDATE_SLOT_TIME) { 270 if (flags & CONFIG_UPDATE_SLOT_TIME) {
@@ -271,7 +275,7 @@ config:
271 libconf.sifs = SIFS; 275 libconf.sifs = SIFS;
272 libconf.pifs = short_slot_time ? SHORT_PIFS : PIFS; 276 libconf.pifs = short_slot_time ? SHORT_PIFS : PIFS;
273 libconf.difs = short_slot_time ? SHORT_DIFS : DIFS; 277 libconf.difs = short_slot_time ? SHORT_DIFS : DIFS;
274 libconf.eifs = EIFS; 278 libconf.eifs = short_slot_time ? SHORT_EIFS : EIFS;
275 } 279 }
276 280
277 libconf.conf = conf; 281 libconf.conf = conf;
diff --git a/drivers/net/wireless/rt2x00/rt2x00debug.c b/drivers/net/wireless/rt2x00/rt2x00debug.c
index 300cf061035f..6bee1d611bbf 100644
--- a/drivers/net/wireless/rt2x00/rt2x00debug.c
+++ b/drivers/net/wireless/rt2x00/rt2x00debug.c
@@ -372,9 +372,6 @@ static ssize_t rt2x00debug_write_##__name(struct file *file, \
372 if (*offset) \ 372 if (*offset) \
373 return 0; \ 373 return 0; \
374 \ 374 \
375 if (!capable(CAP_NET_ADMIN)) \
376 return -EPERM; \
377 \
378 if (intf->offset_##__name >= debug->__name.word_count) \ 375 if (intf->offset_##__name >= debug->__name.word_count) \
379 return -EINVAL; \ 376 return -EINVAL; \
380 \ 377 \
@@ -454,7 +451,7 @@ static struct dentry *rt2x00debug_create_file_driver(const char *name,
454 data += sprintf(data, "compiled: %s %s\n", __DATE__, __TIME__); 451 data += sprintf(data, "compiled: %s %s\n", __DATE__, __TIME__);
455 blob->size = strlen(blob->data); 452 blob->size = strlen(blob->data);
456 453
457 return debugfs_create_blob(name, S_IRUGO, intf->driver_folder, blob); 454 return debugfs_create_blob(name, S_IRUSR, intf->driver_folder, blob);
458} 455}
459 456
460static struct dentry *rt2x00debug_create_file_chipset(const char *name, 457static struct dentry *rt2x00debug_create_file_chipset(const char *name,
@@ -482,7 +479,7 @@ static struct dentry *rt2x00debug_create_file_chipset(const char *name,
482 data += sprintf(data, "rf length: %d\n", debug->rf.word_count); 479 data += sprintf(data, "rf length: %d\n", debug->rf.word_count);
483 blob->size = strlen(blob->data); 480 blob->size = strlen(blob->data);
484 481
485 return debugfs_create_blob(name, S_IRUGO, intf->driver_folder, blob); 482 return debugfs_create_blob(name, S_IRUSR, intf->driver_folder, blob);
486} 483}
487 484
488void rt2x00debug_register(struct rt2x00_dev *rt2x00dev) 485void rt2x00debug_register(struct rt2x00_dev *rt2x00dev)
@@ -517,7 +514,7 @@ void rt2x00debug_register(struct rt2x00_dev *rt2x00dev)
517 if (IS_ERR(intf->chipset_entry)) 514 if (IS_ERR(intf->chipset_entry))
518 goto exit; 515 goto exit;
519 516
520 intf->dev_flags = debugfs_create_file("dev_flags", S_IRUGO, 517 intf->dev_flags = debugfs_create_file("dev_flags", S_IRUSR,
521 intf->driver_folder, intf, 518 intf->driver_folder, intf,
522 &rt2x00debug_fop_dev_flags); 519 &rt2x00debug_fop_dev_flags);
523 if (IS_ERR(intf->dev_flags)) 520 if (IS_ERR(intf->dev_flags))
@@ -532,7 +529,7 @@ void rt2x00debug_register(struct rt2x00_dev *rt2x00dev)
532({ \ 529({ \
533 (__intf)->__name##_off_entry = \ 530 (__intf)->__name##_off_entry = \
534 debugfs_create_u32(__stringify(__name) "_offset", \ 531 debugfs_create_u32(__stringify(__name) "_offset", \
535 S_IRUGO | S_IWUSR, \ 532 S_IRUSR | S_IWUSR, \
536 (__intf)->register_folder, \ 533 (__intf)->register_folder, \
537 &(__intf)->offset_##__name); \ 534 &(__intf)->offset_##__name); \
538 if (IS_ERR((__intf)->__name##_off_entry)) \ 535 if (IS_ERR((__intf)->__name##_off_entry)) \
@@ -540,7 +537,7 @@ void rt2x00debug_register(struct rt2x00_dev *rt2x00dev)
540 \ 537 \
541 (__intf)->__name##_val_entry = \ 538 (__intf)->__name##_val_entry = \
542 debugfs_create_file(__stringify(__name) "_value", \ 539 debugfs_create_file(__stringify(__name) "_value", \
543 S_IRUGO | S_IWUSR, \ 540 S_IRUSR | S_IWUSR, \
544 (__intf)->register_folder, \ 541 (__intf)->register_folder, \
545 (__intf), &rt2x00debug_fop_##__name);\ 542 (__intf), &rt2x00debug_fop_##__name);\
546 if (IS_ERR((__intf)->__name##_val_entry)) \ 543 if (IS_ERR((__intf)->__name##_val_entry)) \
@@ -560,7 +557,7 @@ void rt2x00debug_register(struct rt2x00_dev *rt2x00dev)
560 goto exit; 557 goto exit;
561 558
562 intf->queue_frame_dump_entry = 559 intf->queue_frame_dump_entry =
563 debugfs_create_file("dump", S_IRUGO, intf->queue_folder, 560 debugfs_create_file("dump", S_IRUSR, intf->queue_folder,
564 intf, &rt2x00debug_fop_queue_dump); 561 intf, &rt2x00debug_fop_queue_dump);
565 if (IS_ERR(intf->queue_frame_dump_entry)) 562 if (IS_ERR(intf->queue_frame_dump_entry))
566 goto exit; 563 goto exit;
@@ -569,7 +566,7 @@ void rt2x00debug_register(struct rt2x00_dev *rt2x00dev)
569 init_waitqueue_head(&intf->frame_dump_waitqueue); 566 init_waitqueue_head(&intf->frame_dump_waitqueue);
570 567
571 intf->queue_stats_entry = 568 intf->queue_stats_entry =
572 debugfs_create_file("queue", S_IRUGO, intf->queue_folder, 569 debugfs_create_file("queue", S_IRUSR, intf->queue_folder,
573 intf, &rt2x00debug_fop_queue_stats); 570 intf, &rt2x00debug_fop_queue_stats);
574 571
575 return; 572 return;
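
The rt2x00debug.c hunks above tighten the debugfs entries from world-readable S_IRUGO to owner-only S_IRUSR. For reference, a short user-space snippet printing the octal modes behind those macros (user space has no S_IRUGO, so it is spelled out):

#include <stdio.h>
#include <sys/stat.h>

int main(void)
{
	/* S_IRUGO is S_IRUSR | S_IRGRP | S_IROTH in kernel headers. */
	printf("S_IRUSR                 = %04o\n", (unsigned)S_IRUSR);
	printf("S_IRUSR|S_IRGRP|S_IROTH = %04o\n",
	       (unsigned)(S_IRUSR | S_IRGRP | S_IROTH));
	printf("S_IRUSR|S_IWUSR         = %04o\n",
	       (unsigned)(S_IRUSR | S_IWUSR));
	return 0;
}
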
diff --git a/drivers/net/wireless/rt2x00/rt2x00dev.c b/drivers/net/wireless/rt2x00/rt2x00dev.c
index 8c93eb8353b0..f42283ad7b02 100644
--- a/drivers/net/wireless/rt2x00/rt2x00dev.c
+++ b/drivers/net/wireless/rt2x00/rt2x00dev.c
@@ -1013,6 +1013,7 @@ int rt2x00lib_start(struct rt2x00_dev *rt2x00dev)
1013 rt2x00dev->intf_associated = 0; 1013 rt2x00dev->intf_associated = 0;
1014 1014
1015 __set_bit(DEVICE_STARTED, &rt2x00dev->flags); 1015 __set_bit(DEVICE_STARTED, &rt2x00dev->flags);
1016 __set_bit(DEVICE_DIRTY_CONFIG, &rt2x00dev->flags);
1016 1017
1017 return 0; 1018 return 0;
1018} 1019}
@@ -1237,9 +1238,9 @@ int rt2x00lib_resume(struct rt2x00_dev *rt2x00dev)
1237 /* 1238 /*
1238 * Reconfigure device. 1239 * Reconfigure device.
1239 */ 1240 */
1240 rt2x00lib_config(rt2x00dev, &rt2x00dev->hw->conf, 1); 1241 retval = rt2x00mac_config(rt2x00dev->hw, &rt2x00dev->hw->conf);
1241 if (!rt2x00dev->hw->conf.radio_enabled) 1242 if (retval)
1242 rt2x00lib_disable_radio(rt2x00dev); 1243 goto exit;
1243 1244
1244 /* 1245 /*
1245 * Iterate over each active interface to 1246 * Iterate over each active interface to
diff --git a/drivers/net/wireless/rt2x00/rt2x00lib.h b/drivers/net/wireless/rt2x00/rt2x00lib.h
index f2c9b0e79b5f..c5fb3a72cf37 100644
--- a/drivers/net/wireless/rt2x00/rt2x00lib.h
+++ b/drivers/net/wireless/rt2x00/rt2x00lib.h
@@ -125,13 +125,6 @@ void rt2x00queue_unmap_skb(struct rt2x00_dev *rt2x00dev, struct sk_buff *skb);
125void rt2x00queue_free_skb(struct rt2x00_dev *rt2x00dev, struct sk_buff *skb); 125void rt2x00queue_free_skb(struct rt2x00_dev *rt2x00dev, struct sk_buff *skb);
126 126
127/** 127/**
128 * rt2x00queue_free_skb - free a skb
129 * @rt2x00dev: Pointer to &struct rt2x00_dev.
130 * @skb: The skb to free.
131 */
132void rt2x00queue_free_skb(struct rt2x00_dev *rt2x00dev, struct sk_buff *skb);
133
134/**
135 * rt2x00queue_write_tx_frame - Write TX frame to hardware 128 * rt2x00queue_write_tx_frame - Write TX frame to hardware
136 * @queue: Queue over which the frame should be sent 129 * @queue: Queue over which the frame should be sent
137 * @skb: The skb to send 130 * @skb: The skb to send
diff --git a/drivers/net/wireless/rt2x00/rt2x00mac.c b/drivers/net/wireless/rt2x00/rt2x00mac.c
index f1dcbaa80c3c..d06507388635 100644
--- a/drivers/net/wireless/rt2x00/rt2x00mac.c
+++ b/drivers/net/wireless/rt2x00/rt2x00mac.c
@@ -63,7 +63,7 @@ static int rt2x00mac_tx_rts_cts(struct rt2x00_dev *rt2x00dev,
63 */ 63 */
64 memcpy(skb->cb, frag_skb->cb, sizeof(skb->cb)); 64 memcpy(skb->cb, frag_skb->cb, sizeof(skb->cb));
65 rts_info = IEEE80211_SKB_CB(skb); 65 rts_info = IEEE80211_SKB_CB(skb);
66 rts_info->flags |= IEEE80211_TX_CTL_DO_NOT_ENCRYPT; 66 rts_info->control.hw_key = NULL;
67 rts_info->flags &= ~IEEE80211_TX_CTL_USE_RTS_CTS; 67 rts_info->flags &= ~IEEE80211_TX_CTL_USE_RTS_CTS;
68 rts_info->flags &= ~IEEE80211_TX_CTL_USE_CTS_PROTECT; 68 rts_info->flags &= ~IEEE80211_TX_CTL_USE_CTS_PROTECT;
69 rts_info->flags &= ~IEEE80211_TX_CTL_REQ_TX_STATUS; 69 rts_info->flags &= ~IEEE80211_TX_CTL_REQ_TX_STATUS;
@@ -83,6 +83,7 @@ static int rt2x00mac_tx_rts_cts(struct rt2x00_dev *rt2x00dev,
83 (struct ieee80211_rts *)(skb->data)); 83 (struct ieee80211_rts *)(skb->data));
84 84
85 if (rt2x00queue_write_tx_frame(queue, skb)) { 85 if (rt2x00queue_write_tx_frame(queue, skb)) {
86 dev_kfree_skb_any(skb);
86 WARNING(rt2x00dev, "Failed to send RTS/CTS frame.\n"); 87 WARNING(rt2x00dev, "Failed to send RTS/CTS frame.\n");
87 return NETDEV_TX_BUSY; 88 return NETDEV_TX_BUSY;
88 } 89 }
@@ -96,7 +97,6 @@ int rt2x00mac_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
96 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb); 97 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
97 struct ieee80211_hdr *ieee80211hdr = (struct ieee80211_hdr *)skb->data; 98 struct ieee80211_hdr *ieee80211hdr = (struct ieee80211_hdr *)skb->data;
98 enum data_queue_qid qid = skb_get_queue_mapping(skb); 99 enum data_queue_qid qid = skb_get_queue_mapping(skb);
99 struct rt2x00_intf *intf = vif_to_intf(tx_info->control.vif);
100 struct data_queue *queue; 100 struct data_queue *queue;
101 u16 frame_control; 101 u16 frame_control;
102 102
@@ -152,18 +152,6 @@ int rt2x00mac_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
152 } 152 }
153 } 153 }
154 154
155 /*
156 * XXX: This is as wrong as the old mac80211 code was,
157 * due to beacons not getting sequence numbers assigned
158 * properly.
159 */
160 if (tx_info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ) {
161 if (tx_info->flags & IEEE80211_TX_CTL_FIRST_FRAGMENT)
162 intf->seqno += 0x10;
163 ieee80211hdr->seq_ctrl &= cpu_to_le16(IEEE80211_SCTL_FRAG);
164 ieee80211hdr->seq_ctrl |= cpu_to_le16(intf->seqno);
165 }
166
167 if (rt2x00queue_write_tx_frame(queue, skb)) { 155 if (rt2x00queue_write_tx_frame(queue, skb)) {
168 ieee80211_stop_queue(rt2x00dev->hw, qid); 156 ieee80211_stop_queue(rt2x00dev->hw, qid);
169 return NETDEV_TX_BUSY; 157 return NETDEV_TX_BUSY;
@@ -215,23 +203,43 @@ int rt2x00mac_add_interface(struct ieee80211_hw *hw,
215 !test_bit(DEVICE_STARTED, &rt2x00dev->flags)) 203 !test_bit(DEVICE_STARTED, &rt2x00dev->flags))
216 return -ENODEV; 204 return -ENODEV;
217 205
218 /* 206 switch (conf->type) {
219 * We don't support mixed combinations of sta and ap virtual 207 case IEEE80211_IF_TYPE_AP:
220 * interfaces. We can only add this interface when the rival 208 /*
221 * interface count is 0. 209 * We don't support mixed combinations of
222 */ 210 * sta and ap interfaces.
223 if ((conf->type == IEEE80211_IF_TYPE_AP && rt2x00dev->intf_sta_count) || 211 */
224 (conf->type != IEEE80211_IF_TYPE_AP && rt2x00dev->intf_ap_count)) 212 if (rt2x00dev->intf_sta_count)
225 return -ENOBUFS; 213 return -ENOBUFS;
226 214
227 /* 215 /*
228 * Check if we exceeded the maximum amount of supported interfaces. 216 * Check if we exceeded the maximum amount
229 */ 217 * of supported interfaces.
230 if ((conf->type == IEEE80211_IF_TYPE_AP && 218 */
231 rt2x00dev->intf_ap_count >= rt2x00dev->ops->max_ap_intf) || 219 if (rt2x00dev->intf_ap_count >= rt2x00dev->ops->max_ap_intf)
232 (conf->type != IEEE80211_IF_TYPE_AP && 220 return -ENOBUFS;
233 rt2x00dev->intf_sta_count >= rt2x00dev->ops->max_sta_intf)) 221
234 return -ENOBUFS; 222 break;
223 case IEEE80211_IF_TYPE_STA:
224 case IEEE80211_IF_TYPE_IBSS:
225 /*
226 * We don't support mixed combinations of
227 * sta and ap interfaces.
228 */
229 if (rt2x00dev->intf_ap_count)
230 return -ENOBUFS;
231
232 /*
233 * Check if we exceeded the maximum amount
234 * of supported interfaces.
235 */
236 if (rt2x00dev->intf_sta_count >= rt2x00dev->ops->max_sta_intf)
237 return -ENOBUFS;
238
239 break;
240 default:
241 return -EINVAL;
242 }
235 243
236 /* 244 /*
237 * Loop through all beacon queues to find a free 245 * Loop through all beacon queues to find a free
@@ -259,6 +267,7 @@ int rt2x00mac_add_interface(struct ieee80211_hw *hw,
259 rt2x00dev->intf_sta_count++; 267 rt2x00dev->intf_sta_count++;
260 268
261 spin_lock_init(&intf->lock); 269 spin_lock_init(&intf->lock);
270 spin_lock_init(&intf->seqlock);
262 intf->beacon = entry; 271 intf->beacon = entry;
263 272
264 if (conf->type == IEEE80211_IF_TYPE_AP) 273 if (conf->type == IEEE80211_IF_TYPE_AP)
@@ -322,6 +331,7 @@ EXPORT_SYMBOL_GPL(rt2x00mac_remove_interface);
322int rt2x00mac_config(struct ieee80211_hw *hw, struct ieee80211_conf *conf) 331int rt2x00mac_config(struct ieee80211_hw *hw, struct ieee80211_conf *conf)
323{ 332{
324 struct rt2x00_dev *rt2x00dev = hw->priv; 333 struct rt2x00_dev *rt2x00dev = hw->priv;
334 int force_reconfig;
325 335
326 /* 336 /*
327 * Mac80211 might be calling this function while we are trying 337 * Mac80211 might be calling this function while we are trying
@@ -341,7 +351,17 @@ int rt2x00mac_config(struct ieee80211_hw *hw, struct ieee80211_conf *conf)
341 rt2x00lib_toggle_rx(rt2x00dev, STATE_RADIO_RX_OFF); 351 rt2x00lib_toggle_rx(rt2x00dev, STATE_RADIO_RX_OFF);
342 } 352 }
343 353
344 rt2x00lib_config(rt2x00dev, conf, 0); 354 /*
355 * When the DEVICE_DIRTY_CONFIG flag is set, the device has recently
356 * been started and the configuration must be forced upon the hardware.
357 * Otherwise registers will not be initialized correctly and could
358 * result in non-working hardware because essential registers aren't
359 * initialized.
360 */
361 force_reconfig =
362 __test_and_clear_bit(DEVICE_DIRTY_CONFIG, &rt2x00dev->flags);
363
364 rt2x00lib_config(rt2x00dev, conf, force_reconfig);
345 365
346 /* 366 /*
347 * Reenable RX only if the radio should be on. 367 * Reenable RX only if the radio should be on.
diff --git a/drivers/net/wireless/rt2x00/rt2x00queue.c b/drivers/net/wireless/rt2x00/rt2x00queue.c
index 7f442030f5ad..898cdd7f57d9 100644
--- a/drivers/net/wireless/rt2x00/rt2x00queue.c
+++ b/drivers/net/wireless/rt2x00/rt2x00queue.c
@@ -120,6 +120,7 @@ static void rt2x00queue_create_tx_descriptor(struct queue_entry *entry,
120{ 120{
121 struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev; 121 struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev;
122 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(entry->skb); 122 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(entry->skb);
123 struct rt2x00_intf *intf = vif_to_intf(tx_info->control.vif);
123 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)entry->skb->data; 124 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)entry->skb->data;
124 struct ieee80211_rate *rate = 125 struct ieee80211_rate *rate =
125 ieee80211_get_tx_rate(rt2x00dev->hw, tx_info); 126 ieee80211_get_tx_rate(rt2x00dev->hw, tx_info);
@@ -127,6 +128,7 @@ static void rt2x00queue_create_tx_descriptor(struct queue_entry *entry,
127 unsigned int data_length; 128 unsigned int data_length;
128 unsigned int duration; 129 unsigned int duration;
129 unsigned int residual; 130 unsigned int residual;
131 unsigned long irqflags;
130 132
131 memset(txdesc, 0, sizeof(*txdesc)); 133 memset(txdesc, 0, sizeof(*txdesc));
132 134
@@ -200,6 +202,31 @@ static void rt2x00queue_create_tx_descriptor(struct queue_entry *entry,
200 } 202 }
201 203
202 /* 204 /*
205 * Hardware should insert sequence counter.
206 * FIXME: We insert a software sequence counter first for
207 * hardware that doesn't support hardware sequence counting.
208 *
209 * This is wrong because beacons are not getting sequence
210 * numbers assigned properly.
211 *
212 * A secondary problem exists for drivers that cannot toggle
213 * sequence counting per-frame, since those will override the
214 * sequence counter given by mac80211.
215 */
216 if (tx_info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ) {
217 spin_lock_irqsave(&intf->seqlock, irqflags);
218
219 if (test_bit(ENTRY_TXD_FIRST_FRAGMENT, &txdesc->flags))
220 intf->seqno += 0x10;
221 hdr->seq_ctrl &= cpu_to_le16(IEEE80211_SCTL_FRAG);
222 hdr->seq_ctrl |= cpu_to_le16(intf->seqno);
223
224 spin_unlock_irqrestore(&intf->seqlock, irqflags);
225
226 __set_bit(ENTRY_TXD_GENERATE_SEQ, &txdesc->flags);
227 }
228
229 /*
203 * PLCP setup 230 * PLCP setup
204 * Length calculation depends on OFDM/CCK rate. 231 * Length calculation depends on OFDM/CCK rate.
205 */ 232 */
@@ -466,9 +493,12 @@ void rt2x00queue_init_rx(struct rt2x00_dev *rt2x00dev)
466 if (!rt2x00dev->ops->lib->init_rxentry) 493 if (!rt2x00dev->ops->lib->init_rxentry)
467 return; 494 return;
468 495
469 for (i = 0; i < queue->limit; i++) 496 for (i = 0; i < queue->limit; i++) {
497 queue->entries[i].flags = 0;
498
470 rt2x00dev->ops->lib->init_rxentry(rt2x00dev, 499 rt2x00dev->ops->lib->init_rxentry(rt2x00dev,
471 &queue->entries[i]); 500 &queue->entries[i]);
501 }
472} 502}
473 503
474void rt2x00queue_init_tx(struct rt2x00_dev *rt2x00dev) 504void rt2x00queue_init_tx(struct rt2x00_dev *rt2x00dev)
@@ -482,9 +512,12 @@ void rt2x00queue_init_tx(struct rt2x00_dev *rt2x00dev)
482 if (!rt2x00dev->ops->lib->init_txentry) 512 if (!rt2x00dev->ops->lib->init_txentry)
483 continue; 513 continue;
484 514
485 for (i = 0; i < queue->limit; i++) 515 for (i = 0; i < queue->limit; i++) {
516 queue->entries[i].flags = 0;
517
486 rt2x00dev->ops->lib->init_txentry(rt2x00dev, 518 rt2x00dev->ops->lib->init_txentry(rt2x00dev,
487 &queue->entries[i]); 519 &queue->entries[i]);
520 }
488 } 521 }
489} 522}
490 523
diff --git a/drivers/net/wireless/rt2x00/rt2x00queue.h b/drivers/net/wireless/rt2x00/rt2x00queue.h
index 8945945c892e..a4a8c57004db 100644
--- a/drivers/net/wireless/rt2x00/rt2x00queue.h
+++ b/drivers/net/wireless/rt2x00/rt2x00queue.h
@@ -199,6 +199,7 @@ struct txdone_entry_desc {
199 * @ENTRY_TXD_RTS_FRAME: This frame is a RTS frame. 199 * @ENTRY_TXD_RTS_FRAME: This frame is a RTS frame.
200 * @ENTRY_TXD_CTS_FRAME: This frame is a CTS-to-self frame. 200 * @ENTRY_TXD_CTS_FRAME: This frame is a CTS-to-self frame.
201 * @ENTRY_TXD_OFDM_RATE: This frame is sent out with an OFDM rate. 201 * @ENTRY_TXD_OFDM_RATE: This frame is sent out with an OFDM rate.
202 * @ENTRY_TXD_GENERATE_SEQ: This frame requires a sequence counter.
202 * @ENTRY_TXD_FIRST_FRAGMENT: This is the first frame. 203 * @ENTRY_TXD_FIRST_FRAGMENT: This is the first frame.
203 * @ENTRY_TXD_MORE_FRAG: This frame is followed by another fragment. 204 * @ENTRY_TXD_MORE_FRAG: This frame is followed by another fragment.
204 * @ENTRY_TXD_REQ_TIMESTAMP: Require timestamp to be inserted. 205 * @ENTRY_TXD_REQ_TIMESTAMP: Require timestamp to be inserted.
@@ -210,6 +211,7 @@ enum txentry_desc_flags {
210 ENTRY_TXD_RTS_FRAME, 211 ENTRY_TXD_RTS_FRAME,
211 ENTRY_TXD_CTS_FRAME, 212 ENTRY_TXD_CTS_FRAME,
212 ENTRY_TXD_OFDM_RATE, 213 ENTRY_TXD_OFDM_RATE,
214 ENTRY_TXD_GENERATE_SEQ,
213 ENTRY_TXD_FIRST_FRAGMENT, 215 ENTRY_TXD_FIRST_FRAGMENT,
214 ENTRY_TXD_MORE_FRAG, 216 ENTRY_TXD_MORE_FRAG,
215 ENTRY_TXD_REQ_TIMESTAMP, 217 ENTRY_TXD_REQ_TIMESTAMP,
diff --git a/drivers/net/wireless/rt2x00/rt2x00usb.c b/drivers/net/wireless/rt2x00/rt2x00usb.c
index 83862e7f7aec..8d76bb2e0312 100644
--- a/drivers/net/wireless/rt2x00/rt2x00usb.c
+++ b/drivers/net/wireless/rt2x00/rt2x00usb.c
@@ -122,6 +122,38 @@ int rt2x00usb_vendor_request_buff(struct rt2x00_dev *rt2x00dev,
122} 122}
123EXPORT_SYMBOL_GPL(rt2x00usb_vendor_request_buff); 123EXPORT_SYMBOL_GPL(rt2x00usb_vendor_request_buff);
124 124
125int rt2x00usb_vendor_request_large_buff(struct rt2x00_dev *rt2x00dev,
126 const u8 request, const u8 requesttype,
127 const u16 offset, const void *buffer,
128 const u16 buffer_length,
129 const int timeout)
130{
131 int status = 0;
132 unsigned char *tb;
133 u16 off, len, bsize;
134
135 mutex_lock(&rt2x00dev->usb_cache_mutex);
136
137 tb = (char *)buffer;
138 off = offset;
139 len = buffer_length;
140 while (len && !status) {
141 bsize = min_t(u16, CSR_CACHE_SIZE, len);
142 status = rt2x00usb_vendor_req_buff_lock(rt2x00dev, request,
143 requesttype, off, tb,
144 bsize, timeout);
145
146 tb += bsize;
147 len -= bsize;
148 off += bsize;
149 }
150
151 mutex_unlock(&rt2x00dev->usb_cache_mutex);
152
153 return status;
154}
155EXPORT_SYMBOL_GPL(rt2x00usb_vendor_request_large_buff);
156
125/* 157/*
126 * TX data handlers. 158 * TX data handlers.
127 */ 159 */
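
rt2x00usb_vendor_request_large_buff() above splits one logical register write into CSR_CACHE_SIZE-sized transfers while holding the USB cache mutex. A user-space sketch of the same chunking loop, with a stub standing in for rt2x00usb_vendor_req_buff_lock() (the names here are illustrative):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define CSR_CACHE_SIZE 64		/* per the rt2x00usb.h hunk below */

/* Stub for the real per-chunk register transfer. */
static int do_transfer(uint16_t offset, const uint8_t *buf, uint16_t len)
{
	printf("write %u bytes at offset 0x%04x\n", len, offset);
	return 0;
}

/* Mirrors the loop in rt2x00usb_vendor_request_large_buff(): walk the
 * buffer in CSR_CACHE_SIZE chunks, advancing both the data pointer and
 * the register offset, and stop on the first error. */
static int write_large_buff(uint16_t offset, const void *buffer,
                            uint16_t length)
{
	const uint8_t *tb = buffer;
	int status = 0;

	while (length && !status) {
		uint16_t bsize = length < CSR_CACHE_SIZE ? length
							 : CSR_CACHE_SIZE;

		status = do_transfer(offset, tb, bsize);
		tb += bsize;
		length -= bsize;
		offset += bsize;
	}
	return status;
}

int main(void)
{
	uint8_t firmware[150];

	memset(firmware, 0xAB, sizeof(firmware));
	return write_large_buff(0x0800, firmware, sizeof(firmware));
}
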
diff --git a/drivers/net/wireless/rt2x00/rt2x00usb.h b/drivers/net/wireless/rt2x00/rt2x00usb.h
index aad794adf52c..3b4a67417f95 100644
--- a/drivers/net/wireless/rt2x00/rt2x00usb.h
+++ b/drivers/net/wireless/rt2x00/rt2x00usb.h
@@ -70,8 +70,7 @@
70/* 70/*
71 * Cache size 71 * Cache size
72 */ 72 */
73#define CSR_CACHE_SIZE 8 73#define CSR_CACHE_SIZE 64
74#define CSR_CACHE_SIZE_FIRMWARE 64
75 74
76/* 75/*
77 * USB request types. 76 * USB request types.
@@ -172,6 +171,25 @@ int rt2x00usb_vendor_req_buff_lock(struct rt2x00_dev *rt2x00dev,
172 const u16 buffer_length, const int timeout); 171 const u16 buffer_length, const int timeout);
173 172
174/** 173/**
174 * rt2x00usb_vendor_request_large_buff - Send register command to device (buffered)
175 * @rt2x00dev: Pointer to &struct rt2x00_dev
176 * @request: USB vendor command (See &enum rt2x00usb_vendor_request)
177 * @requesttype: Request type &USB_VENDOR_REQUEST_*
178 * @offset: Register start offset to perform action on
179 * @buffer: Buffer where information will be read/written to by device
180 * @buffer_length: Size of &buffer
181 * @timeout: Operation timeout
182 *
183 * This function is used to transfer register data in blocks larger
184 * than CSR_CACHE_SIZE. Use for firmware upload, keys and beacons.
185 */
186int rt2x00usb_vendor_request_large_buff(struct rt2x00_dev *rt2x00dev,
187 const u8 request, const u8 requesttype,
188 const u16 offset, const void *buffer,
189 const u16 buffer_length,
190 const int timeout);
191
192/**
175 * rt2x00usb_vendor_request_sw - Send single register command to device 193 * rt2x00usb_vendor_request_sw - Send single register command to device
176 * @rt2x00dev: Pointer to &struct rt2x00_dev 194 * @rt2x00dev: Pointer to &struct rt2x00_dev
177 * @request: USB vendor command (See &enum rt2x00usb_vendor_request) 195 * @request: USB vendor command (See &enum rt2x00usb_vendor_request)
diff --git a/drivers/net/wireless/rt2x00/rt61pci.c b/drivers/net/wireless/rt2x00/rt61pci.c
index f7c1f92c1448..087e90b328cd 100644
--- a/drivers/net/wireless/rt2x00/rt61pci.c
+++ b/drivers/net/wireless/rt2x00/rt61pci.c
@@ -1004,6 +1004,11 @@ static int rt61pci_load_firmware(struct rt2x00_dev *rt2x00dev, const void *data,
1004 } 1004 }
1005 1005
1006 /* 1006 /*
1007 * Hardware needs another millisecond before it is ready.
1008 */
1009 msleep(1);
1010
1011 /*
1007 * Reset MAC and BBP registers. 1012 * Reset MAC and BBP registers.
1008 */ 1013 */
1009 reg = 0; 1014 reg = 0;
@@ -1544,7 +1549,8 @@ static void rt61pci_write_tx_desc(struct rt2x00_dev *rt2x00dev,
1544 rt2x00_set_field32(&word, TXD_W1_CWMIN, txdesc->cw_min); 1549 rt2x00_set_field32(&word, TXD_W1_CWMIN, txdesc->cw_min);
1545 rt2x00_set_field32(&word, TXD_W1_CWMAX, txdesc->cw_max); 1550 rt2x00_set_field32(&word, TXD_W1_CWMAX, txdesc->cw_max);
1546 rt2x00_set_field32(&word, TXD_W1_IV_OFFSET, IEEE80211_HEADER); 1551 rt2x00_set_field32(&word, TXD_W1_IV_OFFSET, IEEE80211_HEADER);
1547 rt2x00_set_field32(&word, TXD_W1_HW_SEQUENCE, 1); 1552 rt2x00_set_field32(&word, TXD_W1_HW_SEQUENCE,
1553 test_bit(ENTRY_TXD_GENERATE_SEQ, &txdesc->flags));
1548 rt2x00_set_field32(&word, TXD_W1_BUFFER_COUNT, 1); 1554 rt2x00_set_field32(&word, TXD_W1_BUFFER_COUNT, 1);
1549 rt2x00_desc_write(txd, 1, word); 1555 rt2x00_desc_write(txd, 1, word);
1550 1556
@@ -2278,7 +2284,6 @@ static void rt61pci_probe_hw_mode(struct rt2x00_dev *rt2x00dev)
2278 * Initialize all hw fields. 2284 * Initialize all hw fields.
2279 */ 2285 */
2280 rt2x00dev->hw->flags = 2286 rt2x00dev->hw->flags =
2281 IEEE80211_HW_HOST_GEN_BEACON_TEMPLATE |
2282 IEEE80211_HW_HOST_BROADCAST_PS_BUFFERING | 2287 IEEE80211_HW_HOST_BROADCAST_PS_BUFFERING |
2283 IEEE80211_HW_SIGNAL_DBM; 2288 IEEE80211_HW_SIGNAL_DBM;
2284 rt2x00dev->hw->extra_tx_headroom = 0; 2289 rt2x00dev->hw->extra_tx_headroom = 0;
diff --git a/drivers/net/wireless/rt2x00/rt73usb.c b/drivers/net/wireless/rt2x00/rt73usb.c
index d383735ab8f2..9761eaaa08be 100644
--- a/drivers/net/wireless/rt2x00/rt73usb.c
+++ b/drivers/net/wireless/rt2x00/rt73usb.c
@@ -890,9 +890,6 @@ static int rt73usb_load_firmware(struct rt2x00_dev *rt2x00dev, const void *data,
890 unsigned int i; 890 unsigned int i;
891 int status; 891 int status;
892 u32 reg; 892 u32 reg;
893 const char *ptr = data;
894 char *cache;
895 int buflen;
896 893
897 /* 894 /*
898 * Wait for stable hardware. 895 * Wait for stable hardware.
@@ -911,31 +908,12 @@ static int rt73usb_load_firmware(struct rt2x00_dev *rt2x00dev, const void *data,
911 908
912 /* 909 /*
913 * Write firmware to device. 910 * Write firmware to device.
914 * We setup a seperate cache for this action,
915 * since we are going to write larger chunks of data
916 * then normally used cache size.
917 */ 911 */
918 cache = kmalloc(CSR_CACHE_SIZE_FIRMWARE, GFP_KERNEL); 912 rt2x00usb_vendor_request_large_buff(rt2x00dev, USB_MULTI_WRITE,
919 if (!cache) { 913 USB_VENDOR_REQUEST_OUT,
920 ERROR(rt2x00dev, "Failed to allocate firmware cache.\n"); 914 FIRMWARE_IMAGE_BASE,
921 return -ENOMEM; 915 data, len,
922 } 916 REGISTER_TIMEOUT32(len));
923
924 for (i = 0; i < len; i += CSR_CACHE_SIZE_FIRMWARE) {
925 buflen = min_t(int, len - i, CSR_CACHE_SIZE_FIRMWARE);
926
927 memcpy(cache, ptr, buflen);
928
929 rt2x00usb_vendor_request(rt2x00dev, USB_MULTI_WRITE,
930 USB_VENDOR_REQUEST_OUT,
931 FIRMWARE_IMAGE_BASE + i, 0,
932 cache, buflen,
933 REGISTER_TIMEOUT32(buflen));
934
935 ptr += buflen;
936 }
937
938 kfree(cache);
939 917
940 /* 918 /*
941 * Send firmware request to device to load firmware, 919 * Send firmware request to device to load firmware,
@@ -1303,7 +1281,8 @@ static void rt73usb_write_tx_desc(struct rt2x00_dev *rt2x00dev,
1303 rt2x00_set_field32(&word, TXD_W1_CWMIN, txdesc->cw_min); 1281 rt2x00_set_field32(&word, TXD_W1_CWMIN, txdesc->cw_min);
1304 rt2x00_set_field32(&word, TXD_W1_CWMAX, txdesc->cw_max); 1282 rt2x00_set_field32(&word, TXD_W1_CWMAX, txdesc->cw_max);
1305 rt2x00_set_field32(&word, TXD_W1_IV_OFFSET, IEEE80211_HEADER); 1283 rt2x00_set_field32(&word, TXD_W1_IV_OFFSET, IEEE80211_HEADER);
1306 rt2x00_set_field32(&word, TXD_W1_HW_SEQUENCE, 1); 1284 rt2x00_set_field32(&word, TXD_W1_HW_SEQUENCE,
1285 test_bit(ENTRY_TXD_GENERATE_SEQ, &txdesc->flags));
1307 rt2x00_desc_write(txd, 1, word); 1286 rt2x00_desc_write(txd, 1, word);
1308 1287
1309 rt2x00_desc_read(txd, 2, &word); 1288 rt2x00_desc_read(txd, 2, &word);
@@ -1352,6 +1331,7 @@ static void rt73usb_write_beacon(struct queue_entry *entry)
1352 struct skb_frame_desc *skbdesc = get_skb_frame_desc(entry->skb); 1331 struct skb_frame_desc *skbdesc = get_skb_frame_desc(entry->skb);
1353 unsigned int beacon_base; 1332 unsigned int beacon_base;
1354 u32 reg; 1333 u32 reg;
1334 u32 word, len;
1355 1335
1356 /* 1336 /*
1357 * Add the descriptor in front of the skb. 1337 * Add the descriptor in front of the skb.
@@ -1361,6 +1341,17 @@ static void rt73usb_write_beacon(struct queue_entry *entry)
1361 skbdesc->desc = entry->skb->data; 1341 skbdesc->desc = entry->skb->data;
1362 1342
1363 /* 1343 /*
1344 * Adjust the beacon databyte count. The current number is
1345 * calculated before this function gets called, but falsely
1346 * assumes that the descriptor was already present in the SKB.
1347 */
1348 rt2x00_desc_read(skbdesc->desc, 0, &word);
1349 len = rt2x00_get_field32(word, TXD_W0_DATABYTE_COUNT);
1350 len += skbdesc->desc_len;
1351 rt2x00_set_field32(&word, TXD_W0_DATABYTE_COUNT, len);
1352 rt2x00_desc_write(skbdesc->desc, 0, word);
1353
1354 /*
1364 * Disable beaconing while we are reloading the beacon data, 1355 * Disable beaconing while we are reloading the beacon data,
1365 * otherwise we might be sending out invalid data. 1356 * otherwise we might be sending out invalid data.
1366 */ 1357 */
@@ -1374,10 +1365,10 @@ static void rt73usb_write_beacon(struct queue_entry *entry)
1374 * Write entire beacon with descriptor to register. 1365 * Write entire beacon with descriptor to register.
1375 */ 1366 */
1376 beacon_base = HW_BEACON_OFFSET(entry->entry_idx); 1367 beacon_base = HW_BEACON_OFFSET(entry->entry_idx);
1377 rt2x00usb_vendor_request(rt2x00dev, USB_MULTI_WRITE, 1368 rt2x00usb_vendor_request_large_buff(rt2x00dev, USB_MULTI_WRITE,
1378 USB_VENDOR_REQUEST_OUT, beacon_base, 0, 1369 USB_VENDOR_REQUEST_OUT, beacon_base,
1379 entry->skb->data, entry->skb->len, 1370 entry->skb->data, entry->skb->len,
1380 REGISTER_TIMEOUT32(entry->skb->len)); 1371 REGISTER_TIMEOUT32(entry->skb->len));
1381 1372
1382 /* 1373 /*
1383 * Clean up the beacon skb. 1374 * Clean up the beacon skb.
@@ -1871,7 +1862,6 @@ static void rt73usb_probe_hw_mode(struct rt2x00_dev *rt2x00dev)
1871 * Initialize all hw fields. 1862 * Initialize all hw fields.
1872 */ 1863 */
1873 rt2x00dev->hw->flags = 1864 rt2x00dev->hw->flags =
1874 IEEE80211_HW_HOST_GEN_BEACON_TEMPLATE |
1875 IEEE80211_HW_HOST_BROADCAST_PS_BUFFERING | 1865 IEEE80211_HW_HOST_BROADCAST_PS_BUFFERING |
1876 IEEE80211_HW_SIGNAL_DBM; 1866 IEEE80211_HW_SIGNAL_DBM;
1877 rt2x00dev->hw->extra_tx_headroom = TXD_DESC_SIZE; 1867 rt2x00dev->hw->extra_tx_headroom = TXD_DESC_SIZE;
diff --git a/drivers/net/wireless/rtl8187.h b/drivers/net/wireless/rtl8187.h
index 3afb49f8866a..5a9515c99960 100644
--- a/drivers/net/wireless/rtl8187.h
+++ b/drivers/net/wireless/rtl8187.h
@@ -47,11 +47,13 @@ struct rtl8187_rx_hdr {
47struct rtl8187b_rx_hdr { 47struct rtl8187b_rx_hdr {
48 __le32 flags; 48 __le32 flags;
49 __le64 mac_time; 49 __le64 mac_time;
50 u8 noise; 50 u8 sq;
51 u8 signal; 51 u8 rssi;
52 u8 agc; 52 u8 agc;
53 u8 reserved; 53 u8 flags2;
54 __le32 unused; 54 __le16 snr_long2end;
55 s8 pwdb_g12;
56 u8 fot;
55} __attribute__((packed)); 57} __attribute__((packed));
56 58
57/* {rtl8187,rtl8187b}_tx_info is in skb */ 59/* {rtl8187,rtl8187b}_tx_info is in skb */
@@ -92,6 +94,10 @@ struct rtl8187_priv {
92 const struct rtl818x_rf_ops *rf; 94 const struct rtl818x_rf_ops *rf;
93 struct ieee80211_vif *vif; 95 struct ieee80211_vif *vif;
94 int mode; 96 int mode;
97 /* The mutex protects the TX loopback state.
98 * Any attempt to set channels concurrently locks the device.
99 */
100 struct mutex conf_mutex;
95 101
96 /* rtl8187 specific */ 102 /* rtl8187 specific */
97 struct ieee80211_channel channels[14]; 103 struct ieee80211_channel channels[14];
@@ -100,6 +106,7 @@ struct rtl8187_priv {
100 struct usb_device *udev; 106 struct usb_device *udev;
101 u32 rx_conf; 107 u32 rx_conf;
102 u16 txpwr_base; 108 u16 txpwr_base;
109 u16 seqno;
103 u8 asic_rev; 110 u8 asic_rev;
104 u8 is_rtl8187b; 111 u8 is_rtl8187b;
105 enum { 112 enum {
diff --git a/drivers/net/wireless/rtl8187_dev.c b/drivers/net/wireless/rtl8187_dev.c
index d3067b1216ca..57376fb993ed 100644
--- a/drivers/net/wireless/rtl8187_dev.c
+++ b/drivers/net/wireless/rtl8187_dev.c
@@ -31,6 +31,8 @@ MODULE_DESCRIPTION("RTL8187/RTL8187B USB wireless driver");
31MODULE_LICENSE("GPL"); 31MODULE_LICENSE("GPL");
32 32
33static struct usb_device_id rtl8187_table[] __devinitdata = { 33static struct usb_device_id rtl8187_table[] __devinitdata = {
34 /* Asus */
35 {USB_DEVICE(0x0b05, 0x171d), .driver_info = DEVICE_RTL8187},
34 /* Realtek */ 36 /* Realtek */
35 {USB_DEVICE(0x0bda, 0x8187), .driver_info = DEVICE_RTL8187}, 37 {USB_DEVICE(0x0bda, 0x8187), .driver_info = DEVICE_RTL8187},
36 {USB_DEVICE(0x0bda, 0x8189), .driver_info = DEVICE_RTL8187B}, 38 {USB_DEVICE(0x0bda, 0x8189), .driver_info = DEVICE_RTL8187B},
@@ -169,6 +171,7 @@ static int rtl8187_tx(struct ieee80211_hw *dev, struct sk_buff *skb)
169{ 171{
170 struct rtl8187_priv *priv = dev->priv; 172 struct rtl8187_priv *priv = dev->priv;
171 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); 173 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
174 struct ieee80211_hdr *ieee80211hdr = (struct ieee80211_hdr *)skb->data;
172 unsigned int ep; 175 unsigned int ep;
173 void *buf; 176 void *buf;
174 struct urb *urb; 177 struct urb *urb;
@@ -234,6 +237,20 @@ static int rtl8187_tx(struct ieee80211_hw *dev, struct sk_buff *skb)
234 ep = epmap[skb_get_queue_mapping(skb)]; 237 ep = epmap[skb_get_queue_mapping(skb)];
235 } 238 }
236 239
240 /* FIXME: The sequence that follows is needed for this driver to
241 * work with mac80211 since "mac80211: fix TX sequence numbers".
242 * As with the temporary code in rt2x00, changes will be needed
243 * to get proper sequence numbers on beacons. In addition, this
244 * patch places the sequence number in the hardware state, which
245 * limits us to a single virtual state.
246 */
247 if (info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ) {
248 if (info->flags & IEEE80211_TX_CTL_FIRST_FRAGMENT)
249 priv->seqno += 0x10;
250 ieee80211hdr->seq_ctrl &= cpu_to_le16(IEEE80211_SCTL_FRAG);
251 ieee80211hdr->seq_ctrl |= cpu_to_le16(priv->seqno);
252 }
253
237 info->driver_data[0] = dev; 254 info->driver_data[0] = dev;
238 info->driver_data[1] = urb; 255 info->driver_data[1] = urb;
239 256
@@ -257,6 +274,7 @@ static void rtl8187_rx_cb(struct urb *urb)
257 struct ieee80211_rx_status rx_status = { 0 }; 274 struct ieee80211_rx_status rx_status = { 0 };
258 int rate, signal; 275 int rate, signal;
259 u32 flags; 276 u32 flags;
277 u32 quality;
260 278
261 spin_lock(&priv->rx_queue.lock); 279 spin_lock(&priv->rx_queue.lock);
262 if (skb->next) 280 if (skb->next)
@@ -280,44 +298,57 @@ static void rtl8187_rx_cb(struct urb *urb)
280 flags = le32_to_cpu(hdr->flags); 298 flags = le32_to_cpu(hdr->flags);
281 signal = hdr->signal & 0x7f; 299 signal = hdr->signal & 0x7f;
282 rx_status.antenna = (hdr->signal >> 7) & 1; 300 rx_status.antenna = (hdr->signal >> 7) & 1;
283 rx_status.signal = signal;
284 rx_status.noise = hdr->noise; 301 rx_status.noise = hdr->noise;
285 rx_status.mactime = le64_to_cpu(hdr->mac_time); 302 rx_status.mactime = le64_to_cpu(hdr->mac_time);
286 priv->signal = signal;
287 priv->quality = signal; 303 priv->quality = signal;
304 rx_status.qual = priv->quality;
288 priv->noise = hdr->noise; 305 priv->noise = hdr->noise;
306 rate = (flags >> 20) & 0xF;
307 if (rate > 3) { /* OFDM rate */
308 if (signal > 90)
309 signal = 90;
310 else if (signal < 25)
311 signal = 25;
312 signal = 90 - signal;
313 } else { /* CCK rate */
314 if (signal > 95)
315 signal = 95;
316 else if (signal < 30)
317 signal = 30;
318 signal = 95 - signal;
319 }
320 rx_status.signal = signal;
321 priv->signal = signal;
289 } else { 322 } else {
290 struct rtl8187b_rx_hdr *hdr = 323 struct rtl8187b_rx_hdr *hdr =
291 (typeof(hdr))(skb_tail_pointer(skb) - sizeof(*hdr)); 324 (typeof(hdr))(skb_tail_pointer(skb) - sizeof(*hdr));
325 /* The Realtek datasheet for the RTL8187B shows that the RX
326 * header contains the following quantities: signal quality,
327 * RSSI, AGC, the received power in dB, and the measured SNR.
328 * In testing, none of these quantities show qualitative
329 * agreement with AP signal strength, except for the AGC,
330 * which is inversely proportional to the strength of the
331 * signal. In the following, the quality and signal strength
332 * are derived from the AGC. The arbitrary scaling constants
333 * are chosen to make the results close to the values obtained
334 * for a BCM4312 using b43 as the driver. The noise is ignored
335 * for now.
336 */
292 flags = le32_to_cpu(hdr->flags); 337 flags = le32_to_cpu(hdr->flags);
293 signal = hdr->agc >> 1; 338 quality = 170 - hdr->agc;
294 rx_status.antenna = (hdr->signal >> 7) & 1; 339 if (quality > 100)
295 rx_status.signal = 64 - min(hdr->noise, (u8)64); 340 quality = 100;
296 rx_status.noise = hdr->noise; 341 signal = 14 - hdr->agc / 2;
342 rx_status.qual = quality;
343 priv->quality = quality;
344 rx_status.signal = signal;
345 priv->signal = signal;
346 rx_status.antenna = (hdr->rssi >> 7) & 1;
297 rx_status.mactime = le64_to_cpu(hdr->mac_time); 347 rx_status.mactime = le64_to_cpu(hdr->mac_time);
298 priv->signal = hdr->signal; 348 rate = (flags >> 20) & 0xF;
299 priv->quality = hdr->agc >> 1;
300 priv->noise = hdr->noise;
301 } 349 }
302 350
303 skb_trim(skb, flags & 0x0FFF); 351 skb_trim(skb, flags & 0x0FFF);
304 rate = (flags >> 20) & 0xF;
305 if (rate > 3) { /* OFDM rate */
306 if (signal > 90)
307 signal = 90;
308 else if (signal < 25)
309 signal = 25;
310 signal = 90 - signal;
311 } else { /* CCK rate */
312 if (signal > 95)
313 signal = 95;
314 else if (signal < 30)
315 signal = 30;
316 signal = 95 - signal;
317 }
318
319 rx_status.qual = priv->quality;
320 rx_status.signal = signal;
321 rx_status.rate_idx = rate; 352 rx_status.rate_idx = rate;
322 rx_status.freq = dev->conf.channel->center_freq; 353 rx_status.freq = dev->conf.channel->center_freq;
323 rx_status.band = dev->conf.channel->band; 354 rx_status.band = dev->conf.channel->band;
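Two signal paths fall out of the RX hunk above. For the original RTL8187 the raw per-frame value is clamped and inverted, with different ranges for OFDM and CCK rates (this block used to run for both chips and now applies only to the non-B path); for the RTL8187B only the AGC byte tracks signal strength, inversely, so quality and a dBm-style signal are derived from it with the arbitrary scaling constants the comment describes. A hedged sketch with illustrative helper names:

#include <linux/types.h>

/* RTL8187B: derive quality and signal from the AGC byte, as in the hunk above. */
static u32 rtl8187b_agc_to_qual(u8 agc)
{
	u32 quality = 170 - agc;	/* scaling chosen to roughly match b43 on a BCM4312 */

	if (quality > 100)		/* also catches AGC > 170, which wraps the subtraction */
		quality = 100;
	return quality;
}

static int rtl8187b_agc_to_dbm(u8 agc)
{
	return 14 - agc / 2;		/* stronger signal gives a lower AGC reading */
}

/* Original RTL8187: clamp the raw value, then invert it. */
static int rtl8187_legacy_signal(int raw, int rate_idx)
{
	if (rate_idx > 3) {		/* OFDM rates: clamp to 25..90 */
		if (raw > 90)
			raw = 90;
		else if (raw < 25)
			raw = 25;
		return 90 - raw;
	}
	if (raw > 95)			/* CCK rates: clamp to 30..95 */
		raw = 95;
	else if (raw < 30)
		raw = 30;
	return 95 - raw;
}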
@@ -697,6 +728,7 @@ static int rtl8187_start(struct ieee80211_hw *dev)
697 if (ret) 728 if (ret)
698 return ret; 729 return ret;
699 730
731 mutex_lock(&priv->conf_mutex);
700 if (priv->is_rtl8187b) { 732 if (priv->is_rtl8187b) {
701 reg = RTL818X_RX_CONF_MGMT | 733 reg = RTL818X_RX_CONF_MGMT |
702 RTL818X_RX_CONF_DATA | 734 RTL818X_RX_CONF_DATA |
@@ -718,6 +750,7 @@ static int rtl8187_start(struct ieee80211_hw *dev)
718 (7 << 0 /* long retry limit */) | 750 (7 << 0 /* long retry limit */) |
719 (7 << 21 /* MAX TX DMA */)); 751 (7 << 21 /* MAX TX DMA */));
720 rtl8187_init_urbs(dev); 752 rtl8187_init_urbs(dev);
753 mutex_unlock(&priv->conf_mutex);
721 return 0; 754 return 0;
722 } 755 }
723 756
@@ -761,6 +794,7 @@ static int rtl8187_start(struct ieee80211_hw *dev)
761 reg |= RTL818X_CMD_TX_ENABLE; 794 reg |= RTL818X_CMD_TX_ENABLE;
762 reg |= RTL818X_CMD_RX_ENABLE; 795 reg |= RTL818X_CMD_RX_ENABLE;
763 rtl818x_iowrite8(priv, &priv->map->CMD, reg); 796 rtl818x_iowrite8(priv, &priv->map->CMD, reg);
797 mutex_unlock(&priv->conf_mutex);
764 798
765 return 0; 799 return 0;
766} 800}
@@ -772,6 +806,7 @@ static void rtl8187_stop(struct ieee80211_hw *dev)
772 struct sk_buff *skb; 806 struct sk_buff *skb;
773 u32 reg; 807 u32 reg;
774 808
809 mutex_lock(&priv->conf_mutex);
775 rtl818x_iowrite16(priv, &priv->map->INT_MASK, 0); 810 rtl818x_iowrite16(priv, &priv->map->INT_MASK, 0);
776 811
777 reg = rtl818x_ioread8(priv, &priv->map->CMD); 812 reg = rtl818x_ioread8(priv, &priv->map->CMD);
@@ -791,7 +826,7 @@ static void rtl8187_stop(struct ieee80211_hw *dev)
791 usb_kill_urb(info->urb); 826 usb_kill_urb(info->urb);
792 kfree_skb(skb); 827 kfree_skb(skb);
793 } 828 }
794 return; 829 mutex_unlock(&priv->conf_mutex);
795} 830}
796 831
797static int rtl8187_add_interface(struct ieee80211_hw *dev, 832static int rtl8187_add_interface(struct ieee80211_hw *dev,
@@ -811,6 +846,7 @@ static int rtl8187_add_interface(struct ieee80211_hw *dev,
811 return -EOPNOTSUPP; 846 return -EOPNOTSUPP;
812 } 847 }
813 848
849 mutex_lock(&priv->conf_mutex);
814 priv->vif = conf->vif; 850 priv->vif = conf->vif;
815 851
816 rtl818x_iowrite8(priv, &priv->map->EEPROM_CMD, RTL818X_EEPROM_CMD_CONFIG); 852 rtl818x_iowrite8(priv, &priv->map->EEPROM_CMD, RTL818X_EEPROM_CMD_CONFIG);
@@ -819,6 +855,7 @@ static int rtl8187_add_interface(struct ieee80211_hw *dev,
819 ((u8 *)conf->mac_addr)[i]); 855 ((u8 *)conf->mac_addr)[i]);
820 rtl818x_iowrite8(priv, &priv->map->EEPROM_CMD, RTL818X_EEPROM_CMD_NORMAL); 856 rtl818x_iowrite8(priv, &priv->map->EEPROM_CMD, RTL818X_EEPROM_CMD_NORMAL);
821 857
858 mutex_unlock(&priv->conf_mutex);
822 return 0; 859 return 0;
823} 860}
824 861
@@ -826,8 +863,10 @@ static void rtl8187_remove_interface(struct ieee80211_hw *dev,
826 struct ieee80211_if_init_conf *conf) 863 struct ieee80211_if_init_conf *conf)
827{ 864{
828 struct rtl8187_priv *priv = dev->priv; 865 struct rtl8187_priv *priv = dev->priv;
866 mutex_lock(&priv->conf_mutex);
829 priv->mode = IEEE80211_IF_TYPE_MNTR; 867 priv->mode = IEEE80211_IF_TYPE_MNTR;
830 priv->vif = NULL; 868 priv->vif = NULL;
869 mutex_unlock(&priv->conf_mutex);
831} 870}
832 871
833static int rtl8187_config(struct ieee80211_hw *dev, struct ieee80211_conf *conf) 872static int rtl8187_config(struct ieee80211_hw *dev, struct ieee80211_conf *conf)
@@ -835,6 +874,7 @@ static int rtl8187_config(struct ieee80211_hw *dev, struct ieee80211_conf *conf)
835 struct rtl8187_priv *priv = dev->priv; 874 struct rtl8187_priv *priv = dev->priv;
836 u32 reg; 875 u32 reg;
837 876
877 mutex_lock(&priv->conf_mutex);
838 reg = rtl818x_ioread32(priv, &priv->map->TX_CONF); 878 reg = rtl818x_ioread32(priv, &priv->map->TX_CONF);
839 /* Enable TX loopback on MAC level to avoid TX during channel 879 /* Enable TX loopback on MAC level to avoid TX during channel
840 * changes, as this has be seen to causes problems and the 880 * changes, as this has be seen to causes problems and the
@@ -867,6 +907,7 @@ static int rtl8187_config(struct ieee80211_hw *dev, struct ieee80211_conf *conf)
867 rtl818x_iowrite16(priv, &priv->map->ATIMTR_INTERVAL, 100); 907 rtl818x_iowrite16(priv, &priv->map->ATIMTR_INTERVAL, 100);
868 rtl818x_iowrite16(priv, &priv->map->BEACON_INTERVAL, 100); 908 rtl818x_iowrite16(priv, &priv->map->BEACON_INTERVAL, 100);
869 rtl818x_iowrite16(priv, &priv->map->BEACON_INTERVAL_TIME, 100); 909 rtl818x_iowrite16(priv, &priv->map->BEACON_INTERVAL_TIME, 100);
910 mutex_unlock(&priv->conf_mutex);
870 return 0; 911 return 0;
871} 912}
872 913
@@ -878,6 +919,7 @@ static int rtl8187_config_interface(struct ieee80211_hw *dev,
878 int i; 919 int i;
879 u8 reg; 920 u8 reg;
880 921
922 mutex_lock(&priv->conf_mutex);
881 for (i = 0; i < ETH_ALEN; i++) 923 for (i = 0; i < ETH_ALEN; i++)
882 rtl818x_iowrite8(priv, &priv->map->BSSID[i], conf->bssid[i]); 924 rtl818x_iowrite8(priv, &priv->map->BSSID[i], conf->bssid[i]);
883 925
@@ -891,6 +933,7 @@ static int rtl8187_config_interface(struct ieee80211_hw *dev,
891 rtl818x_iowrite8(priv, &priv->map->MSR, reg); 933 rtl818x_iowrite8(priv, &priv->map->MSR, reg);
892 } 934 }
893 935
936 mutex_unlock(&priv->conf_mutex);
894 return 0; 937 return 0;
895} 938}
896 939
@@ -1015,9 +1058,7 @@ static int __devinit rtl8187_probe(struct usb_interface *intf,
1015 1058
1016 priv->mode = IEEE80211_IF_TYPE_MNTR; 1059 priv->mode = IEEE80211_IF_TYPE_MNTR;
1017 dev->flags = IEEE80211_HW_HOST_BROADCAST_PS_BUFFERING | 1060 dev->flags = IEEE80211_HW_HOST_BROADCAST_PS_BUFFERING |
1018 IEEE80211_HW_RX_INCLUDES_FCS | 1061 IEEE80211_HW_RX_INCLUDES_FCS;
1019 IEEE80211_HW_SIGNAL_UNSPEC;
1020 dev->max_signal = 65;
1021 1062
1022 eeprom.data = dev; 1063 eeprom.data = dev;
1023 eeprom.register_read = rtl8187_eeprom_register_read; 1064 eeprom.register_read = rtl8187_eeprom_register_read;
@@ -1132,10 +1173,16 @@ static int __devinit rtl8187_probe(struct usb_interface *intf,
1132 (*channel++).hw_value = txpwr >> 8; 1173 (*channel++).hw_value = txpwr >> 8;
1133 } 1174 }
1134 1175
1135 if (priv->is_rtl8187b) 1176 if (priv->is_rtl8187b) {
1136 printk(KERN_WARNING "rtl8187: 8187B chip detected. Support " 1177 printk(KERN_WARNING "rtl8187: 8187B chip detected. Support "
1137 "is EXPERIMENTAL, and could damage your\n" 1178 "is EXPERIMENTAL, and could damage your\n"
1138 " hardware, use at your own risk\n"); 1179 " hardware, use at your own risk\n");
1180 dev->flags |= IEEE80211_HW_SIGNAL_DBM;
1181 } else {
1182 dev->flags |= IEEE80211_HW_SIGNAL_UNSPEC;
1183 dev->max_signal = 65;
1184 }
1185
1139 if ((id->driver_info == DEVICE_RTL8187) && priv->is_rtl8187b) 1186 if ((id->driver_info == DEVICE_RTL8187) && priv->is_rtl8187b)
1140 printk(KERN_INFO "rtl8187: inconsistency between id with OEM" 1187 printk(KERN_INFO "rtl8187: inconsistency between id with OEM"
1141 " info!\n"); 1188 " info!\n");
@@ -1154,6 +1201,7 @@ static int __devinit rtl8187_probe(struct usb_interface *intf,
1154 printk(KERN_ERR "rtl8187: Cannot register device\n"); 1201 printk(KERN_ERR "rtl8187: Cannot register device\n");
1155 goto err_free_dev; 1202 goto err_free_dev;
1156 } 1203 }
1204 mutex_init(&priv->conf_mutex);
1157 1205
1158 printk(KERN_INFO "%s: hwaddr %s, %s V%d + %s\n", 1206 printk(KERN_INFO "%s: hwaddr %s, %s V%d + %s\n",
1159 wiphy_name(dev->wiphy), print_mac(mac, dev->wiphy->perm_addr), 1207 wiphy_name(dev->wiphy), print_mac(mac, dev->wiphy->perm_addr),
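A second thread running through the rtl8187 hunks above is the new priv->conf_mutex: start, stop, config, config_interface and the add/remove_interface callbacks now take the same mutex around their register programming (the stop path also loses its bare return so the unlock is always reached), and the mutex itself is initialised at the end of probe. A minimal sketch of the pattern, with an illustrative callback name:

/* Hedged sketch of the conf_mutex pattern introduced above: every callback
 * that programs the hardware serialises on the same mutex so configuration
 * sequences cannot interleave. */
static int example_configure(struct ieee80211_hw *dev)
{
	struct rtl8187_priv *priv = dev->priv;

	mutex_lock(&priv->conf_mutex);
	/* ... rtl818x_iowrite*() register programming ... */
	mutex_unlock(&priv->conf_mutex);

	return 0;
}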
diff --git a/drivers/net/wireless/wavelan.c b/drivers/net/wireless/wavelan.c
index 49ae97003952..136220b5ca81 100644
--- a/drivers/net/wireless/wavelan.c
+++ b/drivers/net/wireless/wavelan.c
@@ -1409,9 +1409,6 @@ static void wavelan_set_multicast_list(struct net_device * dev)
1409 lp->mc_count = 0; 1409 lp->mc_count = 0;
1410 1410
1411 wv_82586_reconfig(dev); 1411 wv_82586_reconfig(dev);
1412
1413 /* Tell the kernel that we are doing a really bad job. */
1414 dev->flags |= IFF_PROMISC;
1415 } 1412 }
1416 } else 1413 } else
1417 /* Are there multicast addresses to send? */ 1414 /* Are there multicast addresses to send? */
diff --git a/drivers/net/wireless/wavelan_cs.c b/drivers/net/wireless/wavelan_cs.c
index b584c0ecc62d..00a3559e5aa4 100644
--- a/drivers/net/wireless/wavelan_cs.c
+++ b/drivers/net/wireless/wavelan_cs.c
@@ -1412,9 +1412,6 @@ wavelan_set_multicast_list(struct net_device * dev)
1412 lp->mc_count = 0; 1412 lp->mc_count = 0;
1413 1413
1414 wv_82593_reconfig(dev); 1414 wv_82593_reconfig(dev);
1415
1416 /* Tell the kernel that we are doing a really bad job... */
1417 dev->flags |= IFF_PROMISC;
1418 } 1415 }
1419 } 1416 }
1420 else 1417 else
@@ -1433,9 +1430,6 @@ wavelan_set_multicast_list(struct net_device * dev)
1433 lp->mc_count = 0; 1430 lp->mc_count = 0;
1434 1431
1435 wv_82593_reconfig(dev); 1432 wv_82593_reconfig(dev);
1436
1437 /* Tell the kernel that we are doing a really bad job... */
1438 dev->flags |= IFF_ALLMULTI;
1439 } 1433 }
1440 } 1434 }
1441 else 1435 else
diff --git a/drivers/net/wireless/zd1211rw/zd_mac.c b/drivers/net/wireless/zd1211rw/zd_mac.c
index fcc532bb6a7e..4d7b98b05030 100644
--- a/drivers/net/wireless/zd1211rw/zd_mac.c
+++ b/drivers/net/wireless/zd1211rw/zd_mac.c
@@ -935,7 +935,6 @@ struct ieee80211_hw *zd_mac_alloc_hw(struct usb_interface *intf)
935 hw->wiphy->bands[IEEE80211_BAND_2GHZ] = &mac->band; 935 hw->wiphy->bands[IEEE80211_BAND_2GHZ] = &mac->band;
936 936
937 hw->flags = IEEE80211_HW_RX_INCLUDES_FCS | 937 hw->flags = IEEE80211_HW_RX_INCLUDES_FCS |
938 IEEE80211_HW_HOST_GEN_BEACON_TEMPLATE |
939 IEEE80211_HW_SIGNAL_DB; 938 IEEE80211_HW_SIGNAL_DB;
940 939
941 hw->max_signal = 100; 940 hw->max_signal = 100;
diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c
index 902bbe788215..c749bdba214c 100644
--- a/drivers/net/xen-netfront.c
+++ b/drivers/net/xen-netfront.c
@@ -329,7 +329,7 @@ static int xennet_open(struct net_device *dev)
329 } 329 }
330 spin_unlock_bh(&np->rx_lock); 330 spin_unlock_bh(&np->rx_lock);
331 331
332 xennet_maybe_wake_tx(dev); 332 netif_start_queue(dev);
333 333
334 return 0; 334 return 0;
335} 335}
diff --git a/drivers/pcmcia/at91_cf.c b/drivers/pcmcia/at91_cf.c
index 684968558c19..a0ffb8ebfe00 100644
--- a/drivers/pcmcia/at91_cf.c
+++ b/drivers/pcmcia/at91_cf.c
@@ -18,13 +18,13 @@
18 18
19#include <pcmcia/ss.h> 19#include <pcmcia/ss.h>
20 20
21#include <asm/hardware.h> 21#include <mach/hardware.h>
22#include <asm/io.h> 22#include <asm/io.h>
23#include <asm/sizes.h> 23#include <asm/sizes.h>
24#include <asm/gpio.h> 24#include <asm/gpio.h>
25 25
26#include <asm/arch/board.h> 26#include <mach/board.h>
27#include <asm/arch/at91rm9200_mc.h> 27#include <mach/at91rm9200_mc.h>
28 28
29 29
30/* 30/*
diff --git a/drivers/pcmcia/omap_cf.c b/drivers/pcmcia/omap_cf.c
index 569b746b5731..f3736398900e 100644
--- a/drivers/pcmcia/omap_cf.c
+++ b/drivers/pcmcia/omap_cf.c
@@ -19,12 +19,12 @@
19 19
20#include <pcmcia/ss.h> 20#include <pcmcia/ss.h>
21 21
22#include <asm/hardware.h> 22#include <mach/hardware.h>
23#include <asm/io.h> 23#include <asm/io.h>
24#include <asm/sizes.h> 24#include <asm/sizes.h>
25 25
26#include <asm/arch/mux.h> 26#include <mach/mux.h>
27#include <asm/arch/tc.h> 27#include <mach/tc.h>
28 28
29 29
30/* NOTE: don't expect this to support many I/O cards. The 16xx chips have 30/* NOTE: don't expect this to support many I/O cards. The 16xx chips have
diff --git a/drivers/pcmcia/pxa2xx_base.c b/drivers/pcmcia/pxa2xx_base.c
index ccfdf1969a7f..1b07af5a2ed3 100644
--- a/drivers/pcmcia/pxa2xx_base.c
+++ b/drivers/pcmcia/pxa2xx_base.c
@@ -24,12 +24,12 @@
24#include <linux/spinlock.h> 24#include <linux/spinlock.h>
25#include <linux/platform_device.h> 25#include <linux/platform_device.h>
26 26
27#include <asm/hardware.h> 27#include <mach/hardware.h>
28#include <asm/io.h> 28#include <asm/io.h>
29#include <asm/irq.h> 29#include <asm/irq.h>
30#include <asm/system.h> 30#include <asm/system.h>
31#include <asm/arch/pxa-regs.h> 31#include <mach/pxa-regs.h>
32#include <asm/arch/pxa2xx-regs.h> 32#include <mach/pxa2xx-regs.h>
33 33
34#include <pcmcia/cs_types.h> 34#include <pcmcia/cs_types.h>
35#include <pcmcia/ss.h> 35#include <pcmcia/ss.h>
diff --git a/drivers/pcmcia/pxa2xx_cm_x270.c b/drivers/pcmcia/pxa2xx_cm_x270.c
index bb95db7d2b76..bcff5cfed051 100644
--- a/drivers/pcmcia/pxa2xx_cm_x270.c
+++ b/drivers/pcmcia/pxa2xx_cm_x270.c
@@ -16,7 +16,7 @@
16#include <linux/gpio.h> 16#include <linux/gpio.h>
17 17
18#include <asm/mach-types.h> 18#include <asm/mach-types.h>
19#include <asm/arch/pxa-regs.h> 19#include <mach/pxa-regs.h>
20 20
21#include "soc_common.h" 21#include "soc_common.h"
22 22
diff --git a/drivers/pcmcia/pxa2xx_lubbock.c b/drivers/pcmcia/pxa2xx_lubbock.c
index 881ec8a8e389..37ec55df086e 100644
--- a/drivers/pcmcia/pxa2xx_lubbock.c
+++ b/drivers/pcmcia/pxa2xx_lubbock.c
@@ -21,11 +21,11 @@
21#include <linux/init.h> 21#include <linux/init.h>
22#include <linux/delay.h> 22#include <linux/delay.h>
23 23
24#include <asm/hardware.h> 24#include <mach/hardware.h>
25#include <asm/hardware/sa1111.h> 25#include <asm/hardware/sa1111.h>
26#include <asm/mach-types.h> 26#include <asm/mach-types.h>
27#include <asm/arch/pxa-regs.h> 27#include <mach/pxa-regs.h>
28#include <asm/arch/lubbock.h> 28#include <mach/lubbock.h>
29 29
30#include "sa1111_generic.h" 30#include "sa1111_generic.h"
31 31
diff --git a/drivers/pcmcia/pxa2xx_mainstone.c b/drivers/pcmcia/pxa2xx_mainstone.c
index 92d1cc33808c..877001db4916 100644
--- a/drivers/pcmcia/pxa2xx_mainstone.c
+++ b/drivers/pcmcia/pxa2xx_mainstone.c
@@ -21,12 +21,12 @@
21 21
22#include <pcmcia/ss.h> 22#include <pcmcia/ss.h>
23 23
24#include <asm/hardware.h> 24#include <mach/hardware.h>
25#include <asm/mach-types.h> 25#include <asm/mach-types.h>
26#include <asm/irq.h> 26#include <asm/irq.h>
27 27
28#include <asm/arch/pxa-regs.h> 28#include <mach/pxa-regs.h>
29#include <asm/arch/mainstone.h> 29#include <mach/mainstone.h>
30 30
31#include "soc_common.h" 31#include "soc_common.h"
32 32
diff --git a/drivers/pcmcia/pxa2xx_palmtx.c b/drivers/pcmcia/pxa2xx_palmtx.c
index 4abde190c1f5..a8771ffc61e8 100644
--- a/drivers/pcmcia/pxa2xx_palmtx.c
+++ b/drivers/pcmcia/pxa2xx_palmtx.c
@@ -16,8 +16,8 @@
16 16
17#include <asm/mach-types.h> 17#include <asm/mach-types.h>
18 18
19#include <asm/arch/gpio.h> 19#include <mach/gpio.h>
20#include <asm/arch/palmtx.h> 20#include <mach/palmtx.h>
21 21
22#include "soc_common.h" 22#include "soc_common.h"
23 23
diff --git a/drivers/pcmcia/pxa2xx_sharpsl.c b/drivers/pcmcia/pxa2xx_sharpsl.c
index d71f93d45833..1cd02f5a23a0 100644
--- a/drivers/pcmcia/pxa2xx_sharpsl.c
+++ b/drivers/pcmcia/pxa2xx_sharpsl.c
@@ -19,7 +19,7 @@
19#include <linux/platform_device.h> 19#include <linux/platform_device.h>
20 20
21#include <asm/mach-types.h> 21#include <asm/mach-types.h>
22#include <asm/hardware.h> 22#include <mach/hardware.h>
23#include <asm/irq.h> 23#include <asm/irq.h>
24#include <asm/hardware/scoop.h> 24#include <asm/hardware/scoop.h>
25 25
diff --git a/drivers/pcmcia/rsrc_nonstatic.c b/drivers/pcmcia/rsrc_nonstatic.c
index d0c1d63d1891..203e579ebbd2 100644
--- a/drivers/pcmcia/rsrc_nonstatic.c
+++ b/drivers/pcmcia/rsrc_nonstatic.c
@@ -275,7 +275,7 @@ static int readable(struct pcmcia_socket *s, struct resource *res,
275 destroy_cis_cache(s); 275 destroy_cis_cache(s);
276 } 276 }
277 s->cis_mem.res = NULL; 277 s->cis_mem.res = NULL;
278 if ((ret != 0) || (count == 0)) 278 if ((ret != 0) || (*count == 0))
279 return 0; 279 return 0;
280 return 1; 280 return 1;
281} 281}
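The one-character fix in the rsrc_nonstatic hunk above deserves a note: count is a pointer through which the CIS validation returns its chain count, so the old test compared the pointer itself, which is essentially never zero, and the early "not usable" return could not trigger; the new test reads the value it points to.

/* Hedged illustration of the fix above, with a hypothetical signature. */
static int example_usable(int ret, unsigned int *count)
{
	if ((ret != 0) || (*count == 0))	/* the old code tested `count`, the pointer */
		return 0;
	return 1;
}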
diff --git a/drivers/pcmcia/sa1100_assabet.c b/drivers/pcmcia/sa1100_assabet.c
index ce133ce81c10..f424146a2bc9 100644
--- a/drivers/pcmcia/sa1100_assabet.c
+++ b/drivers/pcmcia/sa1100_assabet.c
@@ -11,11 +11,11 @@
11#include <linux/device.h> 11#include <linux/device.h>
12#include <linux/init.h> 12#include <linux/init.h>
13 13
14#include <asm/hardware.h> 14#include <mach/hardware.h>
15#include <asm/mach-types.h> 15#include <asm/mach-types.h>
16#include <asm/irq.h> 16#include <asm/irq.h>
17#include <asm/signal.h> 17#include <asm/signal.h>
18#include <asm/arch/assabet.h> 18#include <mach/assabet.h>
19 19
20#include "sa1100_generic.h" 20#include "sa1100_generic.h"
21 21
diff --git a/drivers/pcmcia/sa1100_badge4.c b/drivers/pcmcia/sa1100_badge4.c
index 607c3f326eca..1ca9737ea79e 100644
--- a/drivers/pcmcia/sa1100_badge4.c
+++ b/drivers/pcmcia/sa1100_badge4.c
@@ -18,9 +18,9 @@
18#include <linux/errno.h> 18#include <linux/errno.h>
19#include <linux/init.h> 19#include <linux/init.h>
20 20
21#include <asm/hardware.h> 21#include <mach/hardware.h>
22#include <asm/mach-types.h> 22#include <asm/mach-types.h>
23#include <asm/arch/badge4.h> 23#include <mach/badge4.h>
24#include <asm/hardware/sa1111.h> 24#include <asm/hardware/sa1111.h>
25 25
26#include "sa1111_generic.h" 26#include "sa1111_generic.h"
diff --git a/drivers/pcmcia/sa1100_cerf.c b/drivers/pcmcia/sa1100_cerf.c
index 7c3951a2675d..63e6bc431a0d 100644
--- a/drivers/pcmcia/sa1100_cerf.c
+++ b/drivers/pcmcia/sa1100_cerf.c
@@ -11,10 +11,10 @@
11#include <linux/init.h> 11#include <linux/init.h>
12#include <linux/delay.h> 12#include <linux/delay.h>
13 13
14#include <asm/hardware.h> 14#include <mach/hardware.h>
15#include <asm/mach-types.h> 15#include <asm/mach-types.h>
16#include <asm/irq.h> 16#include <asm/irq.h>
17#include <asm/arch/cerf.h> 17#include <mach/cerf.h>
18#include "sa1100_generic.h" 18#include "sa1100_generic.h"
19 19
20#define CERF_SOCKET 1 20#define CERF_SOCKET 1
diff --git a/drivers/pcmcia/sa1100_h3600.c b/drivers/pcmcia/sa1100_h3600.c
index e5491879acd9..6de4e1b41d60 100644
--- a/drivers/pcmcia/sa1100_h3600.c
+++ b/drivers/pcmcia/sa1100_h3600.c
@@ -11,10 +11,10 @@
11#include <linux/init.h> 11#include <linux/init.h>
12#include <linux/delay.h> 12#include <linux/delay.h>
13 13
14#include <asm/hardware.h> 14#include <mach/hardware.h>
15#include <asm/irq.h> 15#include <asm/irq.h>
16#include <asm/mach-types.h> 16#include <asm/mach-types.h>
17#include <asm/arch/h3600.h> 17#include <mach/h3600.h>
18 18
19#include "sa1100_generic.h" 19#include "sa1100_generic.h"
20 20
diff --git a/drivers/pcmcia/sa1100_jornada720.c b/drivers/pcmcia/sa1100_jornada720.c
index 2167e6714d2d..57ca085473d5 100644
--- a/drivers/pcmcia/sa1100_jornada720.c
+++ b/drivers/pcmcia/sa1100_jornada720.c
@@ -10,7 +10,7 @@
10#include <linux/errno.h> 10#include <linux/errno.h>
11#include <linux/init.h> 11#include <linux/init.h>
12 12
13#include <asm/hardware.h> 13#include <mach/hardware.h>
14#include <asm/hardware/sa1111.h> 14#include <asm/hardware/sa1111.h>
15#include <asm/mach-types.h> 15#include <asm/mach-types.h>
16 16
diff --git a/drivers/pcmcia/sa1100_neponset.c b/drivers/pcmcia/sa1100_neponset.c
index 687492fcd5b4..4c41e86ccff9 100644
--- a/drivers/pcmcia/sa1100_neponset.c
+++ b/drivers/pcmcia/sa1100_neponset.c
@@ -9,9 +9,9 @@
9#include <linux/errno.h> 9#include <linux/errno.h>
10#include <linux/init.h> 10#include <linux/init.h>
11 11
12#include <asm/hardware.h> 12#include <mach/hardware.h>
13#include <asm/mach-types.h> 13#include <asm/mach-types.h>
14#include <asm/arch/neponset.h> 14#include <mach/neponset.h>
15#include <asm/hardware/sa1111.h> 15#include <asm/hardware/sa1111.h>
16 16
17#include "sa1111_generic.h" 17#include "sa1111_generic.h"
diff --git a/drivers/pcmcia/sa1100_shannon.c b/drivers/pcmcia/sa1100_shannon.c
index 494912fccc0d..46d8c1977c2a 100644
--- a/drivers/pcmcia/sa1100_shannon.c
+++ b/drivers/pcmcia/sa1100_shannon.c
@@ -9,9 +9,9 @@
9#include <linux/device.h> 9#include <linux/device.h>
10#include <linux/init.h> 10#include <linux/init.h>
11 11
12#include <asm/hardware.h> 12#include <mach/hardware.h>
13#include <asm/mach-types.h> 13#include <asm/mach-types.h>
14#include <asm/arch/shannon.h> 14#include <mach/shannon.h>
15#include <asm/irq.h> 15#include <asm/irq.h>
16#include "sa1100_generic.h" 16#include "sa1100_generic.h"
17 17
diff --git a/drivers/pcmcia/sa1100_simpad.c b/drivers/pcmcia/sa1100_simpad.c
index 42567de894b9..33a08ae09fdf 100644
--- a/drivers/pcmcia/sa1100_simpad.c
+++ b/drivers/pcmcia/sa1100_simpad.c
@@ -9,10 +9,10 @@
9#include <linux/device.h> 9#include <linux/device.h>
10#include <linux/init.h> 10#include <linux/init.h>
11 11
12#include <asm/hardware.h> 12#include <mach/hardware.h>
13#include <asm/mach-types.h> 13#include <asm/mach-types.h>
14#include <asm/irq.h> 14#include <asm/irq.h>
15#include <asm/arch/simpad.h> 15#include <mach/simpad.h>
16#include "sa1100_generic.h" 16#include "sa1100_generic.h"
17 17
18extern long get_cs3_shadow(void); 18extern long get_cs3_shadow(void);
diff --git a/drivers/pcmcia/sa1111_generic.c b/drivers/pcmcia/sa1111_generic.c
index 658cddfbcf29..6924d0ea8d32 100644
--- a/drivers/pcmcia/sa1111_generic.c
+++ b/drivers/pcmcia/sa1111_generic.c
@@ -14,7 +14,7 @@
14 14
15#include <pcmcia/ss.h> 15#include <pcmcia/ss.h>
16 16
17#include <asm/hardware.h> 17#include <mach/hardware.h>
18#include <asm/hardware/sa1111.h> 18#include <asm/hardware/sa1111.h>
19#include <asm/io.h> 19#include <asm/io.h>
20#include <asm/irq.h> 20#include <asm/irq.h>
diff --git a/drivers/pcmcia/sa11xx_base.c b/drivers/pcmcia/sa11xx_base.c
index 31a7abc55b23..7cb1273202cc 100644
--- a/drivers/pcmcia/sa11xx_base.c
+++ b/drivers/pcmcia/sa11xx_base.c
@@ -37,7 +37,7 @@
37#include <linux/kernel.h> 37#include <linux/kernel.h>
38#include <linux/spinlock.h> 38#include <linux/spinlock.h>
39 39
40#include <asm/hardware.h> 40#include <mach/hardware.h>
41#include <asm/io.h> 41#include <asm/io.h>
42#include <asm/irq.h> 42#include <asm/irq.h>
43#include <asm/system.h> 43#include <asm/system.h>
diff --git a/drivers/pcmcia/soc_common.c b/drivers/pcmcia/soc_common.c
index 8c21446996f2..c48f3f69bdaf 100644
--- a/drivers/pcmcia/soc_common.c
+++ b/drivers/pcmcia/soc_common.c
@@ -43,7 +43,7 @@
43#include <linux/spinlock.h> 43#include <linux/spinlock.h>
44#include <linux/cpufreq.h> 44#include <linux/cpufreq.h>
45 45
46#include <asm/hardware.h> 46#include <mach/hardware.h>
47#include <asm/io.h> 47#include <asm/io.h>
48#include <asm/system.h> 48#include <asm/system.h>
49 49
@@ -51,7 +51,7 @@
51 51
52/* FIXME: platform dependent resource declaration has to move out of this file */ 52/* FIXME: platform dependent resource declaration has to move out of this file */
53#ifdef CONFIG_ARCH_PXA 53#ifdef CONFIG_ARCH_PXA
54#include <asm/arch/pxa-regs.h> 54#include <mach/pxa-regs.h>
55#endif 55#endif
56 56
57#ifdef DEBUG 57#ifdef DEBUG
diff --git a/drivers/pnp/support.c b/drivers/pnp/support.c
index bbf78ef4ba02..b42df1620718 100644
--- a/drivers/pnp/support.c
+++ b/drivers/pnp/support.c
@@ -77,7 +77,7 @@ void dbg_pnp_show_resources(struct pnp_dev *dev, char *desc)
77{ 77{
78#ifdef DEBUG 78#ifdef DEBUG
79 char buf[128]; 79 char buf[128];
80 int len = 0; 80 int len;
81 struct pnp_resource *pnp_res; 81 struct pnp_resource *pnp_res;
82 struct resource *res; 82 struct resource *res;
83 83
@@ -89,9 +89,10 @@ void dbg_pnp_show_resources(struct pnp_dev *dev, char *desc)
89 dev_dbg(&dev->dev, "%s: current resources:\n", desc); 89 dev_dbg(&dev->dev, "%s: current resources:\n", desc);
90 list_for_each_entry(pnp_res, &dev->resources, list) { 90 list_for_each_entry(pnp_res, &dev->resources, list) {
91 res = &pnp_res->res; 91 res = &pnp_res->res;
92 len = 0;
92 93
93 len += snprintf(buf + len, sizeof(buf) - len, " %-3s ", 94 len += scnprintf(buf + len, sizeof(buf) - len, " %-3s ",
94 pnp_resource_type_name(res)); 95 pnp_resource_type_name(res));
95 96
96 if (res->flags & IORESOURCE_DISABLED) { 97 if (res->flags & IORESOURCE_DISABLED) {
97 dev_dbg(&dev->dev, "%sdisabled\n", buf); 98 dev_dbg(&dev->dev, "%sdisabled\n", buf);
@@ -101,18 +102,18 @@ void dbg_pnp_show_resources(struct pnp_dev *dev, char *desc)
101 switch (pnp_resource_type(res)) { 102 switch (pnp_resource_type(res)) {
102 case IORESOURCE_IO: 103 case IORESOURCE_IO:
103 case IORESOURCE_MEM: 104 case IORESOURCE_MEM:
104 len += snprintf(buf + len, sizeof(buf) - len, 105 len += scnprintf(buf + len, sizeof(buf) - len,
105 "%#llx-%#llx flags %#lx", 106 "%#llx-%#llx flags %#lx",
106 (unsigned long long) res->start, 107 (unsigned long long) res->start,
107 (unsigned long long) res->end, 108 (unsigned long long) res->end,
108 res->flags); 109 res->flags);
109 break; 110 break;
110 case IORESOURCE_IRQ: 111 case IORESOURCE_IRQ:
111 case IORESOURCE_DMA: 112 case IORESOURCE_DMA:
112 len += snprintf(buf + len, sizeof(buf) - len, 113 len += scnprintf(buf + len, sizeof(buf) - len,
113 "%lld flags %#lx", 114 "%lld flags %#lx",
114 (unsigned long long) res->start, 115 (unsigned long long) res->start,
115 res->flags); 116 res->flags);
116 break; 117 break;
117 } 118 }
118 dev_dbg(&dev->dev, "%s\n", buf); 119 dev_dbg(&dev->dev, "%s\n", buf);
@@ -144,66 +145,67 @@ void dbg_pnp_show_option(struct pnp_dev *dev, struct pnp_option *option)
144 struct pnp_dma *dma; 145 struct pnp_dma *dma;
145 146
146 if (pnp_option_is_dependent(option)) 147 if (pnp_option_is_dependent(option))
147 len += snprintf(buf + len, sizeof(buf) - len, 148 len += scnprintf(buf + len, sizeof(buf) - len,
148 " dependent set %d (%s) ", 149 " dependent set %d (%s) ",
149 pnp_option_set(option), 150 pnp_option_set(option),
150 pnp_option_priority_name(option)); 151 pnp_option_priority_name(option));
151 else 152 else
152 len += snprintf(buf + len, sizeof(buf) - len, " independent "); 153 len += scnprintf(buf + len, sizeof(buf) - len,
154 " independent ");
153 155
154 switch (option->type) { 156 switch (option->type) {
155 case IORESOURCE_IO: 157 case IORESOURCE_IO:
156 port = &option->u.port; 158 port = &option->u.port;
157 len += snprintf(buf + len, sizeof(buf) - len, "io min %#llx " 159 len += scnprintf(buf + len, sizeof(buf) - len, "io min %#llx "
158 "max %#llx align %lld size %lld flags %#x", 160 "max %#llx align %lld size %lld flags %#x",
159 (unsigned long long) port->min, 161 (unsigned long long) port->min,
160 (unsigned long long) port->max, 162 (unsigned long long) port->max,
161 (unsigned long long) port->align, 163 (unsigned long long) port->align,
162 (unsigned long long) port->size, port->flags); 164 (unsigned long long) port->size, port->flags);
163 break; 165 break;
164 case IORESOURCE_MEM: 166 case IORESOURCE_MEM:
165 mem = &option->u.mem; 167 mem = &option->u.mem;
166 len += snprintf(buf + len, sizeof(buf) - len, "mem min %#llx " 168 len += scnprintf(buf + len, sizeof(buf) - len, "mem min %#llx "
167 "max %#llx align %lld size %lld flags %#x", 169 "max %#llx align %lld size %lld flags %#x",
168 (unsigned long long) mem->min, 170 (unsigned long long) mem->min,
169 (unsigned long long) mem->max, 171 (unsigned long long) mem->max,
170 (unsigned long long) mem->align, 172 (unsigned long long) mem->align,
171 (unsigned long long) mem->size, mem->flags); 173 (unsigned long long) mem->size, mem->flags);
172 break; 174 break;
173 case IORESOURCE_IRQ: 175 case IORESOURCE_IRQ:
174 irq = &option->u.irq; 176 irq = &option->u.irq;
175 len += snprintf(buf + len, sizeof(buf) - len, "irq"); 177 len += scnprintf(buf + len, sizeof(buf) - len, "irq");
176 if (bitmap_empty(irq->map.bits, PNP_IRQ_NR)) 178 if (bitmap_empty(irq->map.bits, PNP_IRQ_NR))
177 len += snprintf(buf + len, sizeof(buf) - len, 179 len += scnprintf(buf + len, sizeof(buf) - len,
178 " <none>"); 180 " <none>");
179 else { 181 else {
180 for (i = 0; i < PNP_IRQ_NR; i++) 182 for (i = 0; i < PNP_IRQ_NR; i++)
181 if (test_bit(i, irq->map.bits)) 183 if (test_bit(i, irq->map.bits))
182 len += snprintf(buf + len, 184 len += scnprintf(buf + len,
183 sizeof(buf) - len, 185 sizeof(buf) - len,
184 " %d", i); 186 " %d", i);
185 } 187 }
186 len += snprintf(buf + len, sizeof(buf) - len, " flags %#x", 188 len += scnprintf(buf + len, sizeof(buf) - len, " flags %#x",
187 irq->flags); 189 irq->flags);
188 if (irq->flags & IORESOURCE_IRQ_OPTIONAL) 190 if (irq->flags & IORESOURCE_IRQ_OPTIONAL)
189 len += snprintf(buf + len, sizeof(buf) - len, 191 len += scnprintf(buf + len, sizeof(buf) - len,
190 " (optional)"); 192 " (optional)");
191 break; 193 break;
192 case IORESOURCE_DMA: 194 case IORESOURCE_DMA:
193 dma = &option->u.dma; 195 dma = &option->u.dma;
194 len += snprintf(buf + len, sizeof(buf) - len, "dma"); 196 len += scnprintf(buf + len, sizeof(buf) - len, "dma");
195 if (!dma->map) 197 if (!dma->map)
196 len += snprintf(buf + len, sizeof(buf) - len, 198 len += scnprintf(buf + len, sizeof(buf) - len,
197 " <none>"); 199 " <none>");
198 else { 200 else {
199 for (i = 0; i < 8; i++) 201 for (i = 0; i < 8; i++)
200 if (dma->map & (1 << i)) 202 if (dma->map & (1 << i))
201 len += snprintf(buf + len, 203 len += scnprintf(buf + len,
202 sizeof(buf) - len, 204 sizeof(buf) - len,
203 " %d", i); 205 " %d", i);
204 } 206 }
205 len += snprintf(buf + len, sizeof(buf) - len, " (bitmask %#x) " 207 len += scnprintf(buf + len, sizeof(buf) - len, " (bitmask %#x) "
206 "flags %#x", dma->map, dma->flags); 208 "flags %#x", dma->map, dma->flags);
207 break; 209 break;
208 } 210 }
209 dev_dbg(&dev->dev, "%s\n", buf); 211 dev_dbg(&dev->dev, "%s\n", buf);
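The pnp/support.c conversion above is a buffer-safety fix as much as a cleanup: these helpers build one debug line by appending repeatedly into a 128-byte buffer, and snprintf returns the length that would have been written, so a long entry lets len outrun sizeof(buf), after which the unsigned sizeof(buf) - len size argument wraps to a huge value. scnprintf returns only what was actually stored, keeping the running offset inside the buffer; the hunk also resets len for each resource so lines no longer accumulate across loop iterations. A minimal sketch of the safe append pattern, with a hypothetical helper name:

#include <linux/kernel.h>

/* Hedged sketch: append into a fixed buffer without letting the offset run
 * past the end, mirroring the scnprintf conversion above. */
static void example_append(char *buf, size_t size, int *len, int value)
{
	/* scnprintf returns the number of characters actually written, so
	 * *len can never exceed size - 1 */
	*len += scnprintf(buf + *len, size - *len, " %d", value);
}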
diff --git a/drivers/power/Kconfig b/drivers/power/Kconfig
index 4d17d384578d..9ce55850271a 100644
--- a/drivers/power/Kconfig
+++ b/drivers/power/Kconfig
@@ -49,6 +49,13 @@ config BATTERY_OLPC
49 help 49 help
50 Say Y to enable support for the battery on the OLPC laptop. 50 Say Y to enable support for the battery on the OLPC laptop.
51 51
52config BATTERY_TOSA
53 tristate "Sharp SL-6000 (tosa) battery"
54 depends on MACH_TOSA && MFD_TC6393XB
55 help
56 Say Y to enable support for the battery on the Sharp Zaurus
57 SL-6000 (tosa) models.
58
52config BATTERY_PALMTX 59config BATTERY_PALMTX
53 tristate "Palm T|X battery" 60 tristate "Palm T|X battery"
54 depends on MACH_PALMTX 61 depends on MACH_PALMTX
diff --git a/drivers/power/Makefile b/drivers/power/Makefile
index 6f43a54ee420..4706bf8ff459 100644
--- a/drivers/power/Makefile
+++ b/drivers/power/Makefile
@@ -20,4 +20,5 @@ obj-$(CONFIG_APM_POWER) += apm_power.o
20obj-$(CONFIG_BATTERY_DS2760) += ds2760_battery.o 20obj-$(CONFIG_BATTERY_DS2760) += ds2760_battery.o
21obj-$(CONFIG_BATTERY_PMU) += pmu_battery.o 21obj-$(CONFIG_BATTERY_PMU) += pmu_battery.o
22obj-$(CONFIG_BATTERY_OLPC) += olpc_battery.o 22obj-$(CONFIG_BATTERY_OLPC) += olpc_battery.o
23obj-$(CONFIG_BATTERY_TOSA) += tosa_battery.o
23obj-$(CONFIG_BATTERY_PALMTX) += palmtx_battery.o 24obj-$(CONFIG_BATTERY_PALMTX) += palmtx_battery.o
diff --git a/drivers/power/olpc_battery.c b/drivers/power/olpc_battery.c
index ab1e8289f07f..32570af3c5c9 100644
--- a/drivers/power/olpc_battery.c
+++ b/drivers/power/olpc_battery.c
@@ -19,7 +19,7 @@
19 19
20#define EC_BAT_VOLTAGE 0x10 /* uint16_t, *9.76/32, mV */ 20#define EC_BAT_VOLTAGE 0x10 /* uint16_t, *9.76/32, mV */
21#define EC_BAT_CURRENT 0x11 /* int16_t, *15.625/120, mA */ 21#define EC_BAT_CURRENT 0x11 /* int16_t, *15.625/120, mA */
22#define EC_BAT_ACR 0x12 22#define EC_BAT_ACR 0x12 /* int16_t, *6250/15, µAh */
23#define EC_BAT_TEMP 0x13 /* uint16_t, *100/256, °C */ 23#define EC_BAT_TEMP 0x13 /* uint16_t, *100/256, °C */
24#define EC_AMB_TEMP 0x14 /* uint16_t, *100/256, °C */ 24#define EC_AMB_TEMP 0x14 /* uint16_t, *100/256, °C */
25#define EC_BAT_STATUS 0x15 /* uint8_t, bitmask */ 25#define EC_BAT_STATUS 0x15 /* uint8_t, bitmask */
@@ -84,6 +84,119 @@ static struct power_supply olpc_ac = {
84 .get_property = olpc_ac_get_prop, 84 .get_property = olpc_ac_get_prop,
85}; 85};
86 86
87static char bat_serial[17]; /* Ick */
88
89static int olpc_bat_get_status(union power_supply_propval *val, uint8_t ec_byte)
90{
91 if (olpc_platform_info.ecver > 0x44) {
92 if (ec_byte & BAT_STAT_CHARGING)
93 val->intval = POWER_SUPPLY_STATUS_CHARGING;
94 else if (ec_byte & BAT_STAT_DISCHARGING)
95 val->intval = POWER_SUPPLY_STATUS_DISCHARGING;
96 else if (ec_byte & BAT_STAT_FULL)
97 val->intval = POWER_SUPPLY_STATUS_FULL;
98 else /* er,... */
99 val->intval = POWER_SUPPLY_STATUS_NOT_CHARGING;
100 } else {
101 /* Older EC didn't report charge/discharge bits */
102 if (!(ec_byte & BAT_STAT_AC)) /* No AC means discharging */
103 val->intval = POWER_SUPPLY_STATUS_DISCHARGING;
104 else if (ec_byte & BAT_STAT_FULL)
105 val->intval = POWER_SUPPLY_STATUS_FULL;
106 else /* Not _necessarily_ true but EC doesn't tell all yet */
107 val->intval = POWER_SUPPLY_STATUS_CHARGING;
108 }
109
110 return 0;
111}
112
113static int olpc_bat_get_health(union power_supply_propval *val)
114{
115 uint8_t ec_byte;
116 int ret;
117
118 ret = olpc_ec_cmd(EC_BAT_ERRCODE, NULL, 0, &ec_byte, 1);
119 if (ret)
120 return ret;
121
122 switch (ec_byte) {
123 case 0:
124 val->intval = POWER_SUPPLY_HEALTH_GOOD;
125 break;
126
127 case BAT_ERR_OVERTEMP:
128 val->intval = POWER_SUPPLY_HEALTH_OVERHEAT;
129 break;
130
131 case BAT_ERR_OVERVOLTAGE:
132 val->intval = POWER_SUPPLY_HEALTH_OVERVOLTAGE;
133 break;
134
135 case BAT_ERR_INFOFAIL:
136 case BAT_ERR_OUT_OF_CONTROL:
137 case BAT_ERR_ID_FAIL:
138 case BAT_ERR_ACR_FAIL:
139 val->intval = POWER_SUPPLY_HEALTH_UNSPEC_FAILURE;
140 break;
141
142 default:
143 /* Eep. We don't know this failure code */
144 ret = -EIO;
145 }
146
147 return ret;
148}
149
150static int olpc_bat_get_mfr(union power_supply_propval *val)
151{
152 uint8_t ec_byte;
153 int ret;
154
155 ec_byte = BAT_ADDR_MFR_TYPE;
156 ret = olpc_ec_cmd(EC_BAT_EEPROM, &ec_byte, 1, &ec_byte, 1);
157 if (ret)
158 return ret;
159
160 switch (ec_byte >> 4) {
161 case 1:
162 val->strval = "Gold Peak";
163 break;
164 case 2:
165 val->strval = "BYD";
166 break;
167 default:
168 val->strval = "Unknown";
169 break;
170 }
171
172 return ret;
173}
174
175static int olpc_bat_get_tech(union power_supply_propval *val)
176{
177 uint8_t ec_byte;
178 int ret;
179
180 ec_byte = BAT_ADDR_MFR_TYPE;
181 ret = olpc_ec_cmd(EC_BAT_EEPROM, &ec_byte, 1, &ec_byte, 1);
182 if (ret)
183 return ret;
184
185 switch (ec_byte & 0xf) {
186 case 1:
187 val->intval = POWER_SUPPLY_TECHNOLOGY_NiMH;
188 break;
189 case 2:
190 val->intval = POWER_SUPPLY_TECHNOLOGY_LiFe;
191 break;
192 default:
193 val->intval = POWER_SUPPLY_TECHNOLOGY_UNKNOWN;
194 break;
195 }
196
197 return ret;
198}
199
87/********************************************************************* 200/*********************************************************************
88 * Battery properties 201 * Battery properties
89 *********************************************************************/ 202 *********************************************************************/
@@ -94,6 +207,7 @@ static int olpc_bat_get_property(struct power_supply *psy,
94 int ret = 0; 207 int ret = 0;
95 int16_t ec_word; 208 int16_t ec_word;
96 uint8_t ec_byte; 209 uint8_t ec_byte;
210 uint64_t ser_buf;
97 211
98 ret = olpc_ec_cmd(EC_BAT_STATUS, NULL, 0, &ec_byte, 1); 212 ret = olpc_ec_cmd(EC_BAT_STATUS, NULL, 0, &ec_byte, 1);
99 if (ret) 213 if (ret)
@@ -110,25 +224,10 @@ static int olpc_bat_get_property(struct power_supply *psy,
110 224
111 switch (psp) { 225 switch (psp) {
112 case POWER_SUPPLY_PROP_STATUS: 226 case POWER_SUPPLY_PROP_STATUS:
113 if (olpc_platform_info.ecver > 0x44) { 227 ret = olpc_bat_get_status(val, ec_byte);
114 if (ec_byte & BAT_STAT_CHARGING) 228 if (ret)
115 val->intval = POWER_SUPPLY_STATUS_CHARGING; 229 return ret;
116 else if (ec_byte & BAT_STAT_DISCHARGING) 230 break;
117 val->intval = POWER_SUPPLY_STATUS_DISCHARGING;
118 else if (ec_byte & BAT_STAT_FULL)
119 val->intval = POWER_SUPPLY_STATUS_FULL;
120 else /* er,... */
121 val->intval = POWER_SUPPLY_STATUS_NOT_CHARGING;
122 } else {
123 /* Older EC didn't report charge/discharge bits */
124 if (!(ec_byte & BAT_STAT_AC)) /* No AC means discharging */
125 val->intval = POWER_SUPPLY_STATUS_DISCHARGING;
126 else if (ec_byte & BAT_STAT_FULL)
127 val->intval = POWER_SUPPLY_STATUS_FULL;
128 else /* Not _necessarily_ true but EC doesn't tell all yet */
129 val->intval = POWER_SUPPLY_STATUS_CHARGING;
130 break;
131 }
132 case POWER_SUPPLY_PROP_PRESENT: 231 case POWER_SUPPLY_PROP_PRESENT:
133 val->intval = !!(ec_byte & BAT_STAT_PRESENT); 232 val->intval = !!(ec_byte & BAT_STAT_PRESENT);
134 break; 233 break;
@@ -137,72 +236,21 @@ static int olpc_bat_get_property(struct power_supply *psy,
137 if (ec_byte & BAT_STAT_DESTROY) 236 if (ec_byte & BAT_STAT_DESTROY)
138 val->intval = POWER_SUPPLY_HEALTH_DEAD; 237 val->intval = POWER_SUPPLY_HEALTH_DEAD;
139 else { 238 else {
140 ret = olpc_ec_cmd(EC_BAT_ERRCODE, NULL, 0, &ec_byte, 1); 239 ret = olpc_bat_get_health(val);
141 if (ret) 240 if (ret)
142 return ret; 241 return ret;
143
144 switch (ec_byte) {
145 case 0:
146 val->intval = POWER_SUPPLY_HEALTH_GOOD;
147 break;
148
149 case BAT_ERR_OVERTEMP:
150 val->intval = POWER_SUPPLY_HEALTH_OVERHEAT;
151 break;
152
153 case BAT_ERR_OVERVOLTAGE:
154 val->intval = POWER_SUPPLY_HEALTH_OVERVOLTAGE;
155 break;
156
157 case BAT_ERR_INFOFAIL:
158 case BAT_ERR_OUT_OF_CONTROL:
159 case BAT_ERR_ID_FAIL:
160 case BAT_ERR_ACR_FAIL:
161 val->intval = POWER_SUPPLY_HEALTH_UNSPEC_FAILURE;
162 break;
163
164 default:
165 /* Eep. We don't know this failure code */
166 return -EIO;
167 }
168 } 242 }
169 break; 243 break;
170 244
171 case POWER_SUPPLY_PROP_MANUFACTURER: 245 case POWER_SUPPLY_PROP_MANUFACTURER:
172 ec_byte = BAT_ADDR_MFR_TYPE; 246 ret = olpc_bat_get_mfr(val);
173 ret = olpc_ec_cmd(EC_BAT_EEPROM, &ec_byte, 1, &ec_byte, 1);
174 if (ret) 247 if (ret)
175 return ret; 248 return ret;
176
177 switch (ec_byte >> 4) {
178 case 1:
179 val->strval = "Gold Peak";
180 break;
181 case 2:
182 val->strval = "BYD";
183 break;
184 default:
185 val->strval = "Unknown";
186 break;
187 }
188 break; 249 break;
189 case POWER_SUPPLY_PROP_TECHNOLOGY: 250 case POWER_SUPPLY_PROP_TECHNOLOGY:
190 ec_byte = BAT_ADDR_MFR_TYPE; 251 ret = olpc_bat_get_tech(val);
191 ret = olpc_ec_cmd(EC_BAT_EEPROM, &ec_byte, 1, &ec_byte, 1);
192 if (ret) 252 if (ret)
193 return ret; 253 return ret;
194
195 switch (ec_byte & 0xf) {
196 case 1:
197 val->intval = POWER_SUPPLY_TECHNOLOGY_NiMH;
198 break;
199 case 2:
200 val->intval = POWER_SUPPLY_TECHNOLOGY_LiFe;
201 break;
202 default:
203 val->intval = POWER_SUPPLY_TECHNOLOGY_UNKNOWN;
204 break;
205 }
206 break; 254 break;
207 case POWER_SUPPLY_PROP_VOLTAGE_AVG: 255 case POWER_SUPPLY_PROP_VOLTAGE_AVG:
208 ret = olpc_ec_cmd(EC_BAT_VOLTAGE, NULL, 0, (void *)&ec_word, 2); 256 ret = olpc_ec_cmd(EC_BAT_VOLTAGE, NULL, 0, (void *)&ec_word, 2);
@@ -241,6 +289,22 @@ static int olpc_bat_get_property(struct power_supply *psy,
241 ec_word = be16_to_cpu(ec_word); 289 ec_word = be16_to_cpu(ec_word);
242 val->intval = ec_word * 100 / 256; 290 val->intval = ec_word * 100 / 256;
243 break; 291 break;
292 case POWER_SUPPLY_PROP_CHARGE_COUNTER:
293 ret = olpc_ec_cmd(EC_BAT_ACR, NULL, 0, (void *)&ec_word, 2);
294 if (ret)
295 return ret;
296
297 ec_word = be16_to_cpu(ec_word);
298 val->intval = ec_word * 6250 / 15;
299 break;
300 case POWER_SUPPLY_PROP_SERIAL_NUMBER:
301 ret = olpc_ec_cmd(EC_BAT_SERIAL, NULL, 0, (void *)&ser_buf, 8);
302 if (ret)
303 return ret;
304
305 sprintf(bat_serial, "%016llx", (long long)be64_to_cpu(ser_buf));
306 val->strval = bat_serial;
307 break;
244 default: 308 default:
245 ret = -EINVAL; 309 ret = -EINVAL;
246 break; 310 break;
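The new charge-counter property above maps directly onto the EC's accumulated current register: the two EC bytes are byte-swapped with be16_to_cpu and scaled by 6250/15 to µAh, which is exactly the conversion the updated EC_BAT_ACR comment documents. As a hypothetical worked example, a raw reading of 384 reports 384 × 6250 / 15 = 160 000 µAh, i.e. 160 mAh of accumulated charge; ec_word is a signed 16-bit value, so the scaled result can also be negative.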
@@ -260,6 +324,50 @@ static enum power_supply_property olpc_bat_props[] = {
260 POWER_SUPPLY_PROP_TEMP, 324 POWER_SUPPLY_PROP_TEMP,
261 POWER_SUPPLY_PROP_TEMP_AMBIENT, 325 POWER_SUPPLY_PROP_TEMP_AMBIENT,
262 POWER_SUPPLY_PROP_MANUFACTURER, 326 POWER_SUPPLY_PROP_MANUFACTURER,
327 POWER_SUPPLY_PROP_SERIAL_NUMBER,
328 POWER_SUPPLY_PROP_CHARGE_COUNTER,
329};
330
331/* EEPROM reading goes completely around the power_supply API, sadly */
332
333#define EEPROM_START 0x20
334#define EEPROM_END 0x80
335#define EEPROM_SIZE (EEPROM_END - EEPROM_START)
336
337static ssize_t olpc_bat_eeprom_read(struct kobject *kobj,
338 struct bin_attribute *attr, char *buf, loff_t off, size_t count)
339{
340 uint8_t ec_byte;
341 int ret, end;
342
343 if (off >= EEPROM_SIZE)
344 return 0;
345 if (off + count > EEPROM_SIZE)
346 count = EEPROM_SIZE - off;
347
348 end = EEPROM_START + off + count;
349 for (ec_byte = EEPROM_START + off; ec_byte < end; ec_byte++) {
350 ret = olpc_ec_cmd(EC_BAT_EEPROM, &ec_byte, 1,
351 &buf[ec_byte - EEPROM_START], 1);
352 if (ret) {
353 printk(KERN_ERR "olpc-battery: EC command "
354 "EC_BAT_EEPROM @ 0x%x failed -"
355 " %d!\n", ec_byte, ret);
356 return -EIO;
357 }
358 }
359
360 return count;
361}
362
363static struct bin_attribute olpc_bat_eeprom = {
364 .attr = {
365 .name = "eeprom",
366 .mode = S_IRUGO,
367 .owner = THIS_MODULE,
368 },
369 .size = 0,
370 .read = olpc_bat_eeprom_read,
263}; 371};
264 372
265/********************************************************************* 373/*********************************************************************
@@ -290,8 +398,14 @@ static int __init olpc_bat_init(void)
290 398
291 if (!olpc_platform_info.ecver) 399 if (!olpc_platform_info.ecver)
292 return -ENXIO; 400 return -ENXIO;
293 if (olpc_platform_info.ecver < 0x43) { 401
294 printk(KERN_NOTICE "OLPC EC version 0x%02x too old for battery driver.\n", olpc_platform_info.ecver); 402 /*
403 * We've seen a number of EC protocol changes; this driver requires
404 * the latest EC protocol, supported by 0x44 and above.
405 */
406 if (olpc_platform_info.ecver < 0x44) {
407 printk(KERN_NOTICE "OLPC EC version 0x%02x too old for "
408 "battery driver.\n", olpc_platform_info.ecver);
295 return -ENXIO; 409 return -ENXIO;
296 } 410 }
297 411
@@ -315,8 +429,14 @@ static int __init olpc_bat_init(void)
315 if (ret) 429 if (ret)
316 goto battery_failed; 430 goto battery_failed;
317 431
432 ret = device_create_bin_file(olpc_bat.dev, &olpc_bat_eeprom);
433 if (ret)
434 goto eeprom_failed;
435
318 goto success; 436 goto success;
319 437
438eeprom_failed:
439 power_supply_unregister(&olpc_bat);
320battery_failed: 440battery_failed:
321 power_supply_unregister(&olpc_ac); 441 power_supply_unregister(&olpc_ac);
322ac_failed: 442ac_failed:
@@ -327,6 +447,7 @@ success:
327 447
328static void __exit olpc_bat_exit(void) 448static void __exit olpc_bat_exit(void)
329{ 449{
450 device_remove_bin_file(olpc_bat.dev, &olpc_bat_eeprom);
330 power_supply_unregister(&olpc_bat); 451 power_supply_unregister(&olpc_bat);
331 power_supply_unregister(&olpc_ac); 452 power_supply_unregister(&olpc_ac);
332 platform_device_unregister(bat_pdev); 453 platform_device_unregister(bat_pdev);
diff --git a/drivers/power/palmtx_battery.c b/drivers/power/palmtx_battery.c
index 244bb273a637..7035bfa41c62 100644
--- a/drivers/power/palmtx_battery.c
+++ b/drivers/power/palmtx_battery.c
@@ -22,7 +22,7 @@
22#include <linux/gpio.h> 22#include <linux/gpio.h>
23 23
24#include <asm/mach-types.h> 24#include <asm/mach-types.h>
25#include <asm/arch/palmtx.h> 25#include <mach/palmtx.h>
26 26
27static DEFINE_MUTEX(bat_lock); 27static DEFINE_MUTEX(bat_lock);
28static struct work_struct bat_work; 28static struct work_struct bat_work;
diff --git a/drivers/power/power_supply_sysfs.c b/drivers/power/power_supply_sysfs.c
index 49215da5249b..fe2aeb11939b 100644
--- a/drivers/power/power_supply_sysfs.c
+++ b/drivers/power/power_supply_sysfs.c
@@ -99,6 +99,7 @@ static struct device_attribute power_supply_attrs[] = {
99 POWER_SUPPLY_ATTR(charge_empty), 99 POWER_SUPPLY_ATTR(charge_empty),
100 POWER_SUPPLY_ATTR(charge_now), 100 POWER_SUPPLY_ATTR(charge_now),
101 POWER_SUPPLY_ATTR(charge_avg), 101 POWER_SUPPLY_ATTR(charge_avg),
102 POWER_SUPPLY_ATTR(charge_counter),
102 POWER_SUPPLY_ATTR(energy_full_design), 103 POWER_SUPPLY_ATTR(energy_full_design),
103 POWER_SUPPLY_ATTR(energy_empty_design), 104 POWER_SUPPLY_ATTR(energy_empty_design),
104 POWER_SUPPLY_ATTR(energy_full), 105 POWER_SUPPLY_ATTR(energy_full),
diff --git a/drivers/power/tosa_battery.c b/drivers/power/tosa_battery.c
new file mode 100644
index 000000000000..2eab35aab311
--- /dev/null
+++ b/drivers/power/tosa_battery.c
@@ -0,0 +1,486 @@
1/*
2 * Battery and Power Management code for the Sharp SL-6000x
3 *
4 * Copyright (c) 2005 Dirk Opfer
5 * Copyright (c) 2008 Dmitry Baryshkov
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as
9 * published by the Free Software Foundation.
10 *
11 */
12#include <linux/kernel.h>
13#include <linux/module.h>
14#include <linux/power_supply.h>
15#include <linux/wm97xx.h>
16#include <linux/delay.h>
17#include <linux/spinlock.h>
18#include <linux/interrupt.h>
19#include <linux/gpio.h>
20
21#include <asm/mach-types.h>
22#include <mach/tosa.h>
23
24static DEFINE_MUTEX(bat_lock); /* protects gpio pins */
25static struct work_struct bat_work;
26
27struct tosa_bat {
28 int status;
29 struct power_supply psy;
30 int full_chrg;
31
32 struct mutex work_lock; /* protects data */
33
34 bool (*is_present)(struct tosa_bat *bat);
35 int gpio_full;
36 int gpio_charge_off;
37
38 int technology;
39
40 int gpio_bat;
41 int adc_bat;
42 int adc_bat_divider;
43 int bat_max;
44 int bat_min;
45
46 int gpio_temp;
47 int adc_temp;
48 int adc_temp_divider;
49};
50
51static struct tosa_bat tosa_bat_main;
52static struct tosa_bat tosa_bat_jacket;
53
54static unsigned long tosa_read_bat(struct tosa_bat *bat)
55{
56 unsigned long value = 0;
57
58 if (bat->gpio_bat < 0 || bat->adc_bat < 0)
59 return 0;
60
61 mutex_lock(&bat_lock);
62 gpio_set_value(bat->gpio_bat, 1);
63 msleep(5);
64 value = wm97xx_read_aux_adc(bat->psy.dev->parent->driver_data,
65 bat->adc_bat);
66 gpio_set_value(bat->gpio_bat, 0);
67 mutex_unlock(&bat_lock);
68
69 value = value * 1000000 / bat->adc_bat_divider;
70
71 return value;
72}
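The scaling in tosa_read_bat() above turns the WM97xx auxiliary ADC reading into microvolts: value × 1 000 000 / adc_bat_divider. With the main battery's divider of 414 (declared further down), a hypothetical raw reading of 1784 comes out at roughly 4 309 000 µV, right at the declared bat_max of 4 310 000 µV, and bat_min is written in the same form (1551 × 1 000 000 / 414 ≈ 3 746 000 µV). tosa_read_temp() below follows the identical pattern with a factor of 10 000 and its own divider.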
73
74static unsigned long tosa_read_temp(struct tosa_bat *bat)
75{
76 unsigned long value = 0;
77
78 if (bat->gpio_temp < 0 || bat->adc_temp < 0)
79 return 0;
80
81 mutex_lock(&bat_lock);
82 gpio_set_value(bat->gpio_temp, 1);
83 msleep(5);
84 value = wm97xx_read_aux_adc(bat->psy.dev->parent->driver_data,
85 bat->adc_temp);
86 gpio_set_value(bat->gpio_temp, 0);
87 mutex_unlock(&bat_lock);
88
89 value = value * 10000 / bat->adc_temp_divider;
90
91 return value;
92}
93
94static int tosa_bat_get_property(struct power_supply *psy,
95 enum power_supply_property psp,
96 union power_supply_propval *val)
97{
98 int ret = 0;
99 struct tosa_bat *bat = container_of(psy, struct tosa_bat, psy);
100
101 if (bat->is_present && !bat->is_present(bat)
102 && psp != POWER_SUPPLY_PROP_PRESENT) {
103 return -ENODEV;
104 }
105
106 switch (psp) {
107 case POWER_SUPPLY_PROP_STATUS:
108 val->intval = bat->status;
109 break;
110 case POWER_SUPPLY_PROP_TECHNOLOGY:
111 val->intval = bat->technology;
112 break;
113 case POWER_SUPPLY_PROP_VOLTAGE_NOW:
114 val->intval = tosa_read_bat(bat);
115 break;
116 case POWER_SUPPLY_PROP_VOLTAGE_MAX:
117 if (bat->full_chrg == -1)
118 val->intval = bat->bat_max;
119 else
120 val->intval = bat->full_chrg;
121 break;
122 case POWER_SUPPLY_PROP_VOLTAGE_MAX_DESIGN:
123 val->intval = bat->bat_max;
124 break;
125 case POWER_SUPPLY_PROP_VOLTAGE_MIN_DESIGN:
126 val->intval = bat->bat_min;
127 break;
128 case POWER_SUPPLY_PROP_TEMP:
129 val->intval = tosa_read_temp(bat);
130 break;
131 case POWER_SUPPLY_PROP_PRESENT:
132 val->intval = bat->is_present ? bat->is_present(bat) : 1;
133 break;
134 default:
135 ret = -EINVAL;
136 break;
137 }
138 return ret;
139}
140
141static bool tosa_jacket_bat_is_present(struct tosa_bat *bat)
142{
143 return gpio_get_value(TOSA_GPIO_JACKET_DETECT) == 0;
144}
145
146static void tosa_bat_external_power_changed(struct power_supply *psy)
147{
148 schedule_work(&bat_work);
149}
150
151static irqreturn_t tosa_bat_gpio_isr(int irq, void *data)
152{
153 pr_info("tosa_bat_gpio irq: %d\n", gpio_get_value(irq_to_gpio(irq)));
154 schedule_work(&bat_work);
155 return IRQ_HANDLED;
156}
157
158static void tosa_bat_update(struct tosa_bat *bat)
159{
160 int old;
161 struct power_supply *psy = &bat->psy;
162
163 mutex_lock(&bat->work_lock);
164
165 old = bat->status;
166
167 if (bat->is_present && !bat->is_present(bat)) {
168 printk(KERN_NOTICE "%s not present\n", psy->name);
169 bat->status = POWER_SUPPLY_STATUS_UNKNOWN;
170 bat->full_chrg = -1;
171 } else if (power_supply_am_i_supplied(psy)) {
172 if (bat->status == POWER_SUPPLY_STATUS_DISCHARGING) {
173 gpio_set_value(bat->gpio_charge_off, 0);
174 mdelay(15);
175 }
176
177 if (gpio_get_value(bat->gpio_full)) {
178 if (old == POWER_SUPPLY_STATUS_CHARGING ||
179 bat->full_chrg == -1)
180 bat->full_chrg = tosa_read_bat(bat);
181
182 gpio_set_value(bat->gpio_charge_off, 1);
183 bat->status = POWER_SUPPLY_STATUS_FULL;
184 } else {
185 gpio_set_value(bat->gpio_charge_off, 0);
186 bat->status = POWER_SUPPLY_STATUS_CHARGING;
187 }
188 } else {
189 gpio_set_value(bat->gpio_charge_off, 1);
190 bat->status = POWER_SUPPLY_STATUS_DISCHARGING;
191 }
192
193 if (old != bat->status)
194 power_supply_changed(psy);
195
196 mutex_unlock(&bat->work_lock);
197}
198
199static void tosa_bat_work(struct work_struct *work)
200{
201 tosa_bat_update(&tosa_bat_main);
202 tosa_bat_update(&tosa_bat_jacket);
203}
204
205
206static enum power_supply_property tosa_bat_main_props[] = {
207 POWER_SUPPLY_PROP_STATUS,
208 POWER_SUPPLY_PROP_TECHNOLOGY,
209 POWER_SUPPLY_PROP_VOLTAGE_NOW,
210 POWER_SUPPLY_PROP_VOLTAGE_MAX,
211 POWER_SUPPLY_PROP_VOLTAGE_MIN_DESIGN,
212 POWER_SUPPLY_PROP_TEMP,
213 POWER_SUPPLY_PROP_PRESENT,
214};
215
216static enum power_supply_property tosa_bat_bu_props[] = {
217 POWER_SUPPLY_PROP_STATUS,
218 POWER_SUPPLY_PROP_TECHNOLOGY,
219 POWER_SUPPLY_PROP_VOLTAGE_MIN_DESIGN,
220 POWER_SUPPLY_PROP_VOLTAGE_NOW,
221 POWER_SUPPLY_PROP_VOLTAGE_MAX_DESIGN,
222 POWER_SUPPLY_PROP_PRESENT,
223};
224
225static struct tosa_bat tosa_bat_main = {
226 .status = POWER_SUPPLY_STATUS_DISCHARGING,
227 .full_chrg = -1,
228 .psy = {
229 .name = "main-battery",
230 .type = POWER_SUPPLY_TYPE_BATTERY,
231 .properties = tosa_bat_main_props,
232 .num_properties = ARRAY_SIZE(tosa_bat_main_props),
233 .get_property = tosa_bat_get_property,
234 .external_power_changed = tosa_bat_external_power_changed,
235 .use_for_apm = 1,
236 },
237
238 .gpio_full = TOSA_GPIO_BAT0_CRG,
239 .gpio_charge_off = TOSA_GPIO_CHARGE_OFF,
240
241 .technology = POWER_SUPPLY_TECHNOLOGY_LIPO,
242
243 .gpio_bat = TOSA_GPIO_BAT0_V_ON,
244 .adc_bat = WM97XX_AUX_ID3,
245 .adc_bat_divider = 414,
246 .bat_max = 4310000,
247 .bat_min = 1551 * 1000000 / 414,
248
249 .gpio_temp = TOSA_GPIO_BAT1_TH_ON,
250 .adc_temp = WM97XX_AUX_ID2,
251 .adc_temp_divider = 10000,
252};
253
254static struct tosa_bat tosa_bat_jacket = {
255 .status = POWER_SUPPLY_STATUS_DISCHARGING,
256 .full_chrg = -1,
257 .psy = {
258 .name = "jacket-battery",
259 .type = POWER_SUPPLY_TYPE_BATTERY,
260 .properties = tosa_bat_main_props,
261 .num_properties = ARRAY_SIZE(tosa_bat_main_props),
262 .get_property = tosa_bat_get_property,
263 .external_power_changed = tosa_bat_external_power_changed,
264 },
265
266 .is_present = tosa_jacket_bat_is_present,
267 .gpio_full = TOSA_GPIO_BAT1_CRG,
268 .gpio_charge_off = TOSA_GPIO_CHARGE_OFF_JC,
269
270 .technology = POWER_SUPPLY_TECHNOLOGY_LIPO,
271
272 .gpio_bat = TOSA_GPIO_BAT1_V_ON,
273 .adc_bat = WM97XX_AUX_ID3,
274 .adc_bat_divider = 414,
275 .bat_max = 4310000,
276 .bat_min = 1551 * 1000000 / 414,
277
278 .gpio_temp = TOSA_GPIO_BAT0_TH_ON,
279 .adc_temp = WM97XX_AUX_ID2,
280 .adc_temp_divider = 10000,
281};
282
283static struct tosa_bat tosa_bat_bu = {
284 .status = POWER_SUPPLY_STATUS_UNKNOWN,
285 .full_chrg = -1,
286
287 .psy = {
288 .name = "backup-battery",
289 .type = POWER_SUPPLY_TYPE_BATTERY,
290 .properties = tosa_bat_bu_props,
291 .num_properties = ARRAY_SIZE(tosa_bat_bu_props),
292 .get_property = tosa_bat_get_property,
293 .external_power_changed = tosa_bat_external_power_changed,
294 },
295
296 .gpio_full = -1,
297 .gpio_charge_off = -1,
298
299 .technology = POWER_SUPPLY_TECHNOLOGY_LiMn,
300
301 .gpio_bat = TOSA_GPIO_BU_CHRG_ON,
302 .adc_bat = WM97XX_AUX_ID4,
303 .adc_bat_divider = 1266,
304
305 .gpio_temp = -1,
306 .adc_temp = -1,
307 .adc_temp_divider = -1,
308};
309
310static struct {
311 int gpio;
312 char *name;
313 bool output;
314 int value;
315} gpios[] = {
316 { TOSA_GPIO_CHARGE_OFF, "main charge off", 1, 1 },
317 { TOSA_GPIO_CHARGE_OFF_JC, "jacket charge off", 1, 1 },
318 { TOSA_GPIO_BAT_SW_ON, "battery switch", 1, 0 },
319 { TOSA_GPIO_BAT0_V_ON, "main battery", 1, 0 },
320 { TOSA_GPIO_BAT1_V_ON, "jacket battery", 1, 0 },
321 { TOSA_GPIO_BAT1_TH_ON, "main battery temp", 1, 0 },
322 { TOSA_GPIO_BAT0_TH_ON, "jacket battery temp", 1, 0 },
323 { TOSA_GPIO_BU_CHRG_ON, "backup battery", 1, 0 },
324 { TOSA_GPIO_BAT0_CRG, "main battery full", 0, 0 },
325 { TOSA_GPIO_BAT1_CRG, "jacket battery full", 0, 0 },
326 { TOSA_GPIO_BAT0_LOW, "main battery low", 0, 0 },
327 { TOSA_GPIO_BAT1_LOW, "jacket battery low", 0, 0 },
328 { TOSA_GPIO_JACKET_DETECT, "jacket detect", 0, 0 },
329};
330
331#ifdef CONFIG_PM
332static int tosa_bat_suspend(struct platform_device *dev, pm_message_t state)
333{
334 /* flush all pending status updates */
335 flush_scheduled_work();
336 return 0;
337}
338
339static int tosa_bat_resume(struct platform_device *dev)
340{
341 /* things may have changed while we were away */
342 schedule_work(&bat_work);
343 return 0;
344}
345#else
346#define tosa_bat_suspend NULL
347#define tosa_bat_resume NULL
348#endif
349
350static int __devinit tosa_bat_probe(struct platform_device *dev)
351{
352 int ret;
353 int i;
354
355 if (!machine_is_tosa())
356 return -ENODEV;
357
358 for (i = 0; i < ARRAY_SIZE(gpios); i++) {
359 ret = gpio_request(gpios[i].gpio, gpios[i].name);
360 if (ret) {
361 i--;
362 goto err_gpio;
363 }
364
365 if (gpios[i].output)
366 ret = gpio_direction_output(gpios[i].gpio,
367 gpios[i].value);
368 else
369 ret = gpio_direction_input(gpios[i].gpio);
370
371 if (ret)
372 goto err_gpio;
373 }
374
375 mutex_init(&tosa_bat_main.work_lock);
376 mutex_init(&tosa_bat_jacket.work_lock);
377
378 INIT_WORK(&bat_work, tosa_bat_work);
379
380 ret = power_supply_register(&dev->dev, &tosa_bat_main.psy);
381 if (ret)
382 goto err_psy_reg_main;
383 ret = power_supply_register(&dev->dev, &tosa_bat_jacket.psy);
384 if (ret)
385 goto err_psy_reg_jacket;
386 ret = power_supply_register(&dev->dev, &tosa_bat_bu.psy);
387 if (ret)
388 goto err_psy_reg_bu;
389
390 ret = request_irq(gpio_to_irq(TOSA_GPIO_BAT0_CRG),
391 tosa_bat_gpio_isr,
392 IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING,
393 "main full", &tosa_bat_main);
394 if (ret)
395 goto err_req_main;
396
397 ret = request_irq(gpio_to_irq(TOSA_GPIO_BAT1_CRG),
398 tosa_bat_gpio_isr,
399 IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING,
400 "jacket full", &tosa_bat_jacket);
401 if (ret)
402 goto err_req_jacket;
403
404 ret = request_irq(gpio_to_irq(TOSA_GPIO_JACKET_DETECT),
405 tosa_bat_gpio_isr,
406 IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING,
407 "jacket detect", &tosa_bat_jacket);
408 if (!ret) {
409 schedule_work(&bat_work);
410 return 0;
411 }
412
413 free_irq(gpio_to_irq(TOSA_GPIO_BAT1_CRG), &tosa_bat_jacket);
414err_req_jacket:
415 free_irq(gpio_to_irq(TOSA_GPIO_BAT0_CRG), &tosa_bat_main);
416err_req_main:
417 power_supply_unregister(&tosa_bat_bu.psy);
418err_psy_reg_bu:
419 power_supply_unregister(&tosa_bat_jacket.psy);
420err_psy_reg_jacket:
421 power_supply_unregister(&tosa_bat_main.psy);
422err_psy_reg_main:
423
424 /* see comment in tosa_bat_remove */
425 flush_scheduled_work();
426
427 i--;
428err_gpio:
429 for (; i >= 0; i--)
430 gpio_free(gpios[i].gpio);
431
432 return ret;
433}
434
435static int __devexit tosa_bat_remove(struct platform_device *dev)
436{
437 int i;
438
439 free_irq(gpio_to_irq(TOSA_GPIO_JACKET_DETECT), &tosa_bat_jacket);
440 free_irq(gpio_to_irq(TOSA_GPIO_BAT1_CRG), &tosa_bat_jacket);
441 free_irq(gpio_to_irq(TOSA_GPIO_BAT0_CRG), &tosa_bat_main);
442
443 power_supply_unregister(&tosa_bat_bu.psy);
444 power_supply_unregister(&tosa_bat_jacket.psy);
445 power_supply_unregister(&tosa_bat_main.psy);
446
447 /*
448 * now flush all pending work.
449 * we won't get any more schedules, since all
450 * sources (isr and external_power_changed)
451 * are unregistered now.
452 */
453 flush_scheduled_work();
454
455 for (i = ARRAY_SIZE(gpios) - 1; i >= 0; i--)
456 gpio_free(gpios[i].gpio);
457
458 return 0;
459}
460
461static struct platform_driver tosa_bat_driver = {
462 .driver.name = "wm97xx-battery",
463 .driver.owner = THIS_MODULE,
464 .probe = tosa_bat_probe,
465 .remove = __devexit_p(tosa_bat_remove),
466 .suspend = tosa_bat_suspend,
467 .resume = tosa_bat_resume,
468};
469
470static int __init tosa_bat_init(void)
471{
472 return platform_driver_register(&tosa_bat_driver);
473}
474
475static void __exit tosa_bat_exit(void)
476{
477 platform_driver_unregister(&tosa_bat_driver);
478}
479
480module_init(tosa_bat_init);
481module_exit(tosa_bat_exit);
482
483MODULE_LICENSE("GPL");
484MODULE_AUTHOR("Dmitry Baryshkov");
485MODULE_DESCRIPTION("Tosa battery driver");
486MODULE_ALIAS("platform:wm97xx-battery");
diff --git a/drivers/regulator/Kconfig b/drivers/regulator/Kconfig
new file mode 100644
index 000000000000..a656128f1fdd
--- /dev/null
+++ b/drivers/regulator/Kconfig
@@ -0,0 +1,59 @@
1menu "Voltage and Current regulators"
2
3config REGULATOR
4 bool "Voltage and Current Regulator Support"
5 default n
6 help
7 Generic Voltage and Current Regulator support.
8
9 This framework is designed to provide a generic interface to voltage
10 and current regulators within the Linux kernel. It's intended to
11 provide voltage and current control to client or consumer drivers and
12 also provide status information to user space applications through a
13 sysfs interface.
14
15 The intention is to allow systems to dynamically control regulator
16 output in order to save power and prolong battery life. This applies
17 to both voltage regulators (where voltage output is controllable) and
18 current sinks (where current output is controllable).
19
20 This framework safely compiles out if not selected so that client
21 drivers can still be used in systems with no software controllable
22 regulators.
23
24 If unsure, say no.
25
26config REGULATOR_DEBUG
27 bool "Regulator debug support"
28 depends on REGULATOR
29 help
30 Say yes here to enable debugging support.
31
32config REGULATOR_FIXED_VOLTAGE
33 tristate
34 default n
35 select REGULATOR
36
37config REGULATOR_VIRTUAL_CONSUMER
38 tristate "Virtual regulator consumer support"
39 default n
40 select REGULATOR
41 help
42 This driver provides a virtual consumer for the voltage and
43 current regulator API which provides sysfs controls for
44 configuring the supplies requested. This is mainly useful
45 for test purposes.
46
47 If unsure, say no.
48
49config REGULATOR_BQ24022
50 tristate "TI bq24022 Dual Input 1-Cell Li-Ion Charger IC"
51 default n
52 select REGULATOR
53 help
54 This driver controls a TI bq24022 Charger attached via
55 GPIOs. The provided current regulator can enable/disable
56	  charging and select between a 100 mA and a 500 mA charging
57	  current limit.
58
59endmenu
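
As a sketch of the consumer flow the help text above describes, the sequence below strings together regulator_get(), regulator_enable(), regulator_disable() and regulator_put() as introduced by this patch in drivers/regulator/core.c; the device pointer and the supply name "Vcc" are hypothetical placeholders.

#include <linux/device.h>
#include <linux/err.h>
#include <linux/regulator/consumer.h>

/* Hypothetical consumer; the supply name "Vcc" is an example only. */
static int example_consumer_power_cycle(struct device *dev)
{
	struct regulator *vcc;
	int ret;

	vcc = regulator_get(dev, "Vcc");
	if (IS_ERR(vcc))
		return PTR_ERR(vcc);

	ret = regulator_enable(vcc);
	if (ret) {
		regulator_put(vcc);
		return ret;
	}

	/* ... device is powered, do some work ... */

	regulator_disable(vcc);
	regulator_put(vcc);
	return 0;
}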
diff --git a/drivers/regulator/Makefile b/drivers/regulator/Makefile
new file mode 100644
index 000000000000..ac2c64efe65c
--- /dev/null
+++ b/drivers/regulator/Makefile
@@ -0,0 +1,12 @@
1#
2# Makefile for regulator drivers.
3#
4
5
6obj-$(CONFIG_REGULATOR) += core.o
7obj-$(CONFIG_REGULATOR_FIXED_VOLTAGE) += fixed.o
8obj-$(CONFIG_REGULATOR_VIRTUAL_CONSUMER) += virtual.o
9
10obj-$(CONFIG_REGULATOR_BQ24022) += bq24022.o
11
12ccflags-$(CONFIG_REGULATOR_DEBUG) += -DDEBUG
diff --git a/drivers/regulator/bq24022.c b/drivers/regulator/bq24022.c
new file mode 100644
index 000000000000..263699d6152d
--- /dev/null
+++ b/drivers/regulator/bq24022.c
@@ -0,0 +1,167 @@
1/*
2 * Support for TI bq24022 (bqTINY-II) Dual Input (USB/AC Adapter)
3 * 1-Cell Li-Ion Charger connected via GPIOs.
4 *
5 * Copyright (c) 2008 Philipp Zabel
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as
9 * published by the Free Software Foundation.
10 *
11 */
12
13#include <linux/kernel.h>
14#include <linux/init.h>
15#include <linux/platform_device.h>
16#include <linux/err.h>
17#include <linux/gpio.h>
18#include <linux/regulator/bq24022.h>
19#include <linux/regulator/driver.h>
20
21static int bq24022_set_current_limit(struct regulator_dev *rdev,
22 int min_uA, int max_uA)
23{
24 struct platform_device *pdev = rdev_get_drvdata(rdev);
25 struct bq24022_mach_info *pdata = pdev->dev.platform_data;
26
27 dev_dbg(&pdev->dev, "setting current limit to %s mA\n",
28 max_uA >= 500000 ? "500" : "100");
29
30 /* REVISIT: maybe return error if min_uA != 0 ? */
31 gpio_set_value(pdata->gpio_iset2, max_uA >= 500000);
32 return 0;
33}
34
35static int bq24022_get_current_limit(struct regulator_dev *rdev)
36{
37 struct platform_device *pdev = rdev_get_drvdata(rdev);
38 struct bq24022_mach_info *pdata = pdev->dev.platform_data;
39
40 return gpio_get_value(pdata->gpio_iset2) ? 500000 : 100000;
41}
42
43static int bq24022_enable(struct regulator_dev *rdev)
44{
45 struct platform_device *pdev = rdev_get_drvdata(rdev);
46 struct bq24022_mach_info *pdata = pdev->dev.platform_data;
47
48 dev_dbg(&pdev->dev, "enabling charger\n");
49
50 gpio_set_value(pdata->gpio_nce, 0);
51 return 0;
52}
53
54static int bq24022_disable(struct regulator_dev *rdev)
55{
56 struct platform_device *pdev = rdev_get_drvdata(rdev);
57 struct bq24022_mach_info *pdata = pdev->dev.platform_data;
58
59 dev_dbg(&pdev->dev, "disabling charger\n");
60
61 gpio_set_value(pdata->gpio_nce, 1);
62 return 0;
63}
64
65static int bq24022_is_enabled(struct regulator_dev *rdev)
66{
67 struct platform_device *pdev = rdev_get_drvdata(rdev);
68 struct bq24022_mach_info *pdata = pdev->dev.platform_data;
69
70 return !gpio_get_value(pdata->gpio_nce);
71}
72
73static struct regulator_ops bq24022_ops = {
74 .set_current_limit = bq24022_set_current_limit,
75 .get_current_limit = bq24022_get_current_limit,
76 .enable = bq24022_enable,
77 .disable = bq24022_disable,
78 .is_enabled = bq24022_is_enabled,
79};
80
81static struct regulator_desc bq24022_desc = {
82 .name = "bq24022",
83 .ops = &bq24022_ops,
84 .type = REGULATOR_CURRENT,
85};
86
87static int __init bq24022_probe(struct platform_device *pdev)
88{
89 struct bq24022_mach_info *pdata = pdev->dev.platform_data;
90 struct regulator_dev *bq24022;
91 int ret;
92
93 if (!pdata || !pdata->gpio_nce || !pdata->gpio_iset2)
94 return -EINVAL;
95
96 ret = gpio_request(pdata->gpio_nce, "ncharge_en");
97 if (ret) {
98 dev_dbg(&pdev->dev, "couldn't request nCE GPIO: %d\n",
99 pdata->gpio_nce);
100 goto err_ce;
101 }
102 ret = gpio_request(pdata->gpio_iset2, "charge_mode");
103 if (ret) {
104 dev_dbg(&pdev->dev, "couldn't request ISET2 GPIO: %d\n",
105 pdata->gpio_iset2);
106 goto err_iset2;
107 }
108 ret = gpio_direction_output(pdata->gpio_iset2, 0);
109 ret = gpio_direction_output(pdata->gpio_nce, 1);
110
111 bq24022 = regulator_register(&bq24022_desc, pdev);
112 if (IS_ERR(bq24022)) {
113 dev_dbg(&pdev->dev, "couldn't register regulator\n");
114 ret = PTR_ERR(bq24022);
115 goto err_reg;
116 }
117 platform_set_drvdata(pdev, bq24022);
118 dev_dbg(&pdev->dev, "registered regulator\n");
119
120 return 0;
121err_reg:
122 gpio_free(pdata->gpio_iset2);
123err_iset2:
124 gpio_free(pdata->gpio_nce);
125err_ce:
126 return ret;
127}
128
129static int __devexit bq24022_remove(struct platform_device *pdev)
130{
131 struct bq24022_mach_info *pdata = pdev->dev.platform_data;
132 struct regulator_dev *bq24022 = platform_get_drvdata(pdev);
133
134 regulator_unregister(bq24022);
135 gpio_free(pdata->gpio_iset2);
136 gpio_free(pdata->gpio_nce);
137
138 return 0;
139}
140
141static struct platform_driver bq24022_driver = {
142 .driver = {
143 .name = "bq24022",
144 },
145 .remove = __devexit_p(bq24022_remove),
146};
147
148static int __init bq24022_init(void)
149{
150 return platform_driver_probe(&bq24022_driver, bq24022_probe);
151}
152
153static void __exit bq24022_exit(void)
154{
155 platform_driver_unregister(&bq24022_driver);
156}
157
158/*
159 * make sure this is probed before gpio_vbus and pda_power,
160 * but after asic3 or other GPIO expander drivers.
161 */
162subsys_initcall(bq24022_init);
163module_exit(bq24022_exit);
164
165MODULE_AUTHOR("Philipp Zabel");
166MODULE_DESCRIPTION("TI bq24022 Li-Ion Charger driver");
167MODULE_LICENSE("GPL");
diff --git a/drivers/regulator/core.c b/drivers/regulator/core.c
new file mode 100644
index 000000000000..9c7986261568
--- /dev/null
+++ b/drivers/regulator/core.c
@@ -0,0 +1,1903 @@
1/*
2 * core.c -- Voltage/Current Regulator framework.
3 *
4 * Copyright 2007, 2008 Wolfson Microelectronics PLC.
5 *
6 * Author: Liam Girdwood <liam.girdwood@wolfsonmicro.com>
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License as published by the
10 * Free Software Foundation; either version 2 of the License, or (at your
11 * option) any later version.
12 *
13 */
14
15#include <linux/kernel.h>
16#include <linux/init.h>
17#include <linux/device.h>
18#include <linux/err.h>
19#include <linux/mutex.h>
20#include <linux/suspend.h>
21#include <linux/regulator/consumer.h>
22#include <linux/regulator/driver.h>
23#include <linux/regulator/machine.h>
24
25#define REGULATOR_VERSION "0.5"
26
27static DEFINE_MUTEX(regulator_list_mutex);
28static LIST_HEAD(regulator_list);
29static LIST_HEAD(regulator_map_list);
30
31/**
32 * struct regulator_dev
33 *
34 * Voltage / Current regulator class device. One for each regulator.
35 */
36struct regulator_dev {
37 struct regulator_desc *desc;
38 int use_count;
39
40 /* lists we belong to */
41 struct list_head list; /* list of all regulators */
42 struct list_head slist; /* list of supplied regulators */
43
44 /* lists we own */
45 struct list_head consumer_list; /* consumers we supply */
46 struct list_head supply_list; /* regulators we supply */
47
48 struct blocking_notifier_head notifier;
49 struct mutex mutex; /* consumer lock */
50 struct module *owner;
51 struct device dev;
52 struct regulation_constraints *constraints;
53 struct regulator_dev *supply; /* for tree */
54
55 void *reg_data; /* regulator_dev data */
56};
57
58/**
59 * struct regulator_map
60 *
61 * Used to provide symbolic supply names to devices.
62 */
63struct regulator_map {
64 struct list_head list;
65 struct device *dev;
66 const char *supply;
67 const char *regulator;
68};
69
70static inline struct regulator_dev *to_rdev(struct device *d)
71{
72 return container_of(d, struct regulator_dev, dev);
73}
74
75/*
76 * struct regulator
77 *
78 * One for each consumer device.
79 */
80struct regulator {
81 struct device *dev;
82 struct list_head list;
83 int uA_load;
84 int min_uV;
85 int max_uV;
86 int enabled; /* client has called enabled */
87 char *supply_name;
88 struct device_attribute dev_attr;
89 struct regulator_dev *rdev;
90};
91
92static int _regulator_is_enabled(struct regulator_dev *rdev);
93static int _regulator_disable(struct regulator_dev *rdev);
94static int _regulator_get_voltage(struct regulator_dev *rdev);
95static int _regulator_get_current_limit(struct regulator_dev *rdev);
96static unsigned int _regulator_get_mode(struct regulator_dev *rdev);
97static void _notifier_call_chain(struct regulator_dev *rdev,
98 unsigned long event, void *data);
99
100/* gets the regulator for a given consumer device */
101static struct regulator *get_device_regulator(struct device *dev)
102{
103 struct regulator *regulator = NULL;
104 struct regulator_dev *rdev;
105
106 mutex_lock(&regulator_list_mutex);
107 list_for_each_entry(rdev, &regulator_list, list) {
108 mutex_lock(&rdev->mutex);
109 list_for_each_entry(regulator, &rdev->consumer_list, list) {
110 if (regulator->dev == dev) {
111 mutex_unlock(&rdev->mutex);
112 mutex_unlock(&regulator_list_mutex);
113 return regulator;
114 }
115 }
116 mutex_unlock(&rdev->mutex);
117 }
118 mutex_unlock(&regulator_list_mutex);
119 return NULL;
120}
121
122/* Platform voltage constraint check */
123static int regulator_check_voltage(struct regulator_dev *rdev,
124 int *min_uV, int *max_uV)
125{
126 BUG_ON(*min_uV > *max_uV);
127
128 if (!rdev->constraints) {
129 printk(KERN_ERR "%s: no constraints for %s\n", __func__,
130 rdev->desc->name);
131 return -ENODEV;
132 }
133 if (!(rdev->constraints->valid_ops_mask & REGULATOR_CHANGE_VOLTAGE)) {
134 printk(KERN_ERR "%s: operation not allowed for %s\n",
135 __func__, rdev->desc->name);
136 return -EPERM;
137 }
138
139 if (*max_uV > rdev->constraints->max_uV)
140 *max_uV = rdev->constraints->max_uV;
141 if (*min_uV < rdev->constraints->min_uV)
142 *min_uV = rdev->constraints->min_uV;
143
144 if (*min_uV > *max_uV)
145 return -EINVAL;
146
147 return 0;
148}
149
150/* current constraint check */
151static int regulator_check_current_limit(struct regulator_dev *rdev,
152 int *min_uA, int *max_uA)
153{
154 BUG_ON(*min_uA > *max_uA);
155
156 if (!rdev->constraints) {
157 printk(KERN_ERR "%s: no constraints for %s\n", __func__,
158 rdev->desc->name);
159 return -ENODEV;
160 }
161 if (!(rdev->constraints->valid_ops_mask & REGULATOR_CHANGE_CURRENT)) {
162 printk(KERN_ERR "%s: operation not allowed for %s\n",
163 __func__, rdev->desc->name);
164 return -EPERM;
165 }
166
167 if (*max_uA > rdev->constraints->max_uA)
168 *max_uA = rdev->constraints->max_uA;
169 if (*min_uA < rdev->constraints->min_uA)
170 *min_uA = rdev->constraints->min_uA;
171
172 if (*min_uA > *max_uA)
173 return -EINVAL;
174
175 return 0;
176}
177
178/* operating mode constraint check */
179static int regulator_check_mode(struct regulator_dev *rdev, int mode)
180{
181 if (!rdev->constraints) {
182 printk(KERN_ERR "%s: no constraints for %s\n", __func__,
183 rdev->desc->name);
184 return -ENODEV;
185 }
186 if (!(rdev->constraints->valid_ops_mask & REGULATOR_CHANGE_MODE)) {
187 printk(KERN_ERR "%s: operation not allowed for %s\n",
188 __func__, rdev->desc->name);
189 return -EPERM;
190 }
191 if (!(rdev->constraints->valid_modes_mask & mode)) {
192 printk(KERN_ERR "%s: invalid mode %x for %s\n",
193 __func__, mode, rdev->desc->name);
194 return -EINVAL;
195 }
196 return 0;
197}
198
199/* dynamic regulator mode switching constraint check */
200static int regulator_check_drms(struct regulator_dev *rdev)
201{
202 if (!rdev->constraints) {
203 printk(KERN_ERR "%s: no constraints for %s\n", __func__,
204 rdev->desc->name);
205 return -ENODEV;
206 }
207 if (!(rdev->constraints->valid_ops_mask & REGULATOR_CHANGE_DRMS)) {
208 printk(KERN_ERR "%s: operation not allowed for %s\n",
209 __func__, rdev->desc->name);
210 return -EPERM;
211 }
212 return 0;
213}
214
215static ssize_t device_requested_uA_show(struct device *dev,
216 struct device_attribute *attr, char *buf)
217{
218 struct regulator *regulator;
219
220 regulator = get_device_regulator(dev);
221 if (regulator == NULL)
222 return 0;
223
224 return sprintf(buf, "%d\n", regulator->uA_load);
225}
226
227static ssize_t regulator_uV_show(struct device *dev,
228 struct device_attribute *attr, char *buf)
229{
230 struct regulator_dev *rdev = to_rdev(dev);
231 ssize_t ret;
232
233 mutex_lock(&rdev->mutex);
234 ret = sprintf(buf, "%d\n", _regulator_get_voltage(rdev));
235 mutex_unlock(&rdev->mutex);
236
237 return ret;
238}
239
240static ssize_t regulator_uA_show(struct device *dev,
241 struct device_attribute *attr, char *buf)
242{
243 struct regulator_dev *rdev = to_rdev(dev);
244
245 return sprintf(buf, "%d\n", _regulator_get_current_limit(rdev));
246}
247
248static ssize_t regulator_opmode_show(struct device *dev,
249 struct device_attribute *attr, char *buf)
250{
251 struct regulator_dev *rdev = to_rdev(dev);
252 int mode = _regulator_get_mode(rdev);
253
254 switch (mode) {
255 case REGULATOR_MODE_FAST:
256 return sprintf(buf, "fast\n");
257 case REGULATOR_MODE_NORMAL:
258 return sprintf(buf, "normal\n");
259 case REGULATOR_MODE_IDLE:
260 return sprintf(buf, "idle\n");
261 case REGULATOR_MODE_STANDBY:
262 return sprintf(buf, "standby\n");
263 }
264 return sprintf(buf, "unknown\n");
265}
266
267static ssize_t regulator_state_show(struct device *dev,
268 struct device_attribute *attr, char *buf)
269{
270 struct regulator_dev *rdev = to_rdev(dev);
271 int state = _regulator_is_enabled(rdev);
272
273 if (state > 0)
274 return sprintf(buf, "enabled\n");
275 else if (state == 0)
276 return sprintf(buf, "disabled\n");
277 else
278 return sprintf(buf, "unknown\n");
279}
280
281static ssize_t regulator_min_uA_show(struct device *dev,
282 struct device_attribute *attr, char *buf)
283{
284 struct regulator_dev *rdev = to_rdev(dev);
285
286 if (!rdev->constraints)
287 return sprintf(buf, "constraint not defined\n");
288
289 return sprintf(buf, "%d\n", rdev->constraints->min_uA);
290}
291
292static ssize_t regulator_max_uA_show(struct device *dev,
293 struct device_attribute *attr, char *buf)
294{
295 struct regulator_dev *rdev = to_rdev(dev);
296
297 if (!rdev->constraints)
298 return sprintf(buf, "constraint not defined\n");
299
300 return sprintf(buf, "%d\n", rdev->constraints->max_uA);
301}
302
303static ssize_t regulator_min_uV_show(struct device *dev,
304 struct device_attribute *attr, char *buf)
305{
306 struct regulator_dev *rdev = to_rdev(dev);
307
308 if (!rdev->constraints)
309 return sprintf(buf, "constraint not defined\n");
310
311 return sprintf(buf, "%d\n", rdev->constraints->min_uV);
312}
313
314static ssize_t regulator_max_uV_show(struct device *dev,
315 struct device_attribute *attr, char *buf)
316{
317 struct regulator_dev *rdev = to_rdev(dev);
318
319 if (!rdev->constraints)
320 return sprintf(buf, "constraint not defined\n");
321
322 return sprintf(buf, "%d\n", rdev->constraints->max_uV);
323}
324
325static ssize_t regulator_total_uA_show(struct device *dev,
326 struct device_attribute *attr, char *buf)
327{
328 struct regulator_dev *rdev = to_rdev(dev);
329 struct regulator *regulator;
330 int uA = 0;
331
332 mutex_lock(&rdev->mutex);
333 list_for_each_entry(regulator, &rdev->consumer_list, list)
334 uA += regulator->uA_load;
335 mutex_unlock(&rdev->mutex);
336 return sprintf(buf, "%d\n", uA);
337}
338
339static ssize_t regulator_num_users_show(struct device *dev,
340 struct device_attribute *attr, char *buf)
341{
342 struct regulator_dev *rdev = to_rdev(dev);
343 return sprintf(buf, "%d\n", rdev->use_count);
344}
345
346static ssize_t regulator_type_show(struct device *dev,
347 struct device_attribute *attr, char *buf)
348{
349 struct regulator_dev *rdev = to_rdev(dev);
350
351 switch (rdev->desc->type) {
352 case REGULATOR_VOLTAGE:
353 return sprintf(buf, "voltage\n");
354 case REGULATOR_CURRENT:
355 return sprintf(buf, "current\n");
356 }
357 return sprintf(buf, "unknown\n");
358}
359
360static ssize_t regulator_suspend_mem_uV_show(struct device *dev,
361 struct device_attribute *attr, char *buf)
362{
363 struct regulator_dev *rdev = to_rdev(dev);
364
365 if (!rdev->constraints)
366 return sprintf(buf, "not defined\n");
367 return sprintf(buf, "%d\n", rdev->constraints->state_mem.uV);
368}
369
370static ssize_t regulator_suspend_disk_uV_show(struct device *dev,
371 struct device_attribute *attr, char *buf)
372{
373 struct regulator_dev *rdev = to_rdev(dev);
374
375 if (!rdev->constraints)
376 return sprintf(buf, "not defined\n");
377 return sprintf(buf, "%d\n", rdev->constraints->state_disk.uV);
378}
379
380static ssize_t regulator_suspend_standby_uV_show(struct device *dev,
381 struct device_attribute *attr, char *buf)
382{
383 struct regulator_dev *rdev = to_rdev(dev);
384
385 if (!rdev->constraints)
386 return sprintf(buf, "not defined\n");
387 return sprintf(buf, "%d\n", rdev->constraints->state_standby.uV);
388}
389
390static ssize_t suspend_opmode_show(struct regulator_dev *rdev,
391 unsigned int mode, char *buf)
392{
393 switch (mode) {
394 case REGULATOR_MODE_FAST:
395 return sprintf(buf, "fast\n");
396 case REGULATOR_MODE_NORMAL:
397 return sprintf(buf, "normal\n");
398 case REGULATOR_MODE_IDLE:
399 return sprintf(buf, "idle\n");
400 case REGULATOR_MODE_STANDBY:
401 return sprintf(buf, "standby\n");
402 }
403 return sprintf(buf, "unknown\n");
404}
405
406static ssize_t regulator_suspend_mem_mode_show(struct device *dev,
407 struct device_attribute *attr, char *buf)
408{
409 struct regulator_dev *rdev = to_rdev(dev);
410
411 if (!rdev->constraints)
412 return sprintf(buf, "not defined\n");
413 return suspend_opmode_show(rdev,
414 rdev->constraints->state_mem.mode, buf);
415}
416
417static ssize_t regulator_suspend_disk_mode_show(struct device *dev,
418 struct device_attribute *attr, char *buf)
419{
420 struct regulator_dev *rdev = to_rdev(dev);
421
422 if (!rdev->constraints)
423 return sprintf(buf, "not defined\n");
424 return suspend_opmode_show(rdev,
425 rdev->constraints->state_disk.mode, buf);
426}
427
428static ssize_t regulator_suspend_standby_mode_show(struct device *dev,
429 struct device_attribute *attr, char *buf)
430{
431 struct regulator_dev *rdev = to_rdev(dev);
432
433 if (!rdev->constraints)
434 return sprintf(buf, "not defined\n");
435 return suspend_opmode_show(rdev,
436 rdev->constraints->state_standby.mode, buf);
437}
438
439static ssize_t regulator_suspend_mem_state_show(struct device *dev,
440 struct device_attribute *attr, char *buf)
441{
442 struct regulator_dev *rdev = to_rdev(dev);
443
444 if (!rdev->constraints)
445 return sprintf(buf, "not defined\n");
446
447 if (rdev->constraints->state_mem.enabled)
448 return sprintf(buf, "enabled\n");
449 else
450 return sprintf(buf, "disabled\n");
451}
452
453static ssize_t regulator_suspend_disk_state_show(struct device *dev,
454 struct device_attribute *attr, char *buf)
455{
456 struct regulator_dev *rdev = to_rdev(dev);
457
458 if (!rdev->constraints)
459 return sprintf(buf, "not defined\n");
460
461 if (rdev->constraints->state_disk.enabled)
462 return sprintf(buf, "enabled\n");
463 else
464 return sprintf(buf, "disabled\n");
465}
466
467static ssize_t regulator_suspend_standby_state_show(struct device *dev,
468 struct device_attribute *attr, char *buf)
469{
470 struct regulator_dev *rdev = to_rdev(dev);
471
472 if (!rdev->constraints)
473 return sprintf(buf, "not defined\n");
474
475 if (rdev->constraints->state_standby.enabled)
476 return sprintf(buf, "enabled\n");
477 else
478 return sprintf(buf, "disabled\n");
479}
480static struct device_attribute regulator_dev_attrs[] = {
481 __ATTR(microvolts, 0444, regulator_uV_show, NULL),
482 __ATTR(microamps, 0444, regulator_uA_show, NULL),
483 __ATTR(opmode, 0444, regulator_opmode_show, NULL),
484 __ATTR(state, 0444, regulator_state_show, NULL),
485 __ATTR(min_microvolts, 0444, regulator_min_uV_show, NULL),
486 __ATTR(min_microamps, 0444, regulator_min_uA_show, NULL),
487 __ATTR(max_microvolts, 0444, regulator_max_uV_show, NULL),
488 __ATTR(max_microamps, 0444, regulator_max_uA_show, NULL),
489 __ATTR(requested_microamps, 0444, regulator_total_uA_show, NULL),
490 __ATTR(num_users, 0444, regulator_num_users_show, NULL),
491 __ATTR(type, 0444, regulator_type_show, NULL),
492 __ATTR(suspend_mem_microvolts, 0444,
493 regulator_suspend_mem_uV_show, NULL),
494 __ATTR(suspend_disk_microvolts, 0444,
495 regulator_suspend_disk_uV_show, NULL),
496 __ATTR(suspend_standby_microvolts, 0444,
497 regulator_suspend_standby_uV_show, NULL),
498 __ATTR(suspend_mem_mode, 0444,
499 regulator_suspend_mem_mode_show, NULL),
500 __ATTR(suspend_disk_mode, 0444,
501 regulator_suspend_disk_mode_show, NULL),
502 __ATTR(suspend_standby_mode, 0444,
503 regulator_suspend_standby_mode_show, NULL),
504 __ATTR(suspend_mem_state, 0444,
505 regulator_suspend_mem_state_show, NULL),
506 __ATTR(suspend_disk_state, 0444,
507 regulator_suspend_disk_state_show, NULL),
508 __ATTR(suspend_standby_state, 0444,
509 regulator_suspend_standby_state_show, NULL),
510 __ATTR_NULL,
511};
512
513static void regulator_dev_release(struct device *dev)
514{
515 struct regulator_dev *rdev = to_rdev(dev);
516 kfree(rdev);
517}
518
519static struct class regulator_class = {
520 .name = "regulator",
521 .dev_release = regulator_dev_release,
522 .dev_attrs = regulator_dev_attrs,
523};
524
525/* Calculate the new optimum regulator operating mode based on the new total
526 * consumer load. All locks held by caller */
527static void drms_uA_update(struct regulator_dev *rdev)
528{
529 struct regulator *sibling;
530 int current_uA = 0, output_uV, input_uV, err;
531 unsigned int mode;
532
533 err = regulator_check_drms(rdev);
534 if (err < 0 || !rdev->desc->ops->get_optimum_mode ||
535 !rdev->desc->ops->get_voltage || !rdev->desc->ops->set_mode)
536 return;
537
538 /* get output voltage */
539 output_uV = rdev->desc->ops->get_voltage(rdev);
540 if (output_uV <= 0)
541 return;
542
543 /* get input voltage */
544 if (rdev->supply && rdev->supply->desc->ops->get_voltage)
545 input_uV = rdev->supply->desc->ops->get_voltage(rdev->supply);
546 else
547 input_uV = rdev->constraints->input_uV;
548 if (input_uV <= 0)
549 return;
550
551 /* calc total requested load */
552 list_for_each_entry(sibling, &rdev->consumer_list, list)
553 current_uA += sibling->uA_load;
554
555 /* now get the optimum mode for our new total regulator load */
556 mode = rdev->desc->ops->get_optimum_mode(rdev, input_uV,
557 output_uV, current_uA);
558
559 /* check the new mode is allowed */
560 err = regulator_check_mode(rdev, mode);
561 if (err == 0)
562 rdev->desc->ops->set_mode(rdev, mode);
563}
564
565static int suspend_set_state(struct regulator_dev *rdev,
566 struct regulator_state *rstate)
567{
568 int ret = 0;
569
570 /* enable & disable are mandatory for suspend control */
571 if (!rdev->desc->ops->set_suspend_enable ||
572 !rdev->desc->ops->set_suspend_disable)
573 return -EINVAL;
574
575 if (rstate->enabled)
576 ret = rdev->desc->ops->set_suspend_enable(rdev);
577 else
578 ret = rdev->desc->ops->set_suspend_disable(rdev);
579 if (ret < 0) {
580 printk(KERN_ERR "%s: failed to enable/disable\n", __func__);
581 return ret;
582 }
583
584 if (rdev->desc->ops->set_suspend_voltage && rstate->uV > 0) {
585 ret = rdev->desc->ops->set_suspend_voltage(rdev, rstate->uV);
586 if (ret < 0) {
587 printk(KERN_ERR "%s: failed to set voltage\n",
588 __func__);
589 return ret;
590 }
591 }
592
593 if (rdev->desc->ops->set_suspend_mode && rstate->mode > 0) {
594 ret = rdev->desc->ops->set_suspend_mode(rdev, rstate->mode);
595 if (ret < 0) {
596 printk(KERN_ERR "%s: failed to set mode\n", __func__);
597 return ret;
598 }
599 }
600 return ret;
601}
602
603/* locks held by caller */
604static int suspend_prepare(struct regulator_dev *rdev, suspend_state_t state)
605{
606 if (!rdev->constraints)
607 return -EINVAL;
608
609 switch (state) {
610 case PM_SUSPEND_STANDBY:
611 return suspend_set_state(rdev,
612 &rdev->constraints->state_standby);
613 case PM_SUSPEND_MEM:
614 return suspend_set_state(rdev,
615 &rdev->constraints->state_mem);
616 case PM_SUSPEND_MAX:
617 return suspend_set_state(rdev,
618 &rdev->constraints->state_disk);
619 default:
620 return -EINVAL;
621 }
622}
623
624static void print_constraints(struct regulator_dev *rdev)
625{
626 struct regulation_constraints *constraints = rdev->constraints;
627 char buf[80];
628 int count;
629
630 if (rdev->desc->type == REGULATOR_VOLTAGE) {
631 if (constraints->min_uV == constraints->max_uV)
632 count = sprintf(buf, "%d mV ",
633 constraints->min_uV / 1000);
634 else
635 count = sprintf(buf, "%d <--> %d mV ",
636 constraints->min_uV / 1000,
637 constraints->max_uV / 1000);
638 } else {
639 if (constraints->min_uA == constraints->max_uA)
640 count = sprintf(buf, "%d mA ",
641 constraints->min_uA / 1000);
642 else
643 count = sprintf(buf, "%d <--> %d mA ",
644 constraints->min_uA / 1000,
645 constraints->max_uA / 1000);
646 }
647 if (constraints->valid_modes_mask & REGULATOR_MODE_FAST)
648 count += sprintf(buf + count, "fast ");
649 if (constraints->valid_modes_mask & REGULATOR_MODE_NORMAL)
650 count += sprintf(buf + count, "normal ");
651 if (constraints->valid_modes_mask & REGULATOR_MODE_IDLE)
652 count += sprintf(buf + count, "idle ");
653 if (constraints->valid_modes_mask & REGULATOR_MODE_STANDBY)
654 count += sprintf(buf + count, "standby");
655
656 printk(KERN_INFO "regulator: %s: %s\n", rdev->desc->name, buf);
657}
658
659#define REG_STR_SIZE 32
660
661static struct regulator *create_regulator(struct regulator_dev *rdev,
662 struct device *dev,
663 const char *supply_name)
664{
665 struct regulator *regulator;
666 char buf[REG_STR_SIZE];
667 int err, size;
668
669 regulator = kzalloc(sizeof(*regulator), GFP_KERNEL);
670 if (regulator == NULL)
671 return NULL;
672
673 mutex_lock(&rdev->mutex);
674 regulator->rdev = rdev;
675 list_add(&regulator->list, &rdev->consumer_list);
676
677 if (dev) {
678 /* create a 'microamps_requested_<supply>' sysfs entry */
679 size = scnprintf(buf, REG_STR_SIZE, "microamps_requested_%s",
680 supply_name);
681 if (size >= REG_STR_SIZE)
682 goto overflow_err;
683
684 regulator->dev = dev;
685 regulator->dev_attr.attr.name = kstrdup(buf, GFP_KERNEL);
686 if (regulator->dev_attr.attr.name == NULL)
687 goto attr_name_err;
688
689 regulator->dev_attr.attr.owner = THIS_MODULE;
690 regulator->dev_attr.attr.mode = 0444;
691 regulator->dev_attr.show = device_requested_uA_show;
692 err = device_create_file(dev, &regulator->dev_attr);
693 if (err < 0) {
694 printk(KERN_WARNING "%s: could not add regulator_dev"
695 " load sysfs\n", __func__);
696 goto attr_name_err;
697 }
698
699 /* also add a link to the device sysfs entry */
700 size = scnprintf(buf, REG_STR_SIZE, "%s-%s",
701 dev->kobj.name, supply_name);
702 if (size >= REG_STR_SIZE)
703 goto attr_err;
704
705 regulator->supply_name = kstrdup(buf, GFP_KERNEL);
706 if (regulator->supply_name == NULL)
707 goto attr_err;
708
709 err = sysfs_create_link(&rdev->dev.kobj, &dev->kobj,
710 buf);
711 if (err) {
712 printk(KERN_WARNING
713 "%s: could not add device link %s err %d\n",
714 __func__, dev->kobj.name, err);
715 device_remove_file(dev, &regulator->dev_attr);
716 goto link_name_err;
717 }
718 }
719 mutex_unlock(&rdev->mutex);
720 return regulator;
721link_name_err:
722 kfree(regulator->supply_name);
723attr_err:
724 device_remove_file(regulator->dev, &regulator->dev_attr);
725attr_name_err:
726 kfree(regulator->dev_attr.attr.name);
727overflow_err:
728 list_del(&regulator->list);
729 kfree(regulator);
730 mutex_unlock(&rdev->mutex);
731 return NULL;
732}
733
734/**
735 * regulator_get - lookup and obtain a reference to a regulator.
736 * @dev: device for regulator "consumer"
737 * @id: Supply name or regulator ID.
738 *
739 * Returns a struct regulator corresponding to the regulator producer,
740 * or IS_ERR() condition containing errno. Use of supply names
741 * configured via regulator_set_device_supply() is strongly
742 * encouraged.
743 */
744struct regulator *regulator_get(struct device *dev, const char *id)
745{
746 struct regulator_dev *rdev;
747 struct regulator_map *map;
748 struct regulator *regulator = ERR_PTR(-ENODEV);
749 const char *supply = id;
750
751 if (id == NULL) {
752 printk(KERN_ERR "regulator: get() with no identifier\n");
753 return regulator;
754 }
755
756 mutex_lock(&regulator_list_mutex);
757
758 list_for_each_entry(map, &regulator_map_list, list) {
759 if (dev == map->dev &&
760 strcmp(map->supply, id) == 0) {
761 supply = map->regulator;
762 break;
763 }
764 }
765
766 list_for_each_entry(rdev, &regulator_list, list) {
767 if (strcmp(supply, rdev->desc->name) == 0 &&
768 try_module_get(rdev->owner))
769 goto found;
770 }
771 printk(KERN_ERR "regulator: Unable to get requested regulator: %s\n",
772 id);
773 mutex_unlock(&regulator_list_mutex);
774 return regulator;
775
776found:
777 regulator = create_regulator(rdev, dev, id);
778 if (regulator == NULL) {
779 regulator = ERR_PTR(-ENOMEM);
780 module_put(rdev->owner);
781 }
782
783 mutex_unlock(&regulator_list_mutex);
784 return regulator;
785}
786EXPORT_SYMBOL_GPL(regulator_get);
787
788/**
789 * regulator_put - "free" the regulator source
790 * @regulator: regulator source
791 *
792 * Note: drivers must ensure that all regulator_enable calls made on this
793 * regulator source are balanced by regulator_disable calls prior to calling
794 * this function.
795 */
796void regulator_put(struct regulator *regulator)
797{
798 struct regulator_dev *rdev;
799
800 if (regulator == NULL || IS_ERR(regulator))
801 return;
802
803 if (regulator->enabled) {
804 printk(KERN_WARNING "Releasing supply %s while enabled\n",
805 regulator->supply_name);
806 WARN_ON(regulator->enabled);
807 regulator_disable(regulator);
808 }
809
810 mutex_lock(&regulator_list_mutex);
811 rdev = regulator->rdev;
812
813 /* remove any sysfs entries */
814 if (regulator->dev) {
815 sysfs_remove_link(&rdev->dev.kobj, regulator->supply_name);
816 kfree(regulator->supply_name);
817 device_remove_file(regulator->dev, &regulator->dev_attr);
818 kfree(regulator->dev_attr.attr.name);
819 }
820 list_del(&regulator->list);
821 kfree(regulator);
822
823 module_put(rdev->owner);
824 mutex_unlock(&regulator_list_mutex);
825}
826EXPORT_SYMBOL_GPL(regulator_put);
827
828/* locks held by regulator_enable() */
829static int _regulator_enable(struct regulator_dev *rdev)
830{
831 int ret = -EINVAL;
832
833 if (!rdev->constraints) {
834 printk(KERN_ERR "%s: %s has no constraints\n",
835 __func__, rdev->desc->name);
836 return ret;
837 }
838
839 /* do we need to enable the supply regulator first */
840 if (rdev->supply) {
841 ret = _regulator_enable(rdev->supply);
842 if (ret < 0) {
843 printk(KERN_ERR "%s: failed to enable %s: %d\n",
844 __func__, rdev->desc->name, ret);
845 return ret;
846 }
847 }
848
849 /* check voltage and requested load before enabling */
850 if (rdev->desc->ops->enable) {
851
852 if (rdev->constraints &&
853 (rdev->constraints->valid_ops_mask &
854 REGULATOR_CHANGE_DRMS))
855 drms_uA_update(rdev);
856
857 ret = rdev->desc->ops->enable(rdev);
858 if (ret < 0) {
859 printk(KERN_ERR "%s: failed to enable %s: %d\n",
860 __func__, rdev->desc->name, ret);
861 return ret;
862 }
863 rdev->use_count++;
864 return ret;
865 }
866
867 return ret;
868}
869
870/**
871 * regulator_enable - enable regulator output
872 * @regulator: regulator source
873 *
874 * Enable the regulator output at the predefined voltage or current value.
875 * NOTE: the output value can be set by other drivers, boot loader or may be
876 * hardwired in the regulator.
877 * NOTE: calls to regulator_enable() must be balanced with calls to
878 * regulator_disable().
879 */
880int regulator_enable(struct regulator *regulator)
881{
882 int ret;
883
884 if (regulator->enabled) {
885 printk(KERN_CRIT "Regulator %s already enabled\n",
886 regulator->supply_name);
887 WARN_ON(regulator->enabled);
888 return 0;
889 }
890
891 mutex_lock(&regulator->rdev->mutex);
892 regulator->enabled = 1;
893 ret = _regulator_enable(regulator->rdev);
894 if (ret != 0)
895 regulator->enabled = 0;
896 mutex_unlock(&regulator->rdev->mutex);
897 return ret;
898}
899EXPORT_SYMBOL_GPL(regulator_enable);
900
901/* locks held by regulator_disable() */
902static int _regulator_disable(struct regulator_dev *rdev)
903{
904 int ret = 0;
905
906 /* are we the last user and permitted to disable ? */
907 if (rdev->use_count == 1 && !rdev->constraints->always_on) {
908
909 /* we are last user */
910 if (rdev->desc->ops->disable) {
911 ret = rdev->desc->ops->disable(rdev);
912 if (ret < 0) {
913 printk(KERN_ERR "%s: failed to disable %s\n",
914 __func__, rdev->desc->name);
915 return ret;
916 }
917 }
918
919 /* decrease our supplies ref count and disable if required */
920 if (rdev->supply)
921 _regulator_disable(rdev->supply);
922
923 rdev->use_count = 0;
924 } else if (rdev->use_count > 1) {
925
926 if (rdev->constraints &&
927 (rdev->constraints->valid_ops_mask &
928 REGULATOR_CHANGE_DRMS))
929 drms_uA_update(rdev);
930
931 rdev->use_count--;
932 }
933 return ret;
934}
935
936/**
937 * regulator_disable - disable regulator output
938 * @regulator: regulator source
939 *
940 * Disable the regulator output voltage or current.
941 * NOTE: this will only disable the regulator output if no other consumer
942 * devices have it enabled.
943 * NOTE: calls to regulator_enable() must be balanced with calls to
944 * regulator_disable().
945 */
946int regulator_disable(struct regulator *regulator)
947{
948 int ret;
949
950 if (!regulator->enabled) {
951 printk(KERN_ERR "%s: not in use by this consumer\n",
952 __func__);
953 return 0;
954 }
955
956 mutex_lock(&regulator->rdev->mutex);
957 regulator->enabled = 0;
958 regulator->uA_load = 0;
959 ret = _regulator_disable(regulator->rdev);
960 mutex_unlock(&regulator->rdev->mutex);
961 return ret;
962}
963EXPORT_SYMBOL_GPL(regulator_disable);
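
A minimal sketch of the balanced enable/disable rule noted in the comments above, assuming two hypothetical consumers ('a' and 'b') that happen to share the same physical regulator; the supply only turns off once the last user has disabled it (unless its constraints mark it always_on):

#include <linux/regulator/consumer.h>

/* Hypothetical: 'a' and 'b' are consumers of the same physical supply. */
static void example_shared_supply(struct regulator *a, struct regulator *b)
{
	if (regulator_enable(a))	/* physical supply switches on */
		return;
	if (regulator_enable(b)) {	/* already on, use count rises */
		regulator_disable(a);
		return;
	}

	regulator_disable(a);		/* stays on, 'b' still holds it */
	regulator_disable(b);		/* last user gone, core may power it off */
}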
964
965/* locks held by regulator_force_disable() */
966static int _regulator_force_disable(struct regulator_dev *rdev)
967{
968 int ret = 0;
969
970 /* force disable */
971 if (rdev->desc->ops->disable) {
972 /* ah well, who wants to live forever... */
973 ret = rdev->desc->ops->disable(rdev);
974 if (ret < 0) {
975 printk(KERN_ERR "%s: failed to force disable %s\n",
976 __func__, rdev->desc->name);
977 return ret;
978 }
979 /* notify other consumers that power has been forced off */
980 _notifier_call_chain(rdev, REGULATOR_EVENT_FORCE_DISABLE,
981 NULL);
982 }
983
984 /* decrease our supplies ref count and disable if required */
985 if (rdev->supply)
986 _regulator_disable(rdev->supply);
987
988 rdev->use_count = 0;
989 return ret;
990}
991
992/**
993 * regulator_force_disable - force disable regulator output
994 * @regulator: regulator source
995 *
996 * Forcibly disable the regulator output voltage or current.
997 * NOTE: this *will* disable the regulator output even if other consumer
998 * devices have it enabled. This should be used for situations when device
999 * damage will likely occur if the regulator is not disabled (e.g. over temp).
1000 */
1001int regulator_force_disable(struct regulator *regulator)
1002{
1003 int ret;
1004
1005 mutex_lock(&regulator->rdev->mutex);
1006 regulator->enabled = 0;
1007 regulator->uA_load = 0;
1008 ret = _regulator_force_disable(regulator->rdev);
1009 mutex_unlock(&regulator->rdev->mutex);
1010 return ret;
1011}
1012EXPORT_SYMBOL_GPL(regulator_force_disable);
1013
1014static int _regulator_is_enabled(struct regulator_dev *rdev)
1015{
1016 int ret;
1017
1018 mutex_lock(&rdev->mutex);
1019
1020 /* sanity check */
1021 if (!rdev->desc->ops->is_enabled) {
1022 ret = -EINVAL;
1023 goto out;
1024 }
1025
1026 ret = rdev->desc->ops->is_enabled(rdev);
1027out:
1028 mutex_unlock(&rdev->mutex);
1029 return ret;
1030}
1031
1032/**
1033 * regulator_is_enabled - is the regulator output enabled
1034 * @regulator: regulator source
1035 *
1036 * Returns zero when disabled, otherwise the number of enable requests.
1037 */
1038int regulator_is_enabled(struct regulator *regulator)
1039{
1040 return _regulator_is_enabled(regulator->rdev);
1041}
1042EXPORT_SYMBOL_GPL(regulator_is_enabled);
1043
1044/**
1045 * regulator_set_voltage - set regulator output voltage
1046 * @regulator: regulator source
1047 * @min_uV: Minimum required voltage in uV
1048 * @max_uV: Maximum acceptable voltage in uV
1049 *
1050 * Sets a voltage regulator to the desired output voltage. This can be set
1051 * during any regulator state. IOW, regulator can be disabled or enabled.
1052 *
1053 * If the regulator is enabled then the voltage will change to the new value
1054 * immediately otherwise if the regulator is disabled the regulator will
1055 * output at the new voltage when enabled.
1056 *
1057 * NOTE: If the regulator is shared between several devices then the lowest
1058 * requested voltage that meets the system constraints will be used.
1059 * NOTE: Regulator system constraints must be set for this regulator before
1060 * calling this function otherwise this call will fail.
1061 */
1062int regulator_set_voltage(struct regulator *regulator, int min_uV, int max_uV)
1063{
1064 struct regulator_dev *rdev = regulator->rdev;
1065 int ret;
1066
1067 mutex_lock(&rdev->mutex);
1068
1069 /* sanity check */
1070 if (!rdev->desc->ops->set_voltage) {
1071 ret = -EINVAL;
1072 goto out;
1073 }
1074
1075 /* constraints check */
1076 ret = regulator_check_voltage(rdev, &min_uV, &max_uV);
1077 if (ret < 0)
1078 goto out;
1079 regulator->min_uV = min_uV;
1080 regulator->max_uV = max_uV;
1081 ret = rdev->desc->ops->set_voltage(rdev, min_uV, max_uV);
1082
1083out:
1084 mutex_unlock(&rdev->mutex);
1085 return ret;
1086}
1087EXPORT_SYMBOL_GPL(regulator_set_voltage);
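
A short consumer-side sketch of the voltage request described above; the 1.7 V - 1.9 V window is purely illustrative and still has to fall inside the machine constraints for the call to succeed:

#include <linux/regulator/consumer.h>

/* Request any voltage in a 1.7 V .. 1.9 V window (values are examples). */
static int example_set_core_voltage(struct regulator *vcore)
{
	int ret;

	ret = regulator_set_voltage(vcore, 1700000, 1900000);
	if (ret)
		return ret;

	/* Read back what the regulator actually settled on, in uV. */
	return regulator_get_voltage(vcore);
}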
1088
1089static int _regulator_get_voltage(struct regulator_dev *rdev)
1090{
1091 /* sanity check */
1092 if (rdev->desc->ops->get_voltage)
1093 return rdev->desc->ops->get_voltage(rdev);
1094 else
1095 return -EINVAL;
1096}
1097
1098/**
1099 * regulator_get_voltage - get regulator output voltage
1100 * @regulator: regulator source
1101 *
1102 * This returns the current regulator voltage in uV.
1103 *
1104 * NOTE: If the regulator is disabled it will still return the configured
1105 * voltage value; this function should not be used to determine regulator state.
1106 */
1107int regulator_get_voltage(struct regulator *regulator)
1108{
1109 int ret;
1110
1111 mutex_lock(&regulator->rdev->mutex);
1112
1113 ret = _regulator_get_voltage(regulator->rdev);
1114
1115 mutex_unlock(&regulator->rdev->mutex);
1116
1117 return ret;
1118}
1119EXPORT_SYMBOL_GPL(regulator_get_voltage);
1120
1121/**
1122 * regulator_set_current_limit - set regulator output current limit
1123 * @regulator: regulator source
1124 * @min_uA: Minimum supported current in uA
1125 * @max_uA: Maximum supported current in uA
1126 *
1127 * Sets current sink to the desired output current. This can be set during
1128 * any regulator state. IOW, regulator can be disabled or enabled.
1129 *
1130 * If the regulator is enabled then the current will change to the new value
1131 * immediately otherwise if the regulator is disabled the regulator will
1132 * output at the new current when enabled.
1133 *
1134 * NOTE: Regulator system constraints must be set for this regulator before
1135 * calling this function otherwise this call will fail.
1136 */
1137int regulator_set_current_limit(struct regulator *regulator,
1138 int min_uA, int max_uA)
1139{
1140 struct regulator_dev *rdev = regulator->rdev;
1141 int ret;
1142
1143 mutex_lock(&rdev->mutex);
1144
1145 /* sanity check */
1146 if (!rdev->desc->ops->set_current_limit) {
1147 ret = -EINVAL;
1148 goto out;
1149 }
1150
1151 /* constraints check */
1152 ret = regulator_check_current_limit(rdev, &min_uA, &max_uA);
1153 if (ret < 0)
1154 goto out;
1155
1156 ret = rdev->desc->ops->set_current_limit(rdev, min_uA, max_uA);
1157out:
1158 mutex_unlock(&rdev->mutex);
1159 return ret;
1160}
1161EXPORT_SYMBOL_GPL(regulator_set_current_limit);
1162
1163static int _regulator_get_current_limit(struct regulator_dev *rdev)
1164{
1165 int ret;
1166
1167 mutex_lock(&rdev->mutex);
1168
1169 /* sanity check */
1170 if (!rdev->desc->ops->get_current_limit) {
1171 ret = -EINVAL;
1172 goto out;
1173 }
1174
1175 ret = rdev->desc->ops->get_current_limit(rdev);
1176out:
1177 mutex_unlock(&rdev->mutex);
1178 return ret;
1179}
1180
1181/**
1182 * regulator_get_current_limit - get regulator output current
1183 * @regulator: regulator source
1184 *
1185 * This returns the current supplied by the specified current sink in uA.
1186 *
1187 * NOTE: If the regulator is disabled it will still return the configured
1188 * current limit; this function should not be used to determine regulator state.
1189 */
1190int regulator_get_current_limit(struct regulator *regulator)
1191{
1192 return _regulator_get_current_limit(regulator->rdev);
1193}
1194EXPORT_SYMBOL_GPL(regulator_get_current_limit);
1195
1196/**
1197 * regulator_set_mode - set regulator operating mode
1198 * @regulator: regulator source
1199 * @mode: operating mode - one of the REGULATOR_MODE constants
1200 *
1201 * Set regulator operating mode to increase regulator efficiency or improve
1202 * regulation performance.
1203 *
1204 * NOTE: Regulator system constraints must be set for this regulator before
1205 * calling this function otherwise this call will fail.
1206 */
1207int regulator_set_mode(struct regulator *regulator, unsigned int mode)
1208{
1209 struct regulator_dev *rdev = regulator->rdev;
1210 int ret;
1211
1212 mutex_lock(&rdev->mutex);
1213
1214 /* sanity check */
1215 if (!rdev->desc->ops->set_mode) {
1216 ret = -EINVAL;
1217 goto out;
1218 }
1219
1220 /* constraints check */
1221 ret = regulator_check_mode(rdev, mode);
1222 if (ret < 0)
1223 goto out;
1224
1225 ret = rdev->desc->ops->set_mode(rdev, mode);
1226out:
1227 mutex_unlock(&rdev->mutex);
1228 return ret;
1229}
1230EXPORT_SYMBOL_GPL(regulator_set_mode);
1231
1232static unsigned int _regulator_get_mode(struct regulator_dev *rdev)
1233{
1234 int ret;
1235
1236 mutex_lock(&rdev->mutex);
1237
1238 /* sanity check */
1239 if (!rdev->desc->ops->get_mode) {
1240 ret = -EINVAL;
1241 goto out;
1242 }
1243
1244 ret = rdev->desc->ops->get_mode(rdev);
1245out:
1246 mutex_unlock(&rdev->mutex);
1247 return ret;
1248}
1249
1250/**
1251 * regulator_get_mode - get regulator operating mode
1252 * @regulator: regulator source
1253 *
1254 * Get the current regulator operating mode.
1255 */
1256unsigned int regulator_get_mode(struct regulator *regulator)
1257{
1258 return _regulator_get_mode(regulator->rdev);
1259}
1260EXPORT_SYMBOL_GPL(regulator_get_mode);
1261
1262/**
1263 * regulator_set_optimum_mode - set regulator optimum operating mode
1264 * @regulator: regulator source
1265 * @uA_load: load current
1266 *
1267 * Notifies the regulator core of a new device load. This is then used by
1268 * DRMS (if enabled by constraints) to set the most efficient regulator
1269 * operating mode for the new regulator loading.
1270 *
1271 * Consumer devices notify their supply regulator of the maximum power
1272 * they will require (can be taken from device datasheet in the power
1273 * consumption tables) when they change operational status and hence power
1274 * state. Examples of operational state changes that can affect power
1275 * consumption are :-
1276 *
1277 * o Device is opened / closed.
1278 * o Device I/O is about to begin or has just finished.
1279 * o Device is idling in between work.
1280 *
1281 * This information is also exported via sysfs to userspace.
1282 *
1283 * DRMS will sum the total requested load on the regulator and change
1284 * to the most efficient operating mode if platform constraints allow.
1285 *
1286 * Returns the new regulator mode or error.
1287 */
1288int regulator_set_optimum_mode(struct regulator *regulator, int uA_load)
1289{
1290 struct regulator_dev *rdev = regulator->rdev;
1291 struct regulator *consumer;
1292 int ret, output_uV, input_uV, total_uA_load = 0;
1293 unsigned int mode;
1294
1295 mutex_lock(&rdev->mutex);
1296
1297 regulator->uA_load = uA_load;
1298 ret = regulator_check_drms(rdev);
1299 if (ret < 0)
1300 goto out;
1301 ret = -EINVAL;
1302
1303 /* sanity check */
1304 if (!rdev->desc->ops->get_optimum_mode)
1305 goto out;
1306
1307 /* get output voltage */
1308 output_uV = rdev->desc->ops->get_voltage(rdev);
1309 if (output_uV <= 0) {
1310 printk(KERN_ERR "%s: invalid output voltage found for %s\n",
1311 __func__, rdev->desc->name);
1312 goto out;
1313 }
1314
1315 /* get input voltage */
1316 if (rdev->supply && rdev->supply->desc->ops->get_voltage)
1317 input_uV = rdev->supply->desc->ops->get_voltage(rdev->supply);
1318 else
1319 input_uV = rdev->constraints->input_uV;
1320 if (input_uV <= 0) {
1321 printk(KERN_ERR "%s: invalid input voltage found for %s\n",
1322 __func__, rdev->desc->name);
1323 goto out;
1324 }
1325
1326 /* calc total requested load for this regulator */
1327 list_for_each_entry(consumer, &rdev->consumer_list, list)
1328 total_uA_load += consumer->uA_load;
1329
1330 mode = rdev->desc->ops->get_optimum_mode(rdev,
1331 input_uV, output_uV,
1332 total_uA_load);
1333 if (regulator_check_mode(rdev, mode) < 0) {
1334 printk(KERN_ERR "%s: failed to get optimum mode for %s @"
1335 " %d uA %d -> %d uV\n", __func__, rdev->desc->name,
1336 total_uA_load, input_uV, output_uV);
1337 goto out;
1338 }
1339
1340 ret = rdev->desc->ops->set_mode(rdev, mode);
1341 if (ret < 0) {
1342 printk(KERN_ERR "%s: failed to set optimum mode %x for %s\n",
1343 __func__, mode, rdev->desc->name);
1344 goto out;
1345 }
1346 ret = mode;
1347out:
1348 mutex_unlock(&rdev->mutex);
1349 return ret;
1350}
1351EXPORT_SYMBOL_GPL(regulator_set_optimum_mode);
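
The DRMS flow documented above reduces, on the consumer side, to reporting the expected load around the operations that change it. The sketch below is illustrative only; the 10 mA and 100 uA figures are invented, and the returned mode is ignored for brevity:

#include <linux/regulator/consumer.h>

/* Hypothetical load notification around an I/O burst. */
static void example_io_burst(struct regulator *supply)
{
	/* About to stream data: expect roughly 10 mA of draw. */
	regulator_set_optimum_mode(supply, 10000);

	/* ... perform the transfer ... */

	/* Back to idle: only ~100 uA needed. */
	regulator_set_optimum_mode(supply, 100);
}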
1352
1353/**
1354 * regulator_register_notifier - register regulator event notifier
1355 * @regulator: regulator source
1356 * @notifier_block: notifier block
1357 *
1358 * Register notifier block to receive regulator events.
1359 */
1360int regulator_register_notifier(struct regulator *regulator,
1361 struct notifier_block *nb)
1362{
1363 return blocking_notifier_chain_register(&regulator->rdev->notifier,
1364 nb);
1365}
1366EXPORT_SYMBOL_GPL(regulator_register_notifier);
1367
1368/**
1369 * regulator_unregister_notifier - unregister regulator event notifier
1370 * @regulator: regulator source
1371 * @notifier_block: notifier block
1372 *
1373 * Unregister regulator event notifier block.
1374 */
1375int regulator_unregister_notifier(struct regulator *regulator,
1376 struct notifier_block *nb)
1377{
1378 return blocking_notifier_chain_unregister(&regulator->rdev->notifier,
1379 nb);
1380}
1381EXPORT_SYMBOL_GPL(regulator_unregister_notifier);
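
A hypothetical consumer that wants to hear about events such as REGULATOR_EVENT_FORCE_DISABLE (raised by _regulator_force_disable() above) could register a notifier roughly as follows; the callback body is a placeholder:

#include <linux/kernel.h>
#include <linux/notifier.h>
#include <linux/regulator/consumer.h>

static int example_supply_event(struct notifier_block *nb,
				unsigned long event, void *data)
{
	if (event == REGULATOR_EVENT_FORCE_DISABLE)
		printk(KERN_WARNING "example: supply was force-disabled\n");
	return NOTIFY_OK;
}

static struct notifier_block example_nb = {
	.notifier_call = example_supply_event,
};

/* Register against an already acquired struct regulator. */
static int example_watch_supply(struct regulator *supply)
{
	return regulator_register_notifier(supply, &example_nb);
}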
1382
1383/* notify regulator consumers and downstream regulator consumers */
1384static void _notifier_call_chain(struct regulator_dev *rdev,
1385 unsigned long event, void *data)
1386{
1387 struct regulator_dev *_rdev;
1388
1389 /* call rdev chain first */
1390 mutex_lock(&rdev->mutex);
1391 blocking_notifier_call_chain(&rdev->notifier, event, NULL);
1392 mutex_unlock(&rdev->mutex);
1393
1394 /* now notify regulator we supply */
1395 list_for_each_entry(_rdev, &rdev->supply_list, slist)
1396 _notifier_call_chain(_rdev, event, data);
1397}
1398
1399/**
1400 * regulator_bulk_get - get multiple regulator consumers
1401 *
1402 * @dev: Device to supply
1403 * @num_consumers: Number of consumers to register
1404 * @consumers: Configuration of consumers; clients are stored here.
1405 *
1406 * @return 0 on success, an errno on failure.
1407 *
1408 * This helper function allows drivers to get several regulator
1409 * consumers in one operation. If any of the regulators cannot be
1410 * acquired then any regulators that were allocated will be freed
1411 * before returning to the caller.
1412 */
1413int regulator_bulk_get(struct device *dev, int num_consumers,
1414 struct regulator_bulk_data *consumers)
1415{
1416 int i;
1417 int ret;
1418
1419 for (i = 0; i < num_consumers; i++)
1420 consumers[i].consumer = NULL;
1421
1422 for (i = 0; i < num_consumers; i++) {
1423 consumers[i].consumer = regulator_get(dev,
1424 consumers[i].supply);
1425 if (IS_ERR(consumers[i].consumer)) {
1426 dev_err(dev, "Failed to get supply '%s'\n",
1427 consumers[i].supply);
1428 ret = PTR_ERR(consumers[i].consumer);
1429 consumers[i].consumer = NULL;
1430 goto err;
1431 }
1432 }
1433
1434 return 0;
1435
1436err:
1437 for (i = 0; i < num_consumers && consumers[i].consumer; i++)
1438 regulator_put(consumers[i].consumer);
1439
1440 return ret;
1441}
1442EXPORT_SYMBOL_GPL(regulator_bulk_get);
1443
1444/**
1445 * regulator_bulk_enable - enable multiple regulator consumers
1446 *
1447 * @num_consumers: Number of consumers
1448 * @consumers: Consumer data; clients are stored here.
1449 * @return 0 on success, an errno on failure
1450 *
1451 * This convenience API allows consumers to enable multiple regulator
1452 * clients in a single API call. If any consumers cannot be enabled
1453 * then any others that were enabled will be disabled again prior to
1454 * return.
1455 */
1456int regulator_bulk_enable(int num_consumers,
1457 struct regulator_bulk_data *consumers)
1458{
1459 int i;
1460 int ret;
1461
1462 for (i = 0; i < num_consumers; i++) {
1463 ret = regulator_enable(consumers[i].consumer);
1464 if (ret != 0)
1465 goto err;
1466 }
1467
1468 return 0;
1469
1470err:
1471 printk(KERN_ERR "Failed to enable %s\n", consumers[i].supply);
1472 while (--i >= 0)
1473 regulator_disable(consumers[i].consumer);
1474
1475 return ret;
1476}
1477EXPORT_SYMBOL_GPL(regulator_bulk_enable);
1478
1479/**
1480 * regulator_bulk_disable - disable multiple regulator consumers
1481 *
1482 * @num_consumers: Number of consumers
1483 * @consumers: Consumer data; clients are stored here.
1484 * @return 0 on success, an errno on failure
1485 *
1486 * This convenience API allows consumers to disable multiple regulator
1487 * clients in a single API call. If any consumers cannot be disabled
1488 * then any others that were disabled will be enabled again prior to
1489 * return.
1490 */
1491int regulator_bulk_disable(int num_consumers,
1492 struct regulator_bulk_data *consumers)
1493{
1494 int i;
1495 int ret;
1496
1497 for (i = 0; i < num_consumers; i++) {
1498 ret = regulator_disable(consumers[i].consumer);
1499 if (ret != 0)
1500 goto err;
1501 }
1502
1503 return 0;
1504
1505err:
1506 printk(KERN_ERR "Failed to disable %s\n", consumers[i].supply);
1507 while (--i >= 0)
1508 regulator_enable(consumers[i].consumer);
1509
1510 return ret;
1511}
1512EXPORT_SYMBOL_GPL(regulator_bulk_disable);
1513
1514/**
1515 * regulator_bulk_free - free multiple regulator consumers
1516 *
1517 * @num_consumers: Number of consumers
1518 * @consumers: Consumer data; clients are stored here.
1519 *
1520 * This convenience API allows consumers to free multiple regulator
1521 * clients in a single API call.
1522 */
1523void regulator_bulk_free(int num_consumers,
1524 struct regulator_bulk_data *consumers)
1525{
1526 int i;
1527
1528 for (i = 0; i < num_consumers; i++) {
1529 regulator_put(consumers[i].consumer);
1530 consumers[i].consumer = NULL;
1531 }
1532}
1533EXPORT_SYMBOL_GPL(regulator_bulk_free);
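
Tying the four bulk helpers together, a consumer needing several rails might use them roughly like this; the supply names AVDD/DVDD/VIO are invented for the example:

#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/regulator/consumer.h>

/* Hypothetical rails for a device needing three supplies. */
static struct regulator_bulk_data example_supplies[] = {
	{ .supply = "AVDD" },
	{ .supply = "DVDD" },
	{ .supply = "VIO" },
};

static int example_bulk_power_on(struct device *dev)
{
	int ret;

	ret = regulator_bulk_get(dev, ARRAY_SIZE(example_supplies),
				 example_supplies);
	if (ret)
		return ret;

	ret = regulator_bulk_enable(ARRAY_SIZE(example_supplies),
				    example_supplies);
	if (ret)
		regulator_bulk_free(ARRAY_SIZE(example_supplies),
				    example_supplies);
	return ret;
}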
1534
1535/**
1536 * regulator_notifier_call_chain - call regulator event notifier
1537 * @rdev: regulator source
1538 * @event: event to notify consumers of
1539 * @data: callback-specific data
1540 *
1541 * Called by regulator drivers to notify clients a regulator event has
1542 * occurred. We also notify regulator clients downstream.
1543 */
1544int regulator_notifier_call_chain(struct regulator_dev *rdev,
1545 unsigned long event, void *data)
1546{
1547 _notifier_call_chain(rdev, event, data);
1548 return NOTIFY_DONE;
1549
1550}
1551EXPORT_SYMBOL_GPL(regulator_notifier_call_chain);
1552
1553/**
1554 * regulator_register - register regulator
1555 * @regulator_desc: regulator description
1556 * @reg_data: private regulator data
1557 *
1558 * Called by regulator drivers to register a regulator.
1559 * Returns a pointer to the new struct regulator_dev on success or an ERR_PTR() on error.
1560 */
1561struct regulator_dev *regulator_register(struct regulator_desc *regulator_desc,
1562 void *reg_data)
1563{
1564 static atomic_t regulator_no = ATOMIC_INIT(0);
1565 struct regulator_dev *rdev;
1566 int ret;
1567
1568 if (regulator_desc == NULL)
1569 return ERR_PTR(-EINVAL);
1570
1571 if (regulator_desc->name == NULL || regulator_desc->ops == NULL)
1572 return ERR_PTR(-EINVAL);
1573
1574 if (regulator_desc->type != REGULATOR_VOLTAGE &&
1575     regulator_desc->type != REGULATOR_CURRENT)
1576 return ERR_PTR(-EINVAL);
1577
1578 rdev = kzalloc(sizeof(struct regulator_dev), GFP_KERNEL);
1579 if (rdev == NULL)
1580 return ERR_PTR(-ENOMEM);
1581
1582 mutex_lock(&regulator_list_mutex);
1583
1584 mutex_init(&rdev->mutex);
1585 rdev->reg_data = reg_data;
1586 rdev->owner = regulator_desc->owner;
1587 rdev->desc = regulator_desc;
1588 INIT_LIST_HEAD(&rdev->consumer_list);
1589 INIT_LIST_HEAD(&rdev->supply_list);
1590 INIT_LIST_HEAD(&rdev->list);
1591 INIT_LIST_HEAD(&rdev->slist);
1592 BLOCKING_INIT_NOTIFIER_HEAD(&rdev->notifier);
1593
1594 rdev->dev.class = &regulator_class;
1595 device_initialize(&rdev->dev);
1596 snprintf(rdev->dev.bus_id, sizeof(rdev->dev.bus_id),
1597 "regulator_%lu_%s",
1598 (unsigned long)atomic_inc_return(&regulator_no) - 1,
1599 regulator_desc->name);
1600
1601 ret = device_add(&rdev->dev);
1602 if (ret == 0)
1603 list_add(&rdev->list, &regulator_list);
1604 else {
1605 kfree(rdev);
1606 rdev = ERR_PTR(ret);
1607 }
1608 mutex_unlock(&regulator_list_mutex);
1609 return rdev;
1610}
1611EXPORT_SYMBOL_GPL(regulator_register);
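
On the driver side, registration follows the same pattern as the bq24022 driver earlier in this patch: fill in a struct regulator_desc pointing at a struct regulator_ops and hand it to regulator_register(). A stripped-down hypothetical sketch:

#include <linux/err.h>
#include <linux/module.h>
#include <linux/regulator/driver.h>

/* Hypothetical ops; a real driver would talk to its hardware here. */
static int example_ldo_enable(struct regulator_dev *rdev)
{
	return 0;
}

static struct regulator_ops example_ldo_ops = {
	.enable = example_ldo_enable,
};

static struct regulator_desc example_ldo_desc = {
	.name = "example-ldo",
	.ops = &example_ldo_ops,
	.type = REGULATOR_VOLTAGE,
	.owner = THIS_MODULE,
};

static struct regulator_dev *example_ldo_rdev;

static int example_ldo_register(void *driver_data)
{
	example_ldo_rdev = regulator_register(&example_ldo_desc, driver_data);
	if (IS_ERR(example_ldo_rdev))
		return PTR_ERR(example_ldo_rdev);
	return 0;
}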
1612
1613/**
1614 * regulator_unregister - unregister regulator
1615 * @rdev: regulator to unregister
1616 *
1617 * Called by regulator drivers to unregister a regulator.
1618 */
1619void regulator_unregister(struct regulator_dev *rdev)
1620{
1621 if (rdev == NULL)
1622 return;
1623
1624 mutex_lock(&regulator_list_mutex);
1625 list_del(&rdev->list);
1626 if (rdev->supply)
1627 sysfs_remove_link(&rdev->dev.kobj, "supply");
1628 device_unregister(&rdev->dev);
1629 mutex_unlock(&regulator_list_mutex);
1630}
1631EXPORT_SYMBOL_GPL(regulator_unregister);
1632
1633/**
1634 * regulator_set_supply - set regulator supply regulator
1635 * @regulator: regulator name
1636 * @supply: supply regulator name
1637 *
1638 * Called by platform initialisation code to set the supply regulator for this
1639 * regulator. This ensures that a regulator's supply will also be enabled by
1640 * the core if its child is enabled.
1641 */
1642int regulator_set_supply(const char *regulator, const char *supply)
1643{
1644 struct regulator_dev *rdev, *supply_rdev;
1645 int err;
1646
1647 if (regulator == NULL || supply == NULL)
1648 return -EINVAL;
1649
1650 mutex_lock(&regulator_list_mutex);
1651
1652 list_for_each_entry(rdev, &regulator_list, list) {
1653 if (!strcmp(rdev->desc->name, regulator))
1654 goto found_regulator;
1655 }
1656 mutex_unlock(&regulator_list_mutex);
1657 return -ENODEV;
1658
1659found_regulator:
1660 list_for_each_entry(supply_rdev, &regulator_list, list) {
1661 if (!strcmp(supply_rdev->desc->name, supply))
1662 goto found_supply;
1663 }
1664 mutex_unlock(&regulator_list_mutex);
1665 return -ENODEV;
1666
1667found_supply:
1668 err = sysfs_create_link(&rdev->dev.kobj, &supply_rdev->dev.kobj,
1669 "supply");
1670 if (err) {
1671 printk(KERN_ERR
1672 "%s: could not add device link %s err %d\n",
1673 __func__, supply_rdev->dev.kobj.name, err);
1674 goto out;
1675 }
1676 rdev->supply = supply_rdev;
1677 list_add(&rdev->slist, &supply_rdev->supply_list);
1678out:
1679 mutex_unlock(&regulator_list_mutex);
1680 return err;
1681}
1682EXPORT_SYMBOL_GPL(regulator_set_supply);
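/*
 * Editor's note: a minimal machine-level sketch of regulator_set_supply(),
 * assuming the prototype exported above is declared for board code in
 * <linux/regulator/machine.h>.  "LDO2" and "DCDC1" are hypothetical regulator
 * names matching whatever the regulator drivers register.
 */
#include <linux/regulator/machine.h>

static int __init my_board_regulator_init(void)
{
	/* LDO2 is fed from DCDC1, so enabling LDO2 also enables DCDC1 */
	return regulator_set_supply("LDO2", "DCDC1");
}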
1683
1684/**
1685 * regulator_get_supply - get regulator supply regulator
1686 * @regulator: regulator name
1687 *
1688 * Returns the supply regulator name or NULL if no supply regulator
1689 * exists (i.e. the regulator is supplied directly from USB, Line, Battery, etc.)
1690 */
1691const char *regulator_get_supply(const char *regulator)
1692{
1693 struct regulator_dev *rdev;
1694
1695 if (regulator == NULL)
1696 return NULL;
1697
1698 mutex_lock(&regulator_list_mutex);
1699 list_for_each_entry(rdev, &regulator_list, list) {
1700 if (!strcmp(rdev->desc->name, regulator))
1701 goto found;
1702 }
1703 mutex_unlock(&regulator_list_mutex);
1704 return NULL;
1705
1706found:
1707 mutex_unlock(&regulator_list_mutex);
1708 if (rdev->supply)
1709 return rdev->supply->desc->name;
1710 else
1711 return NULL;
1712}
1713EXPORT_SYMBOL_GPL(regulator_get_supply);
1714
1715/**
1716 * regulator_set_machine_constraints - sets regulator constraints
1717 * @regulator_name: regulator name
1718 * @constraints: constraints to apply
1719 * Allows platform initialisation code to define and constrain
1720 * regulator circuits e.g. valid voltage/current ranges, etc. NOTE:
1721 * Constraints *must* be set by platform code in order for some
1722 * regulator operations to proceed i.e. set_voltage, set_current_limit,
1723 * set_mode.
1724 */
1725int regulator_set_machine_constraints(const char *regulator_name,
1726 struct regulation_constraints *constraints)
1727{
1728 struct regulator_dev *rdev;
1729 int ret = 0;
1730
1731 if (regulator_name == NULL)
1732 return -EINVAL;
1733
1734 mutex_lock(&regulator_list_mutex);
1735
1736 list_for_each_entry(rdev, &regulator_list, list) {
1737 if (!strcmp(regulator_name, rdev->desc->name))
1738 goto found;
1739 }
1740 ret = -ENODEV;
1741 goto out;
1742
1743found:
1744 mutex_lock(&rdev->mutex);
1745 rdev->constraints = constraints;
1746
1747 /* do we need to apply the constraint voltage */
1748 if (rdev->constraints->apply_uV &&
1749 rdev->constraints->min_uV == rdev->constraints->max_uV &&
1750 rdev->desc->ops->set_voltage) {
1751 ret = rdev->desc->ops->set_voltage(rdev,
1752 rdev->constraints->min_uV, rdev->constraints->max_uV);
1753 if (ret < 0) {
1754 printk(KERN_ERR "%s: failed to apply %duV"
1755 " constraint\n", __func__,
1756 rdev->constraints->min_uV);
1757 rdev->constraints = NULL;
1758 goto out;
1759 }
1760 }
1761
1762 /* are we enabled at boot time by firmware / bootloader */
1763 if (rdev->constraints->boot_on)
1764 rdev->use_count = 1;
1765
1766 /* do we need to setup our suspend state */
1767 if (constraints->initial_state)
1768 ret = suspend_prepare(rdev, constraints->initial_state);
1769
1770 print_constraints(rdev);
1771 mutex_unlock(&rdev->mutex);
1772
1773out:
1774 mutex_unlock(&regulator_list_mutex);
1775 return ret;
1776}
1777EXPORT_SYMBOL_GPL(regulator_set_machine_constraints);
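/*
 * Editor's note: a hedged sketch of applying constraints from board code,
 * using only the struct regulation_constraints fields this function visibly
 * consumes (min_uV/max_uV, apply_uV, boot_on).  The regulator name and the
 * voltage value are illustrative, and the struct is assumed to come from
 * <linux/regulator/machine.h>.
 */
#include <linux/regulator/machine.h>

static struct regulation_constraints my_ldo2_constraints = {
	.min_uV   = 1800000,	/* fixed 1.8V ... */
	.max_uV   = 1800000,	/* ... so apply_uV can program it at boot */
	.apply_uV = 1,
	.boot_on  = 1,		/* left enabled by the bootloader */
};

static int __init my_board_constraints_init(void)
{
	return regulator_set_machine_constraints("LDO2", &my_ldo2_constraints);
}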
1778
1779
1780/**
1781 * regulator_set_device_supply - bind a regulator to a symbolic supply
1782 * @regulator: physical regulator name
1783 * @dev: device the supply applies to
1784 * @supply: symbolic name for supply
1785 *
1786 * Allows platform initialisation code to map physical regulator
1787 * sources to symbolic names for supplies for use by devices. Devices
1788 * should use these symbolic names to request regulators, avoiding the
1789 * need to provide board-specific regulator names as platform data.
1790 */
1791int regulator_set_device_supply(const char *regulator, struct device *dev,
1792 const char *supply)
1793{
1794 struct regulator_map *node;
1795
1796 if (regulator == NULL || supply == NULL)
1797 return -EINVAL;
1798
1799 node = kmalloc(sizeof(struct regulator_map), GFP_KERNEL);
1800 if (node == NULL)
1801 return -ENOMEM;
1802
1803 node->regulator = regulator;
1804 node->dev = dev;
1805 node->supply = supply;
1806
1807 mutex_lock(&regulator_list_mutex);
1808 list_add(&node->list, &regulator_map_list);
1809 mutex_unlock(&regulator_list_mutex);
1810 return 0;
1811}
1812EXPORT_SYMBOL_GPL(regulator_set_device_supply);
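/*
 * Editor's note: a sketch of the name mapping set up by board code so that a
 * device driver can simply call regulator_get(dev, "AVDD") without knowing
 * which physical regulator backs it.  "LDO2", "AVDD" and my_codec_device are
 * hypothetical, and the prototype is assumed to live in
 * <linux/regulator/machine.h>.
 */
#include <linux/platform_device.h>
#include <linux/regulator/machine.h>

extern struct platform_device my_codec_device;	/* hypothetical board device */

static int __init my_board_supply_map_init(void)
{
	/* the codec's "AVDD" supply is physically provided by LDO2 */
	return regulator_set_device_supply("LDO2", &my_codec_device.dev, "AVDD");
}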
1813
1814/**
1815 * regulator_suspend_prepare - prepare regulators for system-wide suspend
1816 * @state: system suspend state
1817 *
1818 * Configure each regulator with its suspend operating parameters for state.
1819 * This will usually be called by machine suspend code prior to suspending.
1820 */
1821int regulator_suspend_prepare(suspend_state_t state)
1822{
1823 struct regulator_dev *rdev;
1824 int ret = 0;
1825
1826 /* ON is handled by regulator active state */
1827 if (state == PM_SUSPEND_ON)
1828 return -EINVAL;
1829
1830 mutex_lock(&regulator_list_mutex);
1831 list_for_each_entry(rdev, &regulator_list, list) {
1832
1833 mutex_lock(&rdev->mutex);
1834 ret = suspend_prepare(rdev, state);
1835 mutex_unlock(&rdev->mutex);
1836
1837 if (ret < 0) {
1838 printk(KERN_ERR "%s: failed to prepare %s\n",
1839 __func__, rdev->desc->name);
1840 goto out;
1841 }
1842 }
1843out:
1844 mutex_unlock(&regulator_list_mutex);
1845 return ret;
1846}
1847EXPORT_SYMBOL_GPL(regulator_suspend_prepare);
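/*
 * Editor's note: a sketch of the expected caller, assuming platform suspend
 * code invokes this before entering the target state so each regulator picks
 * up the suspend parameters configured via its constraints.  The function
 * name my_platform_suspend_begin() is hypothetical.
 */
#include <linux/suspend.h>
#include <linux/regulator/machine.h>

static int my_platform_suspend_begin(suspend_state_t state)
{
	return regulator_suspend_prepare(state);	/* e.g. PM_SUSPEND_MEM */
}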
1848
1849/**
1850 * rdev_get_drvdata - get rdev regulator driver data
1851 * @rdev: regulator device
1852 *
1853 * Get rdev regulator driver private data. This call can be used in the
1854 * regulator driver context.
1855 */
1856void *rdev_get_drvdata(struct regulator_dev *rdev)
1857{
1858 return rdev->reg_data;
1859}
1860EXPORT_SYMBOL_GPL(rdev_get_drvdata);
1861
1862/**
1863 * regulator_get_drvdata - get regulator driver data
1864 * @regulator: regulator
1865 *
1866 * Get regulator driver private data. This call can be used in the consumer
1867 * driver context when regulator-specific functions outside this API need to be called.
1868 */
1869void *regulator_get_drvdata(struct regulator *regulator)
1870{
1871 return regulator->rdev->reg_data;
1872}
1873EXPORT_SYMBOL_GPL(regulator_get_drvdata);
1874
1875/**
1876 * regulator_set_drvdata - set regulator driver data
1877 * @regulator: regulator
1878 * @data: driver data to associate with the regulator
1879 */
1880void regulator_set_drvdata(struct regulator *regulator, void *data)
1881{
1882 regulator->rdev->reg_data = data;
1883}
1884EXPORT_SYMBOL_GPL(regulator_set_drvdata);
1885
1886/**
1887 * rdev_get_id - get regulator ID
1888 * @rdev: regulator device
1889 */
1890int rdev_get_id(struct regulator_dev *rdev)
1891{
1892 return rdev->desc->id;
1893}
1894EXPORT_SYMBOL_GPL(rdev_get_id);
1895
1896static int __init regulator_init(void)
1897{
1898 printk(KERN_INFO "regulator: core version %s\n", REGULATOR_VERSION);
1899 return class_register(&regulator_class);
1900}
1901
1902/* init early to allow our consumers to complete system booting */
1903core_initcall(regulator_init);
diff --git a/drivers/regulator/fixed.c b/drivers/regulator/fixed.c
new file mode 100644
index 000000000000..d31db3e14913
--- /dev/null
+++ b/drivers/regulator/fixed.c
@@ -0,0 +1,129 @@
1/*
2 * fixed.c
3 *
4 * Copyright 2008 Wolfson Microelectronics PLC.
5 *
6 * Author: Mark Brown <broonie@opensource.wolfsonmicro.com>
7 *
8 * This program is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU General Public License as
10 * published by the Free Software Foundation; either version 2 of the
11 * License, or (at your option) any later version.
12 *
13 * This is useful for systems with mixed controllable and
14 * non-controllable regulators, as well as for allowing testing on
15 * systems with no controllable regulators.
16 */
17
18#include <linux/err.h>
19#include <linux/mutex.h>
20#include <linux/platform_device.h>
21#include <linux/regulator/driver.h>
22#include <linux/regulator/fixed.h>
23
24struct fixed_voltage_data {
25 struct regulator_desc desc;
26 struct regulator_dev *dev;
27 int microvolts;
28};
29
30static int fixed_voltage_is_enabled(struct regulator_dev *dev)
31{
32 return 1;
33}
34
35static int fixed_voltage_enable(struct regulator_dev *dev)
36{
37 return 0;
38}
39
40static int fixed_voltage_get_voltage(struct regulator_dev *dev)
41{
42 struct fixed_voltage_data *data = rdev_get_drvdata(dev);
43
44 return data->microvolts;
45}
46
47static struct regulator_ops fixed_voltage_ops = {
48 .is_enabled = fixed_voltage_is_enabled,
49 .enable = fixed_voltage_enable,
50 .get_voltage = fixed_voltage_get_voltage,
51};
52
53static int regulator_fixed_voltage_probe(struct platform_device *pdev)
54{
55 struct fixed_voltage_config *config = pdev->dev.platform_data;
56 struct fixed_voltage_data *drvdata;
57 int ret;
58
59 drvdata = kzalloc(sizeof(struct fixed_voltage_data), GFP_KERNEL);
60 if (drvdata == NULL) {
61 ret = -ENOMEM;
62 goto err;
63 }
64
65 drvdata->desc.name = kstrdup(config->supply_name, GFP_KERNEL);
66 if (drvdata->desc.name == NULL) {
67 ret = -ENOMEM;
68 goto err;
69 }
70 drvdata->desc.type = REGULATOR_VOLTAGE;
71 drvdata->desc.owner = THIS_MODULE;
 72	drvdata->desc.ops = &fixed_voltage_ops;
73
74 drvdata->microvolts = config->microvolts;
75
76 drvdata->dev = regulator_register(&drvdata->desc, drvdata);
77 if (IS_ERR(drvdata->dev)) {
78 ret = PTR_ERR(drvdata->dev);
79 goto err_name;
80 }
81
82 platform_set_drvdata(pdev, drvdata);
83
84 dev_dbg(&pdev->dev, "%s supplying %duV\n", drvdata->desc.name,
85 drvdata->microvolts);
86
87 return 0;
88
89err_name:
90 kfree(drvdata->desc.name);
91err:
92 kfree(drvdata);
93 return ret;
94}
95
96static int regulator_fixed_voltage_remove(struct platform_device *pdev)
97{
98 struct fixed_voltage_data *drvdata = platform_get_drvdata(pdev);
99
100 regulator_unregister(drvdata->dev);
101 kfree(drvdata->desc.name);
102 kfree(drvdata);
103
104 return 0;
105}
106
107static struct platform_driver regulator_fixed_voltage_driver = {
108 .probe = regulator_fixed_voltage_probe,
109 .remove = regulator_fixed_voltage_remove,
110 .driver = {
111 .name = "reg-fixed-voltage",
112 },
113};
114
115static int __init regulator_fixed_voltage_init(void)
116{
117 return platform_driver_register(&regulator_fixed_voltage_driver);
118}
119module_init(regulator_fixed_voltage_init);
120
121static void __exit regulator_fixed_voltage_exit(void)
122{
123 platform_driver_unregister(&regulator_fixed_voltage_driver);
124}
125module_exit(regulator_fixed_voltage_exit);
126
127MODULE_AUTHOR("Mark Brown <broonie@opensource.wolfsonmicro.com>");
128MODULE_DESCRIPTION("Fixed voltage regulator");
129MODULE_LICENSE("GPL");
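/*
 * Editor's note: a hedged sketch of how a board would instantiate the fixed
 * regulator driver added above, assuming struct fixed_voltage_config in
 * <linux/regulator/fixed.h> carries just the supply_name and microvolts
 * fields consumed by the probe routine.  The supply name and voltage are
 * illustrative.
 */
#include <linux/platform_device.h>
#include <linux/regulator/fixed.h>

static struct fixed_voltage_config my_fixed_3v3 = {
	.supply_name = "VCC_3V3",	/* hypothetical supply name */
	.microvolts  = 3300000,
};

static struct platform_device my_fixed_3v3_device = {
	.name = "reg-fixed-voltage",
	.id   = 0,
	.dev  = {
		.platform_data = &my_fixed_3v3,
	},
};
/* ... platform_device_register(&my_fixed_3v3_device); from board init ... */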
diff --git a/drivers/regulator/virtual.c b/drivers/regulator/virtual.c
new file mode 100644
index 000000000000..5ddb464b1c3f
--- /dev/null
+++ b/drivers/regulator/virtual.c
@@ -0,0 +1,345 @@
1/*
2 * reg-virtual-consumer.c
3 *
4 * Copyright 2008 Wolfson Microelectronics PLC.
5 *
6 * Author: Mark Brown <broonie@opensource.wolfsonmicro.com>
7 *
8 * This program is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU General Public License as
10 * published by the Free Software Foundation; either version 2 of the
11 * License, or (at your option) any later version.
12 */
13
14#include <linux/err.h>
15#include <linux/mutex.h>
16#include <linux/platform_device.h>
17#include <linux/regulator/consumer.h>
18
19struct virtual_consumer_data {
20 struct mutex lock;
21 struct regulator *regulator;
22 int enabled;
23 int min_uV;
24 int max_uV;
25 int min_uA;
26 int max_uA;
27 unsigned int mode;
28};
29
30static void update_voltage_constraints(struct virtual_consumer_data *data)
31{
32 int ret;
33
34 if (data->min_uV && data->max_uV
35 && data->min_uV <= data->max_uV) {
36 ret = regulator_set_voltage(data->regulator,
37 data->min_uV, data->max_uV);
38 if (ret != 0) {
39 printk(KERN_ERR "regulator_set_voltage() failed: %d\n",
40 ret);
41 return;
42 }
43 }
44
45 if (data->min_uV && data->max_uV && !data->enabled) {
46 ret = regulator_enable(data->regulator);
47 if (ret == 0)
48 data->enabled = 1;
49 else
50 printk(KERN_ERR "regulator_enable() failed: %d\n",
51 ret);
52 }
53
54 if (!(data->min_uV && data->max_uV) && data->enabled) {
55 ret = regulator_disable(data->regulator);
56 if (ret == 0)
57 data->enabled = 0;
58 else
59 printk(KERN_ERR "regulator_disable() failed: %d\n",
60 ret);
61 }
62}
63
64static void update_current_limit_constraints(struct virtual_consumer_data
65 *data)
66{
67 int ret;
68
69 if (data->max_uA
70 && data->min_uA <= data->max_uA) {
71 ret = regulator_set_current_limit(data->regulator,
72 data->min_uA, data->max_uA);
73 if (ret != 0) {
74 pr_err("regulator_set_current_limit() failed: %d\n",
75 ret);
76 return;
77 }
78 }
79
80 if (data->max_uA && !data->enabled) {
81 ret = regulator_enable(data->regulator);
82 if (ret == 0)
83 data->enabled = 1;
84 else
85 printk(KERN_ERR "regulator_enable() failed: %d\n",
86 ret);
87 }
88
89 if (!(data->min_uA && data->max_uA) && data->enabled) {
90 ret = regulator_disable(data->regulator);
91 if (ret == 0)
92 data->enabled = 0;
93 else
94 printk(KERN_ERR "regulator_disable() failed: %d\n",
95 ret);
96 }
97}
98
99static ssize_t show_min_uV(struct device *dev,
100 struct device_attribute *attr, char *buf)
101{
102 struct virtual_consumer_data *data = dev_get_drvdata(dev);
103 return sprintf(buf, "%d\n", data->min_uV);
104}
105
106static ssize_t set_min_uV(struct device *dev, struct device_attribute *attr,
107 const char *buf, size_t count)
108{
109 struct virtual_consumer_data *data = dev_get_drvdata(dev);
110 long val;
111
112 if (strict_strtol(buf, 10, &val) != 0)
113 return count;
114
115 mutex_lock(&data->lock);
116
117 data->min_uV = val;
118 update_voltage_constraints(data);
119
120 mutex_unlock(&data->lock);
121
122 return count;
123}
124
125static ssize_t show_max_uV(struct device *dev,
126 struct device_attribute *attr, char *buf)
127{
128 struct virtual_consumer_data *data = dev_get_drvdata(dev);
129 return sprintf(buf, "%d\n", data->max_uV);
130}
131
132static ssize_t set_max_uV(struct device *dev, struct device_attribute *attr,
133 const char *buf, size_t count)
134{
135 struct virtual_consumer_data *data = dev_get_drvdata(dev);
136 long val;
137
138 if (strict_strtol(buf, 10, &val) != 0)
139 return count;
140
141 mutex_lock(&data->lock);
142
143 data->max_uV = val;
144 update_voltage_constraints(data);
145
146 mutex_unlock(&data->lock);
147
148 return count;
149}
150
151static ssize_t show_min_uA(struct device *dev,
152 struct device_attribute *attr, char *buf)
153{
154 struct virtual_consumer_data *data = dev_get_drvdata(dev);
155 return sprintf(buf, "%d\n", data->min_uA);
156}
157
158static ssize_t set_min_uA(struct device *dev, struct device_attribute *attr,
159 const char *buf, size_t count)
160{
161 struct virtual_consumer_data *data = dev_get_drvdata(dev);
162 long val;
163
164 if (strict_strtol(buf, 10, &val) != 0)
165 return count;
166
167 mutex_lock(&data->lock);
168
169 data->min_uA = val;
170 update_current_limit_constraints(data);
171
172 mutex_unlock(&data->lock);
173
174 return count;
175}
176
177static ssize_t show_max_uA(struct device *dev,
178 struct device_attribute *attr, char *buf)
179{
180 struct virtual_consumer_data *data = dev_get_drvdata(dev);
181 return sprintf(buf, "%d\n", data->max_uA);
182}
183
184static ssize_t set_max_uA(struct device *dev, struct device_attribute *attr,
185 const char *buf, size_t count)
186{
187 struct virtual_consumer_data *data = dev_get_drvdata(dev);
188 long val;
189
190 if (strict_strtol(buf, 10, &val) != 0)
191 return count;
192
193 mutex_lock(&data->lock);
194
195 data->max_uA = val;
196 update_current_limit_constraints(data);
197
198 mutex_unlock(&data->lock);
199
200 return count;
201}
202
203static ssize_t show_mode(struct device *dev,
204 struct device_attribute *attr, char *buf)
205{
206 struct virtual_consumer_data *data = dev_get_drvdata(dev);
207
208 switch (data->mode) {
209 case REGULATOR_MODE_FAST:
210 return sprintf(buf, "fast\n");
211 case REGULATOR_MODE_NORMAL:
212 return sprintf(buf, "normal\n");
213 case REGULATOR_MODE_IDLE:
214 return sprintf(buf, "idle\n");
215 case REGULATOR_MODE_STANDBY:
216 return sprintf(buf, "standby\n");
217 default:
218 return sprintf(buf, "unknown\n");
219 }
220}
221
222static ssize_t set_mode(struct device *dev, struct device_attribute *attr,
223 const char *buf, size_t count)
224{
225 struct virtual_consumer_data *data = dev_get_drvdata(dev);
226 unsigned int mode;
227 int ret;
228
229 if (strncmp(buf, "fast", strlen("fast")) == 0)
230 mode = REGULATOR_MODE_FAST;
231 else if (strncmp(buf, "normal", strlen("normal")) == 0)
232 mode = REGULATOR_MODE_NORMAL;
233 else if (strncmp(buf, "idle", strlen("idle")) == 0)
234 mode = REGULATOR_MODE_IDLE;
235 else if (strncmp(buf, "standby", strlen("standby")) == 0)
236 mode = REGULATOR_MODE_STANDBY;
237 else {
238 dev_err(dev, "Configuring invalid mode\n");
239 return count;
240 }
241
242 mutex_lock(&data->lock);
243 ret = regulator_set_mode(data->regulator, mode);
244 if (ret == 0)
245 data->mode = mode;
246 else
247 dev_err(dev, "Failed to configure mode: %d\n", ret);
248 mutex_unlock(&data->lock);
249
250 return count;
251}
252
253static DEVICE_ATTR(min_microvolts, 0666, show_min_uV, set_min_uV);
254static DEVICE_ATTR(max_microvolts, 0666, show_max_uV, set_max_uV);
255static DEVICE_ATTR(min_microamps, 0666, show_min_uA, set_min_uA);
256static DEVICE_ATTR(max_microamps, 0666, show_max_uA, set_max_uA);
257static DEVICE_ATTR(mode, 0666, show_mode, set_mode);
258
259static struct device_attribute *attributes[] = {
260 &dev_attr_min_microvolts,
261 &dev_attr_max_microvolts,
262 &dev_attr_min_microamps,
263 &dev_attr_max_microamps,
264 &dev_attr_mode,
265};
266
267static int regulator_virtual_consumer_probe(struct platform_device *pdev)
268{
269 char *reg_id = pdev->dev.platform_data;
270 struct virtual_consumer_data *drvdata;
271 int ret, i;
272
273 drvdata = kzalloc(sizeof(struct virtual_consumer_data), GFP_KERNEL);
274 if (drvdata == NULL) {
275 ret = -ENOMEM;
276 goto err;
277 }
278
279 mutex_init(&drvdata->lock);
280
281 drvdata->regulator = regulator_get(&pdev->dev, reg_id);
282 if (IS_ERR(drvdata->regulator)) {
283 ret = PTR_ERR(drvdata->regulator);
284 goto err;
285 }
286
287 for (i = 0; i < ARRAY_SIZE(attributes); i++) {
288 ret = device_create_file(&pdev->dev, attributes[i]);
289 if (ret != 0)
290 goto err;
291 }
292
293 drvdata->mode = regulator_get_mode(drvdata->regulator);
294
295 platform_set_drvdata(pdev, drvdata);
296
297 return 0;
298
299err:
300 for (i = 0; i < ARRAY_SIZE(attributes); i++)
301 device_remove_file(&pdev->dev, attributes[i]);
302 kfree(drvdata);
303 return ret;
304}
305
306static int regulator_virtual_consumer_remove(struct platform_device *pdev)
307{
308 struct virtual_consumer_data *drvdata = platform_get_drvdata(pdev);
309 int i;
310
311 for (i = 0; i < ARRAY_SIZE(attributes); i++)
312 device_remove_file(&pdev->dev, attributes[i]);
313 if (drvdata->enabled)
314 regulator_disable(drvdata->regulator);
315 regulator_put(drvdata->regulator);
316
317 kfree(drvdata);
318
319 return 0;
320}
321
322static struct platform_driver regulator_virtual_consumer_driver = {
323 .probe = regulator_virtual_consumer_probe,
324 .remove = regulator_virtual_consumer_remove,
325 .driver = {
326 .name = "reg-virt-consumer",
327 },
328};
329
330
331static int __init regulator_virtual_consumer_init(void)
332{
333 return platform_driver_register(&regulator_virtual_consumer_driver);
334}
335module_init(regulator_virtual_consumer_init);
336
337static void __exit regulator_virtual_consumer_exit(void)
338{
339 platform_driver_unregister(&regulator_virtual_consumer_driver);
340}
341module_exit(regulator_virtual_consumer_exit);
342
343MODULE_AUTHOR("Mark Brown <broonie@opensource.wolfsonmicro.com>");
344MODULE_DESCRIPTION("Virtual regulator consumer");
345MODULE_LICENSE("GPL");
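/*
 * Editor's note: a sketch of instantiating the virtual consumer above from
 * board or test code.  The platform_data is simply the name of the regulator
 * to exercise; "LDO2" and the device variable name are hypothetical.  Once
 * probed, the min/max microvolt, microamp and mode attributes appear under
 * the device's sysfs directory and can be written to drive the regulator,
 * e.g. (path is an assumption based on the platform device name/id):
 *	echo 1800000 > /sys/devices/platform/reg-virt-consumer.0/min_microvolts
 */
#include <linux/platform_device.h>

static struct platform_device my_virt_consumer_device = {
	.name = "reg-virt-consumer",
	.id   = 0,
	.dev  = {
		.platform_data = "LDO2",	/* hypothetical regulator name */
	},
};
/* ... platform_device_register(&my_virt_consumer_device); ... */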
diff --git a/drivers/rtc/interface.c b/drivers/rtc/interface.c
index d397fa5f3a91..7af60b98d8a4 100644
--- a/drivers/rtc/interface.c
+++ b/drivers/rtc/interface.c
@@ -20,7 +20,7 @@ int rtc_read_time(struct rtc_device *rtc, struct rtc_time *tm)
20 20
21 err = mutex_lock_interruptible(&rtc->ops_lock); 21 err = mutex_lock_interruptible(&rtc->ops_lock);
22 if (err) 22 if (err)
23 return -EBUSY; 23 return err;
24 24
25 if (!rtc->ops) 25 if (!rtc->ops)
26 err = -ENODEV; 26 err = -ENODEV;
@@ -46,7 +46,7 @@ int rtc_set_time(struct rtc_device *rtc, struct rtc_time *tm)
46 46
47 err = mutex_lock_interruptible(&rtc->ops_lock); 47 err = mutex_lock_interruptible(&rtc->ops_lock);
48 if (err) 48 if (err)
49 return -EBUSY; 49 return err;
50 50
51 if (!rtc->ops) 51 if (!rtc->ops)
52 err = -ENODEV; 52 err = -ENODEV;
@@ -66,7 +66,7 @@ int rtc_set_mmss(struct rtc_device *rtc, unsigned long secs)
66 66
67 err = mutex_lock_interruptible(&rtc->ops_lock); 67 err = mutex_lock_interruptible(&rtc->ops_lock);
68 if (err) 68 if (err)
69 return -EBUSY; 69 return err;
70 70
71 if (!rtc->ops) 71 if (!rtc->ops)
72 err = -ENODEV; 72 err = -ENODEV;
@@ -106,7 +106,7 @@ static int rtc_read_alarm_internal(struct rtc_device *rtc, struct rtc_wkalrm *al
106 106
107 err = mutex_lock_interruptible(&rtc->ops_lock); 107 err = mutex_lock_interruptible(&rtc->ops_lock);
108 if (err) 108 if (err)
109 return -EBUSY; 109 return err;
110 110
111 if (rtc->ops == NULL) 111 if (rtc->ops == NULL)
112 err = -ENODEV; 112 err = -ENODEV;
@@ -293,7 +293,7 @@ int rtc_set_alarm(struct rtc_device *rtc, struct rtc_wkalrm *alarm)
293 293
294 err = mutex_lock_interruptible(&rtc->ops_lock); 294 err = mutex_lock_interruptible(&rtc->ops_lock);
295 if (err) 295 if (err)
296 return -EBUSY; 296 return err;
297 297
298 if (!rtc->ops) 298 if (!rtc->ops)
299 err = -ENODEV; 299 err = -ENODEV;
diff --git a/drivers/rtc/rtc-at91rm9200.c b/drivers/rtc/rtc-at91rm9200.c
index cd32d05db773..4e888cc8be5b 100644
--- a/drivers/rtc/rtc-at91rm9200.c
+++ b/drivers/rtc/rtc-at91rm9200.c
@@ -29,7 +29,7 @@
29#include <linux/completion.h> 29#include <linux/completion.h>
30 30
31#include <asm/uaccess.h> 31#include <asm/uaccess.h>
32#include <asm/arch/at91_rtc.h> 32#include <mach/at91_rtc.h>
33 33
34 34
35#define AT91_RTC_FREQ 1 35#define AT91_RTC_FREQ 1
diff --git a/drivers/rtc/rtc-at91sam9.c b/drivers/rtc/rtc-at91sam9.c
index f0246ef413a4..2133f37906f2 100644
--- a/drivers/rtc/rtc-at91sam9.c
+++ b/drivers/rtc/rtc-at91sam9.c
@@ -19,8 +19,8 @@
19#include <linux/interrupt.h> 19#include <linux/interrupt.h>
20#include <linux/ioctl.h> 20#include <linux/ioctl.h>
21 21
22#include <asm/arch/board.h> 22#include <mach/board.h>
23#include <asm/arch/at91_rtt.h> 23#include <mach/at91_rtt.h>
24 24
25 25
26/* 26/*
diff --git a/drivers/rtc/rtc-bfin.c b/drivers/rtc/rtc-bfin.c
index 8624f55d0560..a1af4c27939b 100644
--- a/drivers/rtc/rtc-bfin.c
+++ b/drivers/rtc/rtc-bfin.c
@@ -2,7 +2,7 @@
2 * Blackfin On-Chip Real Time Clock Driver 2 * Blackfin On-Chip Real Time Clock Driver
3 * Supports BF52[257]/BF53[123]/BF53[467]/BF54[24789] 3 * Supports BF52[257]/BF53[123]/BF53[467]/BF54[24789]
4 * 4 *
5 * Copyright 2004-2007 Analog Devices Inc. 5 * Copyright 2004-2008 Analog Devices Inc.
6 * 6 *
7 * Enter bugs at http://blackfin.uclinux.org/ 7 * Enter bugs at http://blackfin.uclinux.org/
8 * 8 *
@@ -32,6 +32,15 @@
32 * writes to clear status registers complete immediately. 32 * writes to clear status registers complete immediately.
33 */ 33 */
34 34
35/* It may seem odd that there is no SWCNT code in here (which would be exposed
36 * via the periodic interrupt event, or PIE). Since the Blackfin RTC peripheral
37 * runs in units of seconds (N/HZ) but the Linux framework runs in units of HZ
38 * (2^N HZ), there is no point in keeping code that only provides 1 HZ PIEs.
39 * The same exact behavior can be accomplished by using the update interrupt
40 * event (UIE). Maybe down the line the RTC peripheral will suck less in which
41 * case we can re-introduce PIE support.
42 */
43
35#include <linux/bcd.h> 44#include <linux/bcd.h>
36#include <linux/completion.h> 45#include <linux/completion.h>
37#include <linux/delay.h> 46#include <linux/delay.h>
@@ -144,14 +153,13 @@ static void bfin_rtc_sync_pending(struct device *dev)
144 * Initialize the RTC. Enable pre-scaler to scale RTC clock 153 * Initialize the RTC. Enable pre-scaler to scale RTC clock
145 * to 1Hz and clear interrupt/status registers. 154 * to 1Hz and clear interrupt/status registers.
146 */ 155 */
147static void bfin_rtc_reset(struct device *dev) 156static void bfin_rtc_reset(struct device *dev, u16 rtc_ictl)
148{ 157{
149 struct bfin_rtc *rtc = dev_get_drvdata(dev); 158 struct bfin_rtc *rtc = dev_get_drvdata(dev);
150 dev_dbg_stamp(dev); 159 dev_dbg_stamp(dev);
151 bfin_rtc_sync_pending(dev); 160 bfin_rtc_sync_pending(dev);
152 bfin_write_RTC_PREN(0x1); 161 bfin_write_RTC_PREN(0x1);
153 bfin_write_RTC_ICTL(RTC_ISTAT_WRITE_COMPLETE); 162 bfin_write_RTC_ICTL(rtc_ictl);
154 bfin_write_RTC_SWCNT(0);
155 bfin_write_RTC_ALARM(0); 163 bfin_write_RTC_ALARM(0);
156 bfin_write_RTC_ISTAT(0xFFFF); 164 bfin_write_RTC_ISTAT(0xFFFF);
157 rtc->rtc_wrote_regs = 0; 165 rtc->rtc_wrote_regs = 0;
@@ -194,14 +202,6 @@ static irqreturn_t bfin_rtc_interrupt(int irq, void *dev_id)
194 } 202 }
195 } 203 }
196 204
197 if (rtc_ictl & RTC_ISTAT_STOPWATCH) {
198 if (rtc_istat & RTC_ISTAT_STOPWATCH) {
199 bfin_write_RTC_ISTAT(RTC_ISTAT_STOPWATCH);
200 events |= RTC_PF | RTC_IRQF;
201 bfin_write_RTC_SWCNT(rtc->rtc_dev->irq_freq);
202 }
203 }
204
205 if (rtc_ictl & RTC_ISTAT_SEC) { 205 if (rtc_ictl & RTC_ISTAT_SEC) {
206 if (rtc_istat & RTC_ISTAT_SEC) { 206 if (rtc_istat & RTC_ISTAT_SEC) {
207 bfin_write_RTC_ISTAT(RTC_ISTAT_SEC); 207 bfin_write_RTC_ISTAT(RTC_ISTAT_SEC);
@@ -226,7 +226,7 @@ static int bfin_rtc_open(struct device *dev)
226 226
227 ret = request_irq(IRQ_RTC, bfin_rtc_interrupt, IRQF_SHARED, to_platform_device(dev)->name, dev); 227 ret = request_irq(IRQ_RTC, bfin_rtc_interrupt, IRQF_SHARED, to_platform_device(dev)->name, dev);
228 if (!ret) 228 if (!ret)
229 bfin_rtc_reset(dev); 229 bfin_rtc_reset(dev, RTC_ISTAT_WRITE_COMPLETE);
230 230
231 return ret; 231 return ret;
232} 232}
@@ -234,16 +234,16 @@ static int bfin_rtc_open(struct device *dev)
234static void bfin_rtc_release(struct device *dev) 234static void bfin_rtc_release(struct device *dev)
235{ 235{
236 dev_dbg_stamp(dev); 236 dev_dbg_stamp(dev);
237 bfin_rtc_reset(dev); 237 bfin_rtc_reset(dev, 0);
238 free_irq(IRQ_RTC, dev); 238 free_irq(IRQ_RTC, dev);
239} 239}
240 240
241static void bfin_rtc_int_set(struct bfin_rtc *rtc, u16 rtc_int) 241static void bfin_rtc_int_set(u16 rtc_int)
242{ 242{
243 bfin_write_RTC_ISTAT(rtc_int); 243 bfin_write_RTC_ISTAT(rtc_int);
244 bfin_write_RTC_ICTL(bfin_read_RTC_ICTL() | rtc_int); 244 bfin_write_RTC_ICTL(bfin_read_RTC_ICTL() | rtc_int);
245} 245}
246static void bfin_rtc_int_clear(struct bfin_rtc *rtc, u16 rtc_int) 246static void bfin_rtc_int_clear(u16 rtc_int)
247{ 247{
248 bfin_write_RTC_ICTL(bfin_read_RTC_ICTL() & rtc_int); 248 bfin_write_RTC_ICTL(bfin_read_RTC_ICTL() & rtc_int);
249} 249}
@@ -252,7 +252,7 @@ static void bfin_rtc_int_set_alarm(struct bfin_rtc *rtc)
252 /* Blackfin has different bits for whether the alarm is 252 /* Blackfin has different bits for whether the alarm is
253 * more than 24 hours away. 253 * more than 24 hours away.
254 */ 254 */
255 bfin_rtc_int_set(rtc, (rtc->rtc_alarm.tm_yday == -1 ? RTC_ISTAT_ALARM : RTC_ISTAT_ALARM_DAY)); 255 bfin_rtc_int_set(rtc->rtc_alarm.tm_yday == -1 ? RTC_ISTAT_ALARM : RTC_ISTAT_ALARM_DAY);
256} 256}
257static int bfin_rtc_ioctl(struct device *dev, unsigned int cmd, unsigned long arg) 257static int bfin_rtc_ioctl(struct device *dev, unsigned int cmd, unsigned long arg)
258{ 258{
@@ -264,23 +264,13 @@ static int bfin_rtc_ioctl(struct device *dev, unsigned int cmd, unsigned long ar
264 bfin_rtc_sync_pending(dev); 264 bfin_rtc_sync_pending(dev);
265 265
266 switch (cmd) { 266 switch (cmd) {
267 case RTC_PIE_ON:
268 dev_dbg_stamp(dev);
269 bfin_rtc_int_set(rtc, RTC_ISTAT_STOPWATCH);
270 bfin_write_RTC_SWCNT(rtc->rtc_dev->irq_freq);
271 break;
272 case RTC_PIE_OFF:
273 dev_dbg_stamp(dev);
274 bfin_rtc_int_clear(rtc, ~RTC_ISTAT_STOPWATCH);
275 break;
276
277 case RTC_UIE_ON: 267 case RTC_UIE_ON:
278 dev_dbg_stamp(dev); 268 dev_dbg_stamp(dev);
279 bfin_rtc_int_set(rtc, RTC_ISTAT_SEC); 269 bfin_rtc_int_set(RTC_ISTAT_SEC);
280 break; 270 break;
281 case RTC_UIE_OFF: 271 case RTC_UIE_OFF:
282 dev_dbg_stamp(dev); 272 dev_dbg_stamp(dev);
283 bfin_rtc_int_clear(rtc, ~RTC_ISTAT_SEC); 273 bfin_rtc_int_clear(~RTC_ISTAT_SEC);
284 break; 274 break;
285 275
286 case RTC_AIE_ON: 276 case RTC_AIE_ON:
@@ -289,7 +279,7 @@ static int bfin_rtc_ioctl(struct device *dev, unsigned int cmd, unsigned long ar
289 break; 279 break;
290 case RTC_AIE_OFF: 280 case RTC_AIE_OFF:
291 dev_dbg_stamp(dev); 281 dev_dbg_stamp(dev);
292 bfin_rtc_int_clear(rtc, ~(RTC_ISTAT_ALARM | RTC_ISTAT_ALARM_DAY)); 282 bfin_rtc_int_clear(~(RTC_ISTAT_ALARM | RTC_ISTAT_ALARM_DAY));
293 break; 283 break;
294 284
295 default: 285 default:
@@ -371,30 +361,14 @@ static int bfin_rtc_proc(struct device *dev, struct seq_file *seq)
371 seq_printf(seq, 361 seq_printf(seq,
372 "alarm_IRQ\t: %s\n" 362 "alarm_IRQ\t: %s\n"
373 "wkalarm_IRQ\t: %s\n" 363 "wkalarm_IRQ\t: %s\n"
374 "seconds_IRQ\t: %s\n" 364 "seconds_IRQ\t: %s\n",
375 "periodic_IRQ\t: %s\n",
376 yesno(ictl & RTC_ISTAT_ALARM), 365 yesno(ictl & RTC_ISTAT_ALARM),
377 yesno(ictl & RTC_ISTAT_ALARM_DAY), 366 yesno(ictl & RTC_ISTAT_ALARM_DAY),
378 yesno(ictl & RTC_ISTAT_SEC), 367 yesno(ictl & RTC_ISTAT_SEC));
379 yesno(ictl & RTC_ISTAT_STOPWATCH));
380 return 0; 368 return 0;
381#undef yesno 369#undef yesno
382} 370}
383 371
384/**
385 * bfin_irq_set_freq - make sure hardware supports requested freq
386 * @dev: pointer to RTC device structure
387 * @freq: requested frequency rate
388 *
389 * The Blackfin RTC can only generate periodic events at 1 per
390 * second (1 Hz), so reject any attempt at changing it.
391 */
392static int bfin_irq_set_freq(struct device *dev, int freq)
393{
394 dev_dbg_stamp(dev);
395 return -ENOTTY;
396}
397
398static struct rtc_class_ops bfin_rtc_ops = { 372static struct rtc_class_ops bfin_rtc_ops = {
399 .open = bfin_rtc_open, 373 .open = bfin_rtc_open,
400 .release = bfin_rtc_release, 374 .release = bfin_rtc_release,
@@ -404,7 +378,6 @@ static struct rtc_class_ops bfin_rtc_ops = {
404 .read_alarm = bfin_rtc_read_alarm, 378 .read_alarm = bfin_rtc_read_alarm,
405 .set_alarm = bfin_rtc_set_alarm, 379 .set_alarm = bfin_rtc_set_alarm,
406 .proc = bfin_rtc_proc, 380 .proc = bfin_rtc_proc,
407 .irq_set_freq = bfin_irq_set_freq,
408}; 381};
409 382
410static int __devinit bfin_rtc_probe(struct platform_device *pdev) 383static int __devinit bfin_rtc_probe(struct platform_device *pdev)
@@ -423,10 +396,14 @@ static int __devinit bfin_rtc_probe(struct platform_device *pdev)
423 ret = PTR_ERR(rtc->rtc_dev); 396 ret = PTR_ERR(rtc->rtc_dev);
424 goto err; 397 goto err;
425 } 398 }
426 rtc->rtc_dev->irq_freq = 1; 399
400 /* see comment at top of file about stopwatch/PIE */
401 bfin_write_RTC_SWCNT(0);
427 402
428 platform_set_drvdata(pdev, rtc); 403 platform_set_drvdata(pdev, rtc);
429 404
405 device_init_wakeup(&pdev->dev, 1);
406
430 return 0; 407 return 0;
431 408
432 err: 409 err:
@@ -445,6 +422,32 @@ static int __devexit bfin_rtc_remove(struct platform_device *pdev)
445 return 0; 422 return 0;
446} 423}
447 424
425#ifdef CONFIG_PM
426static int bfin_rtc_suspend(struct platform_device *pdev, pm_message_t state)
427{
428 if (device_may_wakeup(&pdev->dev)) {
429 enable_irq_wake(IRQ_RTC);
430 bfin_rtc_sync_pending(&pdev->dev);
431 } else
432 bfin_rtc_int_clear(-1);
433
434 return 0;
435}
436
437static int bfin_rtc_resume(struct platform_device *pdev)
438{
439 if (device_may_wakeup(&pdev->dev))
440 disable_irq_wake(IRQ_RTC);
441 else
442 bfin_write_RTC_ISTAT(-1);
443
444 return 0;
445}
446#else
447# define bfin_rtc_suspend NULL
448# define bfin_rtc_resume NULL
449#endif
450
448static struct platform_driver bfin_rtc_driver = { 451static struct platform_driver bfin_rtc_driver = {
449 .driver = { 452 .driver = {
450 .name = "rtc-bfin", 453 .name = "rtc-bfin",
@@ -452,6 +455,8 @@ static struct platform_driver bfin_rtc_driver = {
452 }, 455 },
453 .probe = bfin_rtc_probe, 456 .probe = bfin_rtc_probe,
454 .remove = __devexit_p(bfin_rtc_remove), 457 .remove = __devexit_p(bfin_rtc_remove),
458 .suspend = bfin_rtc_suspend,
459 .resume = bfin_rtc_resume,
455}; 460};
456 461
457static int __init bfin_rtc_init(void) 462static int __init bfin_rtc_init(void)
diff --git a/drivers/rtc/rtc-dev.c b/drivers/rtc/rtc-dev.c
index 0a870b7e5c32..856cc1af40df 100644
--- a/drivers/rtc/rtc-dev.c
+++ b/drivers/rtc/rtc-dev.c
@@ -221,7 +221,7 @@ static long rtc_dev_ioctl(struct file *file,
221 221
222 err = mutex_lock_interruptible(&rtc->ops_lock); 222 err = mutex_lock_interruptible(&rtc->ops_lock);
223 if (err) 223 if (err)
224 return -EBUSY; 224 return err;
225 225
226 /* check that the calling task has appropriate permissions 226 /* check that the calling task has appropriate permissions
227 * for certain ioctls. doing this check here is useful 227 * for certain ioctls. doing this check here is useful
@@ -432,6 +432,8 @@ static int rtc_dev_release(struct inode *inode, struct file *file)
432#ifdef CONFIG_RTC_INTF_DEV_UIE_EMUL 432#ifdef CONFIG_RTC_INTF_DEV_UIE_EMUL
433 clear_uie(rtc); 433 clear_uie(rtc);
434#endif 434#endif
435 rtc_irq_set_state(rtc, NULL, 0);
436
435 if (rtc->ops->release) 437 if (rtc->ops->release)
436 rtc->ops->release(rtc->dev.parent); 438 rtc->ops->release(rtc->dev.parent);
437 439
diff --git a/drivers/rtc/rtc-ep93xx.c b/drivers/rtc/rtc-ep93xx.c
index 1e99325270df..36e4ac0bd69c 100644
--- a/drivers/rtc/rtc-ep93xx.c
+++ b/drivers/rtc/rtc-ep93xx.c
@@ -12,7 +12,7 @@
12#include <linux/module.h> 12#include <linux/module.h>
13#include <linux/rtc.h> 13#include <linux/rtc.h>
14#include <linux/platform_device.h> 14#include <linux/platform_device.h>
15#include <asm/hardware.h> 15#include <mach/hardware.h>
16 16
17#define EP93XX_RTC_REG(x) (EP93XX_RTC_BASE + (x)) 17#define EP93XX_RTC_REG(x) (EP93XX_RTC_BASE + (x))
18#define EP93XX_RTC_DATA EP93XX_RTC_REG(0x0000) 18#define EP93XX_RTC_DATA EP93XX_RTC_REG(0x0000)
diff --git a/drivers/rtc/rtc-s3c.c b/drivers/rtc/rtc-s3c.c
index 54b1ebb01502..e7d19b6c265a 100644
--- a/drivers/rtc/rtc-s3c.c
+++ b/drivers/rtc/rtc-s3c.c
@@ -22,7 +22,7 @@
22#include <linux/clk.h> 22#include <linux/clk.h>
23#include <linux/log2.h> 23#include <linux/log2.h>
24 24
25#include <asm/hardware.h> 25#include <mach/hardware.h>
26#include <asm/uaccess.h> 26#include <asm/uaccess.h>
27#include <asm/io.h> 27#include <asm/io.h>
28#include <asm/irq.h> 28#include <asm/irq.h>
diff --git a/drivers/rtc/rtc-sa1100.c b/drivers/rtc/rtc-sa1100.c
index f47294c60148..66a9bb85bbe8 100644
--- a/drivers/rtc/rtc-sa1100.c
+++ b/drivers/rtc/rtc-sa1100.c
@@ -31,11 +31,11 @@
31#include <linux/pm.h> 31#include <linux/pm.h>
32#include <linux/bitops.h> 32#include <linux/bitops.h>
33 33
34#include <asm/hardware.h> 34#include <mach/hardware.h>
35#include <asm/irq.h> 35#include <asm/irq.h>
36 36
37#ifdef CONFIG_ARCH_PXA 37#ifdef CONFIG_ARCH_PXA
38#include <asm/arch/pxa-regs.h> 38#include <mach/pxa-regs.h>
39#endif 39#endif
40 40
41#define TIMER_FREQ CLOCK_TICK_RATE 41#define TIMER_FREQ CLOCK_TICK_RATE
diff --git a/drivers/s390/block/dasd_alias.c b/drivers/s390/block/dasd_alias.c
index 2d8df0b30538..20676cdef4a5 100644
--- a/drivers/s390/block/dasd_alias.c
+++ b/drivers/s390/block/dasd_alias.c
@@ -91,7 +91,8 @@ static struct alias_pav_group *_find_group(struct alias_lcu *lcu,
91 else 91 else
92 search_unit_addr = uid->base_unit_addr; 92 search_unit_addr = uid->base_unit_addr;
93 list_for_each_entry(pos, &lcu->grouplist, group) { 93 list_for_each_entry(pos, &lcu->grouplist, group) {
94 if (pos->uid.base_unit_addr == search_unit_addr) 94 if (pos->uid.base_unit_addr == search_unit_addr &&
95 !strncmp(pos->uid.vduit, uid->vduit, sizeof(uid->vduit)))
95 return pos; 96 return pos;
96 }; 97 };
97 return NULL; 98 return NULL;
@@ -332,6 +333,7 @@ static int _add_device_to_lcu(struct alias_lcu *lcu,
332 group->uid.base_unit_addr = uid->real_unit_addr; 333 group->uid.base_unit_addr = uid->real_unit_addr;
333 else 334 else
334 group->uid.base_unit_addr = uid->base_unit_addr; 335 group->uid.base_unit_addr = uid->base_unit_addr;
336 memcpy(group->uid.vduit, uid->vduit, sizeof(uid->vduit));
335 INIT_LIST_HEAD(&group->group); 337 INIT_LIST_HEAD(&group->group);
336 INIT_LIST_HEAD(&group->baselist); 338 INIT_LIST_HEAD(&group->baselist);
337 INIT_LIST_HEAD(&group->aliaslist); 339 INIT_LIST_HEAD(&group->aliaslist);
diff --git a/drivers/s390/block/dasd_devmap.c b/drivers/s390/block/dasd_devmap.c
index d774e79476fe..cd3335c1c307 100644
--- a/drivers/s390/block/dasd_devmap.c
+++ b/drivers/s390/block/dasd_devmap.c
@@ -913,7 +913,8 @@ dasd_vendor_show(struct device *dev, struct device_attribute *attr, char *buf)
913static DEVICE_ATTR(vendor, 0444, dasd_vendor_show, NULL); 913static DEVICE_ATTR(vendor, 0444, dasd_vendor_show, NULL);
914 914
915#define UID_STRLEN ( /* vendor */ 3 + 1 + /* serial */ 14 + 1 +\ 915#define UID_STRLEN ( /* vendor */ 3 + 1 + /* serial */ 14 + 1 +\
916 /* SSID */ 4 + 1 + /* unit addr */ 2 + 1) 916 /* SSID */ 4 + 1 + /* unit addr */ 2 + 1 +\
917 /* vduit */ 32 + 1)
917 918
918static ssize_t 919static ssize_t
919dasd_uid_show(struct device *dev, struct device_attribute *attr, char *buf) 920dasd_uid_show(struct device *dev, struct device_attribute *attr, char *buf)
@@ -945,8 +946,17 @@ dasd_uid_show(struct device *dev, struct device_attribute *attr, char *buf)
945 sprintf(ua_string, "%02x", uid->real_unit_addr); 946 sprintf(ua_string, "%02x", uid->real_unit_addr);
946 break; 947 break;
947 } 948 }
948 snprintf(uid_string, sizeof(uid_string), "%s.%s.%04x.%s", 949 if (strlen(uid->vduit) > 0)
949 uid->vendor, uid->serial, uid->ssid, ua_string); 950 snprintf(uid_string, sizeof(uid_string),
951 "%s.%s.%04x.%s.%s",
952 uid->vendor, uid->serial,
953 uid->ssid, ua_string,
954 uid->vduit);
955 else
956 snprintf(uid_string, sizeof(uid_string),
957 "%s.%s.%04x.%s",
958 uid->vendor, uid->serial,
959 uid->ssid, ua_string);
950 spin_unlock(&dasd_devmap_lock); 960 spin_unlock(&dasd_devmap_lock);
951 return snprintf(buf, PAGE_SIZE, "%s\n", uid_string); 961 return snprintf(buf, PAGE_SIZE, "%s\n", uid_string);
952} 962}
diff --git a/drivers/s390/block/dasd_eckd.c b/drivers/s390/block/dasd_eckd.c
index 3590fdb5b2fd..773b3fe275b2 100644
--- a/drivers/s390/block/dasd_eckd.c
+++ b/drivers/s390/block/dasd_eckd.c
@@ -313,8 +313,8 @@ static int prefix(struct ccw1 *ccw, struct PFX_eckd_data *pfxdata, int trk,
313 memset(pfxdata, 0, sizeof(*pfxdata)); 313 memset(pfxdata, 0, sizeof(*pfxdata));
314 /* prefix data */ 314 /* prefix data */
315 pfxdata->format = 0; 315 pfxdata->format = 0;
316 pfxdata->base_address = basepriv->conf_data.ned1.unit_addr; 316 pfxdata->base_address = basepriv->ned->unit_addr;
317 pfxdata->base_lss = basepriv->conf_data.ned1.ID; 317 pfxdata->base_lss = basepriv->ned->ID;
318 pfxdata->validity.define_extend = 1; 318 pfxdata->validity.define_extend = 1;
319 319
320 /* private uid is kept up to date, conf_data may be outdated */ 320 /* private uid is kept up to date, conf_data may be outdated */
@@ -536,36 +536,40 @@ dasd_eckd_cdl_reclen(int recid)
536/* 536/*
537 * Generate device unique id that specifies the physical device. 537 * Generate device unique id that specifies the physical device.
538 */ 538 */
539static int 539static int dasd_eckd_generate_uid(struct dasd_device *device,
540dasd_eckd_generate_uid(struct dasd_device *device, struct dasd_uid *uid) 540 struct dasd_uid *uid)
541{ 541{
542 struct dasd_eckd_private *private; 542 struct dasd_eckd_private *private;
543 struct dasd_eckd_confdata *confdata; 543 int count;
544 544
545 private = (struct dasd_eckd_private *) device->private; 545 private = (struct dasd_eckd_private *) device->private;
546 if (!private) 546 if (!private)
547 return -ENODEV; 547 return -ENODEV;
548 confdata = &private->conf_data; 548 if (!private->ned || !private->gneq)
549 if (!confdata)
550 return -ENODEV; 549 return -ENODEV;
551 550
552 memset(uid, 0, sizeof(struct dasd_uid)); 551 memset(uid, 0, sizeof(struct dasd_uid));
553 memcpy(uid->vendor, confdata->ned1.HDA_manufacturer, 552 memcpy(uid->vendor, private->ned->HDA_manufacturer,
554 sizeof(uid->vendor) - 1); 553 sizeof(uid->vendor) - 1);
555 EBCASC(uid->vendor, sizeof(uid->vendor) - 1); 554 EBCASC(uid->vendor, sizeof(uid->vendor) - 1);
556 memcpy(uid->serial, confdata->ned1.HDA_location, 555 memcpy(uid->serial, private->ned->HDA_location,
557 sizeof(uid->serial) - 1); 556 sizeof(uid->serial) - 1);
558 EBCASC(uid->serial, sizeof(uid->serial) - 1); 557 EBCASC(uid->serial, sizeof(uid->serial) - 1);
559 uid->ssid = confdata->neq.subsystemID; 558 uid->ssid = private->gneq->subsystemID;
 560 	uid->real_unit_addr = confdata->ned1.unit_addr; 559 	uid->real_unit_addr = private->ned->unit_addr;
561 if (confdata->ned2.sneq.flags == 0x40 && 560 if (private->sneq) {
562 confdata->ned2.sneq.format == 0x0001) { 561 uid->type = private->sneq->sua_flags;
563 uid->type = confdata->ned2.sneq.sua_flags;
564 if (uid->type == UA_BASE_PAV_ALIAS) 562 if (uid->type == UA_BASE_PAV_ALIAS)
565 uid->base_unit_addr = confdata->ned2.sneq.base_unit_addr; 563 uid->base_unit_addr = private->sneq->base_unit_addr;
566 } else { 564 } else {
567 uid->type = UA_BASE_DEVICE; 565 uid->type = UA_BASE_DEVICE;
568 } 566 }
567 if (private->vdsneq) {
568 for (count = 0; count < 16; count++) {
569 sprintf(uid->vduit+2*count, "%02x",
570 private->vdsneq->uit[count]);
571 }
572 }
569 return 0; 573 return 0;
570} 574}
571 575
@@ -623,6 +627,15 @@ static int dasd_eckd_read_conf_lpm(struct dasd_device *device,
623 ret = -ENOMEM; 627 ret = -ENOMEM;
624 goto out_error; 628 goto out_error;
625 } 629 }
630
631 /*
632 * buffer has to start with EBCDIC "V1.0" to show
633 * support for virtual device SNEQ
634 */
635 rcd_buf[0] = 0xE5;
636 rcd_buf[1] = 0xF1;
637 rcd_buf[2] = 0x4B;
638 rcd_buf[3] = 0xF0;
626 cqr = dasd_eckd_build_rcd_lpm(device, rcd_buf, ciw, lpm); 639 cqr = dasd_eckd_build_rcd_lpm(device, rcd_buf, ciw, lpm);
627 if (IS_ERR(cqr)) { 640 if (IS_ERR(cqr)) {
628 ret = PTR_ERR(cqr); 641 ret = PTR_ERR(cqr);
@@ -646,8 +659,62 @@ out_error:
646 return ret; 659 return ret;
647} 660}
648 661
649static int 662static int dasd_eckd_identify_conf_parts(struct dasd_eckd_private *private)
650dasd_eckd_read_conf(struct dasd_device *device) 663{
664
665 struct dasd_sneq *sneq;
666 int i, count;
667
668 private->ned = NULL;
669 private->sneq = NULL;
670 private->vdsneq = NULL;
671 private->gneq = NULL;
672 count = private->conf_len / sizeof(struct dasd_sneq);
673 sneq = (struct dasd_sneq *)private->conf_data;
674 for (i = 0; i < count; ++i) {
675 if (sneq->flags.identifier == 1 && sneq->format == 1)
676 private->sneq = sneq;
677 else if (sneq->flags.identifier == 1 && sneq->format == 4)
678 private->vdsneq = (struct vd_sneq *)sneq;
679 else if (sneq->flags.identifier == 2)
680 private->gneq = (struct dasd_gneq *)sneq;
681 else if (sneq->flags.identifier == 3 && sneq->res1 == 1)
682 private->ned = (struct dasd_ned *)sneq;
683 sneq++;
684 }
685 if (!private->ned || !private->gneq) {
686 private->ned = NULL;
687 private->sneq = NULL;
688 private->vdsneq = NULL;
689 private->gneq = NULL;
690 return -EINVAL;
691 }
692 return 0;
693
694};
695
696static unsigned char dasd_eckd_path_access(void *conf_data, int conf_len)
697{
698 struct dasd_gneq *gneq;
699 int i, count, found;
700
701 count = conf_len / sizeof(*gneq);
702 gneq = (struct dasd_gneq *)conf_data;
703 found = 0;
704 for (i = 0; i < count; ++i) {
705 if (gneq->flags.identifier == 2) {
706 found = 1;
707 break;
708 }
709 gneq++;
710 }
711 if (found)
712 return ((char *)gneq)[18] & 0x07;
713 else
714 return 0;
715}
716
717static int dasd_eckd_read_conf(struct dasd_device *device)
651{ 718{
652 void *conf_data; 719 void *conf_data;
653 int conf_len, conf_data_saved; 720 int conf_len, conf_data_saved;
@@ -661,7 +728,6 @@ dasd_eckd_read_conf(struct dasd_device *device)
661 path_data->opm = ccw_device_get_path_mask(device->cdev); 728 path_data->opm = ccw_device_get_path_mask(device->cdev);
662 lpm = 0x80; 729 lpm = 0x80;
663 conf_data_saved = 0; 730 conf_data_saved = 0;
664
665 /* get configuration data per operational path */ 731 /* get configuration data per operational path */
666 for (lpm = 0x80; lpm; lpm>>= 1) { 732 for (lpm = 0x80; lpm; lpm>>= 1) {
667 if (lpm & path_data->opm){ 733 if (lpm & path_data->opm){
@@ -678,22 +744,20 @@ dasd_eckd_read_conf(struct dasd_device *device)
678 "data retrieved"); 744 "data retrieved");
679 continue; /* no error */ 745 continue; /* no error */
680 } 746 }
681 if (conf_len != sizeof(struct dasd_eckd_confdata)) {
682 MESSAGE(KERN_WARNING,
683 "sizes of configuration data mismatch"
684 "%d (read) vs %ld (expected)",
685 conf_len,
686 sizeof(struct dasd_eckd_confdata));
687 kfree(conf_data);
688 continue; /* no error */
689 }
690 /* save first valid configuration data */ 747 /* save first valid configuration data */
691 if (!conf_data_saved){ 748 if (!conf_data_saved) {
692 memcpy(&private->conf_data, conf_data, 749 kfree(private->conf_data);
693 sizeof(struct dasd_eckd_confdata)); 750 private->conf_data = conf_data;
751 private->conf_len = conf_len;
752 if (dasd_eckd_identify_conf_parts(private)) {
753 private->conf_data = NULL;
754 private->conf_len = 0;
755 kfree(conf_data);
756 continue;
757 }
694 conf_data_saved++; 758 conf_data_saved++;
695 } 759 }
696 switch (((char *)conf_data)[242] & 0x07){ 760 switch (dasd_eckd_path_access(conf_data, conf_len)) {
697 case 0x02: 761 case 0x02:
698 path_data->npm |= lpm; 762 path_data->npm |= lpm;
699 break; 763 break;
@@ -701,7 +765,8 @@ dasd_eckd_read_conf(struct dasd_device *device)
701 path_data->ppm |= lpm; 765 path_data->ppm |= lpm;
702 break; 766 break;
703 } 767 }
704 kfree(conf_data); 768 if (conf_data != private->conf_data)
769 kfree(conf_data);
705 } 770 }
706 } 771 }
707 return 0; 772 return 0;
@@ -952,6 +1017,7 @@ out_err2:
952 dasd_free_block(device->block); 1017 dasd_free_block(device->block);
953 device->block = NULL; 1018 device->block = NULL;
954out_err1: 1019out_err1:
1020 kfree(private->conf_data);
955 kfree(device->private); 1021 kfree(device->private);
956 device->private = NULL; 1022 device->private = NULL;
957 return rc; 1023 return rc;
@@ -959,7 +1025,17 @@ out_err1:
959 1025
960static void dasd_eckd_uncheck_device(struct dasd_device *device) 1026static void dasd_eckd_uncheck_device(struct dasd_device *device)
961{ 1027{
1028 struct dasd_eckd_private *private;
1029
1030 private = (struct dasd_eckd_private *) device->private;
962 dasd_alias_disconnect_device_from_lcu(device); 1031 dasd_alias_disconnect_device_from_lcu(device);
1032 private->ned = NULL;
1033 private->sneq = NULL;
1034 private->vdsneq = NULL;
1035 private->gneq = NULL;
1036 private->conf_len = 0;
1037 kfree(private->conf_data);
1038 private->conf_data = NULL;
963} 1039}
964 1040
965static struct dasd_ccw_req * 1041static struct dasd_ccw_req *
@@ -1746,9 +1822,10 @@ dasd_eckd_fill_info(struct dasd_device * device,
1746 info->characteristics_size = sizeof(struct dasd_eckd_characteristics); 1822 info->characteristics_size = sizeof(struct dasd_eckd_characteristics);
1747 memcpy(info->characteristics, &private->rdc_data, 1823 memcpy(info->characteristics, &private->rdc_data,
1748 sizeof(struct dasd_eckd_characteristics)); 1824 sizeof(struct dasd_eckd_characteristics));
1749 info->confdata_size = sizeof(struct dasd_eckd_confdata); 1825 info->confdata_size = min((unsigned long)private->conf_len,
1750 memcpy(info->configuration_data, &private->conf_data, 1826 sizeof(info->configuration_data));
1751 sizeof(struct dasd_eckd_confdata)); 1827 memcpy(info->configuration_data, private->conf_data,
1828 info->confdata_size);
1752 return 0; 1829 return 0;
1753} 1830}
1754 1831
diff --git a/drivers/s390/block/dasd_eckd.h b/drivers/s390/block/dasd_eckd.h
index fc2509c939bc..4bf0aa5112c1 100644
--- a/drivers/s390/block/dasd_eckd.h
+++ b/drivers/s390/block/dasd_eckd.h
@@ -231,133 +231,62 @@ struct dasd_eckd_characteristics {
231 __u8 reserved3[10]; 231 __u8 reserved3[10];
232} __attribute__ ((packed)); 232} __attribute__ ((packed));
233 233
234struct dasd_eckd_confdata { 234/* elements of the configuration data */
235struct dasd_ned {
235 struct { 236 struct {
236 struct { 237 __u8 identifier:2;
237 unsigned char identifier:2; 238 __u8 token_id:1;
238 unsigned char token_id:1; 239 __u8 sno_valid:1;
239 unsigned char sno_valid:1; 240 __u8 subst_sno:1;
240 unsigned char subst_sno:1; 241 __u8 recNED:1;
241 unsigned char recNED:1; 242 __u8 emuNED:1;
242 unsigned char emuNED:1; 243 __u8 reserved:1;
243 unsigned char reserved:1; 244 } __attribute__ ((packed)) flags;
244 } __attribute__ ((packed)) flags; 245 __u8 descriptor;
245 __u8 descriptor; 246 __u8 dev_class;
246 __u8 dev_class; 247 __u8 reserved;
247 __u8 reserved; 248 __u8 dev_type[6];
248 unsigned char dev_type[6]; 249 __u8 dev_model[3];
249 unsigned char dev_model[3]; 250 __u8 HDA_manufacturer[3];
250 unsigned char HDA_manufacturer[3]; 251 __u8 HDA_location[2];
251 unsigned char HDA_location[2]; 252 __u8 HDA_seqno[12];
252 unsigned char HDA_seqno[12]; 253 __u8 ID;
253 __u8 ID; 254 __u8 unit_addr;
254 __u8 unit_addr; 255} __attribute__ ((packed));
255 } __attribute__ ((packed)) ned1; 256
256 union { 257struct dasd_sneq {
257 struct {
258 struct {
259 unsigned char identifier:2;
260 unsigned char token_id:1;
261 unsigned char sno_valid:1;
262 unsigned char subst_sno:1;
263 unsigned char recNED:1;
264 unsigned char emuNED:1;
265 unsigned char reserved:1;
266 } __attribute__ ((packed)) flags;
267 __u8 descriptor;
268 __u8 reserved[2];
269 unsigned char dev_type[6];
270 unsigned char dev_model[3];
271 unsigned char DASD_manufacturer[3];
272 unsigned char DASD_location[2];
273 unsigned char DASD_seqno[12];
274 __u16 ID;
275 } __attribute__ ((packed)) ned;
276 struct {
277 unsigned char flags; /* byte 0 */
278 unsigned char res1; /* byte 1 */
279 __u16 format; /* byte 2-3 */
280 unsigned char res2[4]; /* byte 4-7 */
281 unsigned char sua_flags; /* byte 8 */
282 __u8 base_unit_addr; /* byte 9 */
283 unsigned char res3[22]; /* byte 10-31 */
284 } __attribute__ ((packed)) sneq;
285 } __attribute__ ((packed)) ned2;
286 struct { 258 struct {
287 struct { 259 __u8 identifier:2;
288 unsigned char identifier:2; 260 __u8 reserved:6;
289 unsigned char token_id:1; 261 } __attribute__ ((packed)) flags;
290 unsigned char sno_valid:1; 262 __u8 res1;
291 unsigned char subst_sno:1; 263 __u16 format;
292 unsigned char recNED:1; 264 __u8 res2[4]; /* byte 4- 7 */
293 unsigned char emuNED:1; 265 __u8 sua_flags; /* byte 8 */
294 unsigned char reserved:1; 266 __u8 base_unit_addr; /* byte 9 */
295 } __attribute__ ((packed)) flags; 267 __u8 res3[22]; /* byte 10-31 */
296 __u8 descriptor; 268} __attribute__ ((packed));
297 __u8 reserved[2]; 269
298 unsigned char cont_type[6]; 270struct vd_sneq {
299 unsigned char cont_model[3];
300 unsigned char cont_manufacturer[3];
301 unsigned char cont_location[2];
302 unsigned char cont_seqno[12];
303 __u16 ID;
304 } __attribute__ ((packed)) ned3;
305 struct { 271 struct {
306 struct { 272 __u8 identifier:2;
307 unsigned char identifier:2; 273 __u8 reserved:6;
308 unsigned char token_id:1; 274 } __attribute__ ((packed)) flags;
309 unsigned char sno_valid:1; 275 __u8 res1;
310 unsigned char subst_sno:1; 276 __u16 format;
311 unsigned char recNED:1; 277 __u8 res2[4]; /* byte 4- 7 */
312 unsigned char emuNED:1; 278 __u8 uit[16]; /* byte 8-23 */
313 unsigned char reserved:1; 279 __u8 res3[8]; /* byte 24-31 */
314 } __attribute__ ((packed)) flags; 280} __attribute__ ((packed));
315 __u8 descriptor; 281
316 __u8 reserved[2]; 282struct dasd_gneq {
317 unsigned char cont_type[6];
318 unsigned char empty[3];
319 unsigned char cont_manufacturer[3];
320 unsigned char cont_location[2];
321 unsigned char cont_seqno[12];
322 __u16 ID;
323 } __attribute__ ((packed)) ned4;
324 unsigned char ned5[32];
325 unsigned char ned6[32];
326 unsigned char ned7[32];
327 struct { 283 struct {
328 struct { 284 __u8 identifier:2;
329 unsigned char identifier:2; 285 __u8 reserved:6;
330 unsigned char reserved:6; 286 } __attribute__ ((packed)) flags;
331 } __attribute__ ((packed)) flags; 287 __u8 reserved[7];
332 __u8 selector; 288 __u16 subsystemID;
333 __u16 interfaceID; 289 __u8 reserved2[22];
334 __u32 reserved;
335 __u16 subsystemID;
336 struct {
337 unsigned char sp0:1;
338 unsigned char sp1:1;
339 unsigned char reserved:5;
340 unsigned char scluster:1;
341 } __attribute__ ((packed)) spathID;
342 __u8 unit_address;
343 __u8 dev_ID;
344 __u8 dev_address;
345 __u8 adapterID;
346 __u16 link_address;
347 struct {
348 unsigned char parallel:1;
349 unsigned char escon:1;
350 unsigned char reserved:1;
351 unsigned char ficon:1;
352 unsigned char reserved2:4;
353 } __attribute__ ((packed)) protocol_type;
354 struct {
355 unsigned char PID_in_236:1;
356 unsigned char reserved:7;
357 } __attribute__ ((packed)) format_flags;
358 __u8 log_dev_address;
359 unsigned char reserved2[12];
360 } __attribute__ ((packed)) neq;
361} __attribute__ ((packed)); 290} __attribute__ ((packed));
362 291
363struct dasd_eckd_path { 292struct dasd_eckd_path {
@@ -463,7 +392,14 @@ struct alias_pav_group {
463 392
464struct dasd_eckd_private { 393struct dasd_eckd_private {
465 struct dasd_eckd_characteristics rdc_data; 394 struct dasd_eckd_characteristics rdc_data;
466 struct dasd_eckd_confdata conf_data; 395 u8 *conf_data;
396 int conf_len;
397 /* pointers to specific parts in the conf_data */
398 struct dasd_ned *ned;
399 struct dasd_sneq *sneq;
400 struct vd_sneq *vdsneq;
401 struct dasd_gneq *gneq;
402
467 struct dasd_eckd_path path_data; 403 struct dasd_eckd_path path_data;
468 struct eckd_count count_area[5]; 404 struct eckd_count count_area[5];
469 int init_cqr_status; 405 int init_cqr_status;
diff --git a/drivers/s390/block/dasd_int.h b/drivers/s390/block/dasd_int.h
index fb2f931cf844..31ecaa4a40e4 100644
--- a/drivers/s390/block/dasd_int.h
+++ b/drivers/s390/block/dasd_int.h
@@ -307,6 +307,7 @@ struct dasd_uid {
307 __u16 ssid; 307 __u16 ssid;
308 __u8 real_unit_addr; 308 __u8 real_unit_addr;
309 __u8 base_unit_addr; 309 __u8 base_unit_addr;
310 char vduit[33];
310}; 311};
311 312
312/* 313/*
diff --git a/drivers/s390/char/sclp.c b/drivers/s390/char/sclp.c
index 3c8b25e6c345..1fd8f2193ed8 100644
--- a/drivers/s390/char/sclp.c
+++ b/drivers/s390/char/sclp.c
@@ -399,6 +399,7 @@ sclp_tod_from_jiffies(unsigned long jiffies)
399void 399void
400sclp_sync_wait(void) 400sclp_sync_wait(void)
401{ 401{
402 unsigned long long old_tick;
402 unsigned long flags; 403 unsigned long flags;
403 unsigned long cr0, cr0_sync; 404 unsigned long cr0, cr0_sync;
404 u64 timeout; 405 u64 timeout;
@@ -419,11 +420,12 @@ sclp_sync_wait(void)
419 if (!irq_context) 420 if (!irq_context)
420 local_bh_disable(); 421 local_bh_disable();
421 /* Enable service-signal interruption, disable timer interrupts */ 422 /* Enable service-signal interruption, disable timer interrupts */
423 old_tick = local_tick_disable();
422 trace_hardirqs_on(); 424 trace_hardirqs_on();
423 __ctl_store(cr0, 0, 0); 425 __ctl_store(cr0, 0, 0);
424 cr0_sync = cr0; 426 cr0_sync = cr0;
427 cr0_sync &= 0xffff00a0;
425 cr0_sync |= 0x00000200; 428 cr0_sync |= 0x00000200;
426 cr0_sync &= 0xFFFFF3AC;
427 __ctl_load(cr0_sync, 0, 0); 429 __ctl_load(cr0_sync, 0, 0);
428 __raw_local_irq_stosm(0x01); 430 __raw_local_irq_stosm(0x01);
429 /* Loop until driver state indicates finished request */ 431 /* Loop until driver state indicates finished request */
@@ -439,9 +441,9 @@ sclp_sync_wait(void)
439 __ctl_load(cr0, 0, 0); 441 __ctl_load(cr0, 0, 0);
440 if (!irq_context) 442 if (!irq_context)
441 _local_bh_enable(); 443 _local_bh_enable();
444 local_tick_enable(old_tick);
442 local_irq_restore(flags); 445 local_irq_restore(flags);
443} 446}
444
445EXPORT_SYMBOL(sclp_sync_wait); 447EXPORT_SYMBOL(sclp_sync_wait);
446 448
447/* Dispatch changes in send and receive mask to registered listeners. */ 449/* Dispatch changes in send and receive mask to registered listeners. */
diff --git a/drivers/s390/char/sclp_cmd.c b/drivers/s390/char/sclp_cmd.c
index 0c2b77493db4..eb5f1b8bc57f 100644
--- a/drivers/s390/char/sclp_cmd.c
+++ b/drivers/s390/char/sclp_cmd.c
@@ -427,6 +427,8 @@ static int sclp_mem_notifier(struct notifier_block *nb,
427 sclp_attach_storage(id); 427 sclp_attach_storage(id);
428 switch (action) { 428 switch (action) {
429 case MEM_ONLINE: 429 case MEM_ONLINE:
430 case MEM_GOING_OFFLINE:
431 case MEM_CANCEL_OFFLINE:
430 break; 432 break;
431 case MEM_GOING_ONLINE: 433 case MEM_GOING_ONLINE:
432 rc = sclp_mem_change_state(start, size, 1); 434 rc = sclp_mem_change_state(start, size, 1);
@@ -434,6 +436,9 @@ static int sclp_mem_notifier(struct notifier_block *nb,
434 case MEM_CANCEL_ONLINE: 436 case MEM_CANCEL_ONLINE:
435 sclp_mem_change_state(start, size, 0); 437 sclp_mem_change_state(start, size, 0);
436 break; 438 break;
439 case MEM_OFFLINE:
440 sclp_mem_change_state(start, size, 0);
441 break;
437 default: 442 default:
438 rc = -EINVAL; 443 rc = -EINVAL;
439 break; 444 break;
diff --git a/drivers/s390/char/sclp_config.c b/drivers/s390/char/sclp_config.c
index fff4ff485d9b..4cebd6ee6d27 100644
--- a/drivers/s390/char/sclp_config.c
+++ b/drivers/s390/char/sclp_config.c
@@ -8,7 +8,6 @@
8#include <linux/init.h> 8#include <linux/init.h>
9#include <linux/errno.h> 9#include <linux/errno.h>
10#include <linux/cpu.h> 10#include <linux/cpu.h>
11#include <linux/kthread.h>
12#include <linux/sysdev.h> 11#include <linux/sysdev.h>
13#include <linux/workqueue.h> 12#include <linux/workqueue.h>
14#include <asm/smp.h> 13#include <asm/smp.h>
@@ -41,19 +40,9 @@ static void sclp_cpu_capability_notify(struct work_struct *work)
41 put_online_cpus(); 40 put_online_cpus();
42} 41}
43 42
44static int sclp_cpu_kthread(void *data)
45{
46 smp_rescan_cpus();
47 return 0;
48}
49
50static void __ref sclp_cpu_change_notify(struct work_struct *work) 43static void __ref sclp_cpu_change_notify(struct work_struct *work)
51{ 44{
52 /* Can't call smp_rescan_cpus() from workqueue context since it may 45 smp_rescan_cpus();
53 * deadlock in case of cpu hotplug. So we have to create a kernel
54 * thread in order to call it.
55 */
56 kthread_run(sclp_cpu_kthread, NULL, "cpu_rescan");
57} 46}
58 47
59static void sclp_conf_receiver_fn(struct evbuf_header *evbuf) 48static void sclp_conf_receiver_fn(struct evbuf_header *evbuf)
diff --git a/drivers/s390/cio/idset.c b/drivers/s390/cio/idset.c
index ef7bc0a125ef..cf8f24a4b5eb 100644
--- a/drivers/s390/cio/idset.c
+++ b/drivers/s390/cio/idset.c
@@ -5,7 +5,7 @@
5 * Author(s): Peter Oberparleiter <peter.oberparleiter@de.ibm.com> 5 * Author(s): Peter Oberparleiter <peter.oberparleiter@de.ibm.com>
6 */ 6 */
7 7
8#include <linux/slab.h> 8#include <linux/vmalloc.h>
9#include <linux/bitops.h> 9#include <linux/bitops.h>
10#include "idset.h" 10#include "idset.h"
11#include "css.h" 11#include "css.h"
@@ -25,18 +25,18 @@ static struct idset *idset_new(int num_ssid, int num_id)
25{ 25{
26 struct idset *set; 26 struct idset *set;
27 27
28 set = kzalloc(sizeof(struct idset) + bitmap_size(num_ssid, num_id), 28 set = vmalloc(sizeof(struct idset) + bitmap_size(num_ssid, num_id));
29 GFP_KERNEL);
30 if (set) { 29 if (set) {
31 set->num_ssid = num_ssid; 30 set->num_ssid = num_ssid;
32 set->num_id = num_id; 31 set->num_id = num_id;
32 memset(set->bitmap, 0, bitmap_size(num_ssid, num_id));
33 } 33 }
34 return set; 34 return set;
35} 35}
36 36
37void idset_free(struct idset *set) 37void idset_free(struct idset *set)
38{ 38{
39 kfree(set); 39 vfree(set);
40} 40}
41 41
42void idset_clear(struct idset *set) 42void idset_clear(struct idset *set)
diff --git a/drivers/s390/cio/qdio_main.c b/drivers/s390/cio/qdio_main.c
index d10c73cc1688..d15648514a0f 100644
--- a/drivers/s390/cio/qdio_main.c
+++ b/drivers/s390/cio/qdio_main.c
@@ -1355,7 +1355,7 @@ int qdio_allocate(struct qdio_initialize *init_data)
1355 goto out_rel; 1355 goto out_rel;
1356 1356
1357 /* qdr is used in ccw1.cda which is u32 */ 1357 /* qdr is used in ccw1.cda which is u32 */
1358 irq_ptr->qdr = kzalloc(sizeof(struct qdr), GFP_KERNEL | GFP_DMA); 1358 irq_ptr->qdr = (struct qdr *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
1359 if (!irq_ptr->qdr) 1359 if (!irq_ptr->qdr)
1360 goto out_rel; 1360 goto out_rel;
1361 WARN_ON((unsigned long)irq_ptr->qdr & 0xfff); 1361 WARN_ON((unsigned long)irq_ptr->qdr & 0xfff);
diff --git a/drivers/s390/cio/qdio_perf.c b/drivers/s390/cio/qdio_perf.c
index ea01b85b1cc9..ec5c4a414235 100644
--- a/drivers/s390/cio/qdio_perf.c
+++ b/drivers/s390/cio/qdio_perf.c
@@ -142,7 +142,7 @@ int __init qdio_setup_perf_stats(void)
142 return 0; 142 return 0;
143} 143}
144 144
145void __exit qdio_remove_perf_stats(void) 145void qdio_remove_perf_stats(void)
146{ 146{
147#ifdef CONFIG_PROC_FS 147#ifdef CONFIG_PROC_FS
148 remove_proc_entry("qdio_perf", NULL); 148 remove_proc_entry("qdio_perf", NULL);
diff --git a/drivers/s390/cio/qdio_setup.c b/drivers/s390/cio/qdio_setup.c
index f0923a8aceda..1bd2a208db28 100644
--- a/drivers/s390/cio/qdio_setup.c
+++ b/drivers/s390/cio/qdio_setup.c
@@ -325,7 +325,7 @@ void qdio_release_memory(struct qdio_irq *irq_ptr)
325 kmem_cache_free(qdio_q_cache, q); 325 kmem_cache_free(qdio_q_cache, q);
326 } 326 }
327 } 327 }
328 kfree(irq_ptr->qdr); 328 free_page((unsigned long) irq_ptr->qdr);
329 free_page(irq_ptr->chsc_page); 329 free_page(irq_ptr->chsc_page);
330 free_page((unsigned long) irq_ptr); 330 free_page((unsigned long) irq_ptr);
331} 331}
@@ -515,7 +515,7 @@ int __init qdio_setup_init(void)
515 return 0; 515 return 0;
516} 516}
517 517
518void __exit qdio_setup_exit(void) 518void qdio_setup_exit(void)
519{ 519{
520 kmem_cache_destroy(qdio_q_cache); 520 kmem_cache_destroy(qdio_q_cache);
521} 521}
diff --git a/drivers/s390/kvm/kvm_virtio.c b/drivers/s390/kvm/kvm_virtio.c
index 79954bd6bfa5..292b60da6dc7 100644
--- a/drivers/s390/kvm/kvm_virtio.c
+++ b/drivers/s390/kvm/kvm_virtio.c
@@ -352,7 +352,7 @@ static __init int early_put_chars(u32 vtermno, const char *buf, int count)
352 return len; 352 return len;
353} 353}
354 354
355void s390_virtio_console_init(void) 355void __init s390_virtio_console_init(void)
356{ 356{
357 virtio_cons_early_init(early_put_chars); 357 virtio_cons_early_init(early_put_chars);
358} 358}
diff --git a/drivers/s390/net/qeth_core.h b/drivers/s390/net/qeth_core.h
index 1895dbb553cd..80971c21ea1a 100644
--- a/drivers/s390/net/qeth_core.h
+++ b/drivers/s390/net/qeth_core.h
@@ -419,6 +419,7 @@ struct qeth_qdio_out_buffer {
419 int next_element_to_fill; 419 int next_element_to_fill;
420 struct sk_buff_head skb_list; 420 struct sk_buff_head skb_list;
421 struct list_head ctx_list; 421 struct list_head ctx_list;
422 int is_header[16];
422}; 423};
423 424
424struct qeth_card; 425struct qeth_card;
@@ -785,7 +786,7 @@ void qeth_core_remove_osn_attributes(struct device *);
785 786
786/* exports for qeth discipline device drivers */ 787/* exports for qeth discipline device drivers */
787extern struct qeth_card_list_struct qeth_core_card_list; 788extern struct qeth_card_list_struct qeth_core_card_list;
788 789extern struct kmem_cache *qeth_core_header_cache;
789extern struct qeth_dbf_info qeth_dbf[QETH_DBF_INFOS]; 790extern struct qeth_dbf_info qeth_dbf[QETH_DBF_INFOS];
790 791
791void qeth_set_allowed_threads(struct qeth_card *, unsigned long , int); 792void qeth_set_allowed_threads(struct qeth_card *, unsigned long , int);
@@ -843,7 +844,7 @@ int qeth_get_priority_queue(struct qeth_card *, struct sk_buff *, int, int);
843int qeth_get_elements_no(struct qeth_card *, void *, struct sk_buff *, int); 844int qeth_get_elements_no(struct qeth_card *, void *, struct sk_buff *, int);
844int qeth_do_send_packet_fast(struct qeth_card *, struct qeth_qdio_out_q *, 845int qeth_do_send_packet_fast(struct qeth_card *, struct qeth_qdio_out_q *,
845 struct sk_buff *, struct qeth_hdr *, int, 846 struct sk_buff *, struct qeth_hdr *, int,
846 struct qeth_eddp_context *); 847 struct qeth_eddp_context *, int, int);
847int qeth_do_send_packet(struct qeth_card *, struct qeth_qdio_out_q *, 848int qeth_do_send_packet(struct qeth_card *, struct qeth_qdio_out_q *,
848 struct sk_buff *, struct qeth_hdr *, 849 struct sk_buff *, struct qeth_hdr *,
849 int, struct qeth_eddp_context *); 850 int, struct qeth_eddp_context *);
diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c
index cebb25e36e82..bd420d1b9a0d 100644
--- a/drivers/s390/net/qeth_core_main.c
+++ b/drivers/s390/net/qeth_core_main.c
@@ -19,8 +19,8 @@
19#include <linux/mii.h> 19#include <linux/mii.h>
20#include <linux/kthread.h> 20#include <linux/kthread.h>
21 21
22#include <asm-s390/ebcdic.h> 22#include <asm/ebcdic.h>
23#include <asm-s390/io.h> 23#include <asm/io.h>
24#include <asm/s390_rdev.h> 24#include <asm/s390_rdev.h>
25 25
26#include "qeth_core.h" 26#include "qeth_core.h"
@@ -48,6 +48,8 @@ EXPORT_SYMBOL_GPL(qeth_dbf);
48 48
49struct qeth_card_list_struct qeth_core_card_list; 49struct qeth_card_list_struct qeth_core_card_list;
50EXPORT_SYMBOL_GPL(qeth_core_card_list); 50EXPORT_SYMBOL_GPL(qeth_core_card_list);
51struct kmem_cache *qeth_core_header_cache;
52EXPORT_SYMBOL_GPL(qeth_core_header_cache);
51 53
52static struct device *qeth_core_root_dev; 54static struct device *qeth_core_root_dev;
53static unsigned int known_devices[][10] = QETH_MODELLIST_ARRAY; 55static unsigned int known_devices[][10] = QETH_MODELLIST_ARRAY;
@@ -933,6 +935,10 @@ static void qeth_clear_output_buffer(struct qeth_qdio_out_q *queue,
933 } 935 }
934 qeth_eddp_buf_release_contexts(buf); 936 qeth_eddp_buf_release_contexts(buf);
935 for (i = 0; i < QETH_MAX_BUFFER_ELEMENTS(queue->card); ++i) { 937 for (i = 0; i < QETH_MAX_BUFFER_ELEMENTS(queue->card); ++i) {
938 if (buf->buffer->element[i].addr && buf->is_header[i])
939 kmem_cache_free(qeth_core_header_cache,
940 buf->buffer->element[i].addr);
941 buf->is_header[i] = 0;
936 buf->buffer->element[i].length = 0; 942 buf->buffer->element[i].length = 0;
937 buf->buffer->element[i].addr = NULL; 943 buf->buffer->element[i].addr = NULL;
938 buf->buffer->element[i].flags = 0; 944 buf->buffer->element[i].flags = 0;
@@ -3002,8 +3008,8 @@ int qeth_get_elements_no(struct qeth_card *card, void *hdr,
3002 if (skb_shinfo(skb)->nr_frags > 0) 3008 if (skb_shinfo(skb)->nr_frags > 0)
3003 elements_needed = (skb_shinfo(skb)->nr_frags + 1); 3009 elements_needed = (skb_shinfo(skb)->nr_frags + 1);
3004 if (elements_needed == 0) 3010 if (elements_needed == 0)
3005 elements_needed = 1 + (((((unsigned long) hdr) % PAGE_SIZE) 3011 elements_needed = 1 + (((((unsigned long) skb->data) %
3006 + skb->len) >> PAGE_SHIFT); 3012 PAGE_SIZE) + skb->len) >> PAGE_SHIFT);
3007 if ((elements_needed + elems) > QETH_MAX_BUFFER_ELEMENTS(card)) { 3013 if ((elements_needed + elems) > QETH_MAX_BUFFER_ELEMENTS(card)) {
3008 QETH_DBF_MESSAGE(2, "Invalid size of IP packet " 3014 QETH_DBF_MESSAGE(2, "Invalid size of IP packet "
3009 "(Number=%d / Length=%d). Discarded.\n", 3015 "(Number=%d / Length=%d). Discarded.\n",
@@ -3015,7 +3021,8 @@ int qeth_get_elements_no(struct qeth_card *card, void *hdr,
3015EXPORT_SYMBOL_GPL(qeth_get_elements_no); 3021EXPORT_SYMBOL_GPL(qeth_get_elements_no);
3016 3022
3017static inline void __qeth_fill_buffer(struct sk_buff *skb, 3023static inline void __qeth_fill_buffer(struct sk_buff *skb,
3018 struct qdio_buffer *buffer, int is_tso, int *next_element_to_fill) 3024 struct qdio_buffer *buffer, int is_tso, int *next_element_to_fill,
3025 int offset)
3019{ 3026{
3020 int length = skb->len; 3027 int length = skb->len;
3021 int length_here; 3028 int length_here;
@@ -3027,6 +3034,11 @@ static inline void __qeth_fill_buffer(struct sk_buff *skb,
3027 data = skb->data; 3034 data = skb->data;
3028 first_lap = (is_tso == 0 ? 1 : 0); 3035 first_lap = (is_tso == 0 ? 1 : 0);
3029 3036
3037 if (offset >= 0) {
3038 data = skb->data + offset;
3039 first_lap = 0;
3040 }
3041
3030 while (length > 0) { 3042 while (length > 0) {
3031 /* length_here is the remaining amount of data in this page */ 3043 /* length_here is the remaining amount of data in this page */
3032 length_here = PAGE_SIZE - ((unsigned long) data % PAGE_SIZE); 3044 length_here = PAGE_SIZE - ((unsigned long) data % PAGE_SIZE);
@@ -3058,22 +3070,22 @@ static inline void __qeth_fill_buffer(struct sk_buff *skb,
3058} 3070}
3059 3071
3060static inline int qeth_fill_buffer(struct qeth_qdio_out_q *queue, 3072static inline int qeth_fill_buffer(struct qeth_qdio_out_q *queue,
3061 struct qeth_qdio_out_buffer *buf, struct sk_buff *skb) 3073 struct qeth_qdio_out_buffer *buf, struct sk_buff *skb,
3074 struct qeth_hdr *hdr, int offset, int hd_len)
3062{ 3075{
3063 struct qdio_buffer *buffer; 3076 struct qdio_buffer *buffer;
3064 struct qeth_hdr_tso *hdr;
3065 int flush_cnt = 0, hdr_len, large_send = 0; 3077 int flush_cnt = 0, hdr_len, large_send = 0;
3066 3078
3067 buffer = buf->buffer; 3079 buffer = buf->buffer;
3068 atomic_inc(&skb->users); 3080 atomic_inc(&skb->users);
3069 skb_queue_tail(&buf->skb_list, skb); 3081 skb_queue_tail(&buf->skb_list, skb);
3070 3082
3071 hdr = (struct qeth_hdr_tso *) skb->data;
3072 /*check first on TSO ....*/ 3083 /*check first on TSO ....*/
3073 if (hdr->hdr.hdr.l3.id == QETH_HEADER_TYPE_TSO) { 3084 if (hdr->hdr.l3.id == QETH_HEADER_TYPE_TSO) {
3074 int element = buf->next_element_to_fill; 3085 int element = buf->next_element_to_fill;
3075 3086
3076 hdr_len = sizeof(struct qeth_hdr_tso) + hdr->ext.dg_hdr_len; 3087 hdr_len = sizeof(struct qeth_hdr_tso) +
3088 ((struct qeth_hdr_tso *)hdr)->ext.dg_hdr_len;
3077 /*fill first buffer entry only with header information */ 3089 /*fill first buffer entry only with header information */
3078 buffer->element[element].addr = skb->data; 3090 buffer->element[element].addr = skb->data;
3079 buffer->element[element].length = hdr_len; 3091 buffer->element[element].length = hdr_len;
@@ -3083,9 +3095,20 @@ static inline int qeth_fill_buffer(struct qeth_qdio_out_q *queue,
3083 skb->len -= hdr_len; 3095 skb->len -= hdr_len;
3084 large_send = 1; 3096 large_send = 1;
3085 } 3097 }
3098
3099 if (offset >= 0) {
3100 int element = buf->next_element_to_fill;
3101 buffer->element[element].addr = hdr;
3102 buffer->element[element].length = sizeof(struct qeth_hdr) +
3103 hd_len;
3104 buffer->element[element].flags = SBAL_FLAGS_FIRST_FRAG;
3105 buf->is_header[element] = 1;
3106 buf->next_element_to_fill++;
3107 }
3108
3086 if (skb_shinfo(skb)->nr_frags == 0) 3109 if (skb_shinfo(skb)->nr_frags == 0)
3087 __qeth_fill_buffer(skb, buffer, large_send, 3110 __qeth_fill_buffer(skb, buffer, large_send,
3088 (int *)&buf->next_element_to_fill); 3111 (int *)&buf->next_element_to_fill, offset);
3089 else 3112 else
3090 __qeth_fill_buffer_frag(skb, buffer, large_send, 3113 __qeth_fill_buffer_frag(skb, buffer, large_send,
3091 (int *)&buf->next_element_to_fill); 3114 (int *)&buf->next_element_to_fill);
@@ -3115,7 +3138,7 @@ static inline int qeth_fill_buffer(struct qeth_qdio_out_q *queue,
3115int qeth_do_send_packet_fast(struct qeth_card *card, 3138int qeth_do_send_packet_fast(struct qeth_card *card,
3116 struct qeth_qdio_out_q *queue, struct sk_buff *skb, 3139 struct qeth_qdio_out_q *queue, struct sk_buff *skb,
3117 struct qeth_hdr *hdr, int elements_needed, 3140 struct qeth_hdr *hdr, int elements_needed,
3118 struct qeth_eddp_context *ctx) 3141 struct qeth_eddp_context *ctx, int offset, int hd_len)
3119{ 3142{
3120 struct qeth_qdio_out_buffer *buffer; 3143 struct qeth_qdio_out_buffer *buffer;
3121 int buffers_needed = 0; 3144 int buffers_needed = 0;
@@ -3148,7 +3171,7 @@ int qeth_do_send_packet_fast(struct qeth_card *card,
3148 } 3171 }
3149 atomic_set(&queue->state, QETH_OUT_Q_UNLOCKED); 3172 atomic_set(&queue->state, QETH_OUT_Q_UNLOCKED);
3150 if (ctx == NULL) { 3173 if (ctx == NULL) {
3151 qeth_fill_buffer(queue, buffer, skb); 3174 qeth_fill_buffer(queue, buffer, skb, hdr, offset, hd_len);
3152 qeth_flush_buffers(queue, index, 1); 3175 qeth_flush_buffers(queue, index, 1);
3153 } else { 3176 } else {
3154 flush_cnt = qeth_eddp_fill_buffer(queue, ctx, index); 3177 flush_cnt = qeth_eddp_fill_buffer(queue, ctx, index);
@@ -3224,7 +3247,7 @@ int qeth_do_send_packet(struct qeth_card *card, struct qeth_qdio_out_q *queue,
3224 } 3247 }
3225 } 3248 }
3226 if (ctx == NULL) 3249 if (ctx == NULL)
3227 tmp = qeth_fill_buffer(queue, buffer, skb); 3250 tmp = qeth_fill_buffer(queue, buffer, skb, hdr, -1, 0);
3228 else { 3251 else {
3229 tmp = qeth_eddp_fill_buffer(queue, ctx, 3252 tmp = qeth_eddp_fill_buffer(queue, ctx,
3230 queue->next_buf_to_fill); 3253 queue->next_buf_to_fill);
@@ -4443,8 +4466,17 @@ static int __init qeth_core_init(void)
4443 rc = IS_ERR(qeth_core_root_dev) ? PTR_ERR(qeth_core_root_dev) : 0; 4466 rc = IS_ERR(qeth_core_root_dev) ? PTR_ERR(qeth_core_root_dev) : 0;
4444 if (rc) 4467 if (rc)
4445 goto register_err; 4468 goto register_err;
4446 return 0;
4447 4469
4470 qeth_core_header_cache = kmem_cache_create("qeth_hdr",
4471 sizeof(struct qeth_hdr) + ETH_HLEN, 64, 0, NULL);
4472 if (!qeth_core_header_cache) {
4473 rc = -ENOMEM;
4474 goto slab_err;
4475 }
4476
4477 return 0;
4478slab_err:
4479 s390_root_dev_unregister(qeth_core_root_dev);
4448register_err: 4480register_err:
4449 driver_remove_file(&qeth_core_ccwgroup_driver.driver, 4481 driver_remove_file(&qeth_core_ccwgroup_driver.driver,
4450 &driver_attr_group); 4482 &driver_attr_group);
@@ -4466,6 +4498,7 @@ static void __exit qeth_core_exit(void)
4466 &driver_attr_group); 4498 &driver_attr_group);
4467 ccwgroup_driver_unregister(&qeth_core_ccwgroup_driver); 4499 ccwgroup_driver_unregister(&qeth_core_ccwgroup_driver);
4468 ccw_driver_unregister(&qeth_ccw_driver); 4500 ccw_driver_unregister(&qeth_ccw_driver);
4501 kmem_cache_destroy(qeth_core_header_cache);
4469 qeth_unregister_dbf_views(); 4502 qeth_unregister_dbf_views();
4470 PRINT_INFO("core functions removed\n"); 4503 PRINT_INFO("core functions removed\n");
4471} 4504}
diff --git a/drivers/s390/net/qeth_l2_main.c b/drivers/s390/net/qeth_l2_main.c
index a8b069cd9a4c..b3cee032f578 100644
--- a/drivers/s390/net/qeth_l2_main.c
+++ b/drivers/s390/net/qeth_l2_main.c
@@ -243,8 +243,7 @@ static void qeth_l2_get_packet_type(struct qeth_card *card,
243static void qeth_l2_fill_header(struct qeth_card *card, struct qeth_hdr *hdr, 243static void qeth_l2_fill_header(struct qeth_card *card, struct qeth_hdr *hdr,
244 struct sk_buff *skb, int ipv, int cast_type) 244 struct sk_buff *skb, int ipv, int cast_type)
245{ 245{
246 struct vlan_ethhdr *veth = (struct vlan_ethhdr *)((skb->data) + 246 struct vlan_ethhdr *veth = (struct vlan_ethhdr *)skb_mac_header(skb);
247 QETH_HEADER_SIZE);
248 247
249 memset(hdr, 0, sizeof(struct qeth_hdr)); 248 memset(hdr, 0, sizeof(struct qeth_hdr));
250 hdr->hdr.l2.id = QETH_HEADER_TYPE_LAYER2; 249 hdr->hdr.l2.id = QETH_HEADER_TYPE_LAYER2;
@@ -621,6 +620,9 @@ static int qeth_l2_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
621 int tx_bytes = skb->len; 620 int tx_bytes = skb->len;
622 enum qeth_large_send_types large_send = QETH_LARGE_SEND_NO; 621 enum qeth_large_send_types large_send = QETH_LARGE_SEND_NO;
623 struct qeth_eddp_context *ctx = NULL; 622 struct qeth_eddp_context *ctx = NULL;
623 int data_offset = -1;
624 int elements_needed = 0;
625 int hd_len = 0;
624 626
625 if ((card->state != CARD_STATE_UP) || !card->lan_online) { 627 if ((card->state != CARD_STATE_UP) || !card->lan_online) {
626 card->stats.tx_carrier_errors++; 628 card->stats.tx_carrier_errors++;
@@ -643,13 +645,32 @@ static int qeth_l2_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
643 if (card->info.type == QETH_CARD_TYPE_OSN) 645 if (card->info.type == QETH_CARD_TYPE_OSN)
644 hdr = (struct qeth_hdr *)skb->data; 646 hdr = (struct qeth_hdr *)skb->data;
645 else { 647 else {
646 /* create a clone with writeable headroom */ 648 if ((card->info.type == QETH_CARD_TYPE_IQD) && (!large_send) &&
647 new_skb = skb_realloc_headroom(skb, sizeof(struct qeth_hdr)); 649 (skb_shinfo(skb)->nr_frags == 0)) {
648 if (!new_skb) 650 new_skb = skb;
649 goto tx_drop; 651 data_offset = ETH_HLEN;
650 hdr = (struct qeth_hdr *)skb_push(new_skb, 652 hd_len = ETH_HLEN;
653 hdr = kmem_cache_alloc(qeth_core_header_cache,
654 GFP_ATOMIC);
655 if (!hdr)
656 goto tx_drop;
657 elements_needed++;
658 skb_reset_mac_header(new_skb);
659 qeth_l2_fill_header(card, hdr, new_skb, ipv, cast_type);
660 hdr->hdr.l2.pkt_length = new_skb->len;
661 memcpy(((char *)hdr) + sizeof(struct qeth_hdr),
662 skb_mac_header(new_skb), ETH_HLEN);
663 } else {
664 /* create a clone with writeable headroom */
665 new_skb = skb_realloc_headroom(skb,
666 sizeof(struct qeth_hdr));
667 if (!new_skb)
668 goto tx_drop;
669 hdr = (struct qeth_hdr *)skb_push(new_skb,
651 sizeof(struct qeth_hdr)); 670 sizeof(struct qeth_hdr));
652 qeth_l2_fill_header(card, hdr, new_skb, ipv, cast_type); 671 skb_set_mac_header(new_skb, sizeof(struct qeth_hdr));
672 qeth_l2_fill_header(card, hdr, new_skb, ipv, cast_type);
673 }
653 } 674 }
654 675
655 if (large_send == QETH_LARGE_SEND_EDDP) { 676 if (large_send == QETH_LARGE_SEND_EDDP) {
@@ -660,9 +681,13 @@ static int qeth_l2_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
660 goto tx_drop; 681 goto tx_drop;
661 } 682 }
662 } else { 683 } else {
663 elements = qeth_get_elements_no(card, (void *)hdr, new_skb, 0); 684 elements = qeth_get_elements_no(card, (void *)hdr, new_skb,
664 if (!elements) 685 elements_needed);
686 if (!elements) {
687 if (data_offset >= 0)
688 kmem_cache_free(qeth_core_header_cache, hdr);
665 goto tx_drop; 689 goto tx_drop;
690 }
666 } 691 }
667 692
668 if ((large_send == QETH_LARGE_SEND_NO) && 693 if ((large_send == QETH_LARGE_SEND_NO) &&
@@ -674,7 +699,7 @@ static int qeth_l2_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
674 elements, ctx); 699 elements, ctx);
675 else 700 else
676 rc = qeth_do_send_packet_fast(card, queue, new_skb, hdr, 701 rc = qeth_do_send_packet_fast(card, queue, new_skb, hdr,
677 elements, ctx); 702 elements, ctx, data_offset, hd_len);
678 if (!rc) { 703 if (!rc) {
679 card->stats.tx_packets++; 704 card->stats.tx_packets++;
680 card->stats.tx_bytes += tx_bytes; 705 card->stats.tx_bytes += tx_bytes;
@@ -701,6 +726,9 @@ static int qeth_l2_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
701 if (ctx != NULL) 726 if (ctx != NULL)
702 qeth_eddp_put_context(ctx); 727 qeth_eddp_put_context(ctx);
703 728
729 if (data_offset >= 0)
730 kmem_cache_free(qeth_core_header_cache, hdr);
731
704 if (rc == -EBUSY) { 732 if (rc == -EBUSY) {
705 if (new_skb != skb) 733 if (new_skb != skb)
706 dev_kfree_skb_any(new_skb); 734 dev_kfree_skb_any(new_skb);
diff --git a/drivers/s390/net/qeth_l3_main.c b/drivers/s390/net/qeth_l3_main.c
index 3e1d13857350..dd72c3c20165 100644
--- a/drivers/s390/net/qeth_l3_main.c
+++ b/drivers/s390/net/qeth_l3_main.c
@@ -2604,6 +2604,7 @@ static int qeth_l3_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
2604 int tx_bytes = skb->len; 2604 int tx_bytes = skb->len;
2605 enum qeth_large_send_types large_send = QETH_LARGE_SEND_NO; 2605 enum qeth_large_send_types large_send = QETH_LARGE_SEND_NO;
2606 struct qeth_eddp_context *ctx = NULL; 2606 struct qeth_eddp_context *ctx = NULL;
2607 int data_offset = -1;
2607 2608
2608 if ((card->info.type == QETH_CARD_TYPE_IQD) && 2609 if ((card->info.type == QETH_CARD_TYPE_IQD) &&
2609 (skb->protocol != htons(ETH_P_IPV6)) && 2610 (skb->protocol != htons(ETH_P_IPV6)) &&
@@ -2624,14 +2625,28 @@ static int qeth_l3_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
2624 card->perf_stats.outbound_start_time = qeth_get_micros(); 2625 card->perf_stats.outbound_start_time = qeth_get_micros();
2625 } 2626 }
2626 2627
2627 /* create a clone with writeable headroom */ 2628 if (skb_is_gso(skb))
2628 new_skb = skb_realloc_headroom(skb, sizeof(struct qeth_hdr_tso) + 2629 large_send = card->options.large_send;
2629 VLAN_HLEN); 2630
2630 if (!new_skb) 2631 if ((card->info.type == QETH_CARD_TYPE_IQD) && (!large_send) &&
2631 goto tx_drop; 2632 (skb_shinfo(skb)->nr_frags == 0)) {
2633 new_skb = skb;
2634 data_offset = ETH_HLEN;
2635 hdr = kmem_cache_alloc(qeth_core_header_cache, GFP_ATOMIC);
2636 if (!hdr)
2637 goto tx_drop;
2638 elements_needed++;
2639 } else {
2640 /* create a clone with writeable headroom */
2641 new_skb = skb_realloc_headroom(skb, sizeof(struct qeth_hdr_tso)
2642 + VLAN_HLEN);
2643 if (!new_skb)
2644 goto tx_drop;
2645 }
2632 2646
2633 if (card->info.type == QETH_CARD_TYPE_IQD) { 2647 if (card->info.type == QETH_CARD_TYPE_IQD) {
2634 skb_pull(new_skb, ETH_HLEN); 2648 if (data_offset < 0)
2649 skb_pull(new_skb, ETH_HLEN);
2635 } else { 2650 } else {
2636 if (new_skb->protocol == htons(ETH_P_IP)) { 2651 if (new_skb->protocol == htons(ETH_P_IP)) {
2637 if (card->dev->type == ARPHRD_IEEE802_TR) 2652 if (card->dev->type == ARPHRD_IEEE802_TR)
@@ -2657,9 +2672,6 @@ static int qeth_l3_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
2657 2672
2658 netif_stop_queue(dev); 2673 netif_stop_queue(dev);
2659 2674
2660 if (skb_is_gso(new_skb))
2661 large_send = card->options.large_send;
2662
2663 /* fix hardware limitation: as long as we do not have sbal 2675 /* fix hardware limitation: as long as we do not have sbal
2664 * chaining we can not send long frag lists so we temporary 2676 * chaining we can not send long frag lists so we temporary
2665 * switch to EDDP 2677 * switch to EDDP
@@ -2677,9 +2689,16 @@ static int qeth_l3_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
2677 qeth_tso_fill_header(card, hdr, new_skb); 2689 qeth_tso_fill_header(card, hdr, new_skb);
2678 elements_needed++; 2690 elements_needed++;
2679 } else { 2691 } else {
2680 hdr = (struct qeth_hdr *)skb_push(new_skb, 2692 if (data_offset < 0) {
2693 hdr = (struct qeth_hdr *)skb_push(new_skb,
2681 sizeof(struct qeth_hdr)); 2694 sizeof(struct qeth_hdr));
2682 qeth_l3_fill_header(card, hdr, new_skb, ipv, cast_type); 2695 qeth_l3_fill_header(card, hdr, new_skb, ipv,
2696 cast_type);
2697 } else {
2698 qeth_l3_fill_header(card, hdr, new_skb, ipv,
2699 cast_type);
2700 hdr->hdr.l3.length = new_skb->len - data_offset;
2701 }
2683 } 2702 }
2684 2703
2685 if (large_send == QETH_LARGE_SEND_EDDP) { 2704 if (large_send == QETH_LARGE_SEND_EDDP) {
@@ -2695,8 +2714,11 @@ static int qeth_l3_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
2695 } else { 2714 } else {
2696 int elems = qeth_get_elements_no(card, (void *)hdr, new_skb, 2715 int elems = qeth_get_elements_no(card, (void *)hdr, new_skb,
2697 elements_needed); 2716 elements_needed);
2698 if (!elems) 2717 if (!elems) {
2718 if (data_offset >= 0)
2719 kmem_cache_free(qeth_core_header_cache, hdr);
2699 goto tx_drop; 2720 goto tx_drop;
2721 }
2700 elements_needed += elems; 2722 elements_needed += elems;
2701 } 2723 }
2702 2724
@@ -2709,7 +2731,7 @@ static int qeth_l3_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
2709 elements_needed, ctx); 2731 elements_needed, ctx);
2710 else 2732 else
2711 rc = qeth_do_send_packet_fast(card, queue, new_skb, hdr, 2733 rc = qeth_do_send_packet_fast(card, queue, new_skb, hdr,
2712 elements_needed, ctx); 2734 elements_needed, ctx, data_offset, 0);
2713 2735
2714 if (!rc) { 2736 if (!rc) {
2715 card->stats.tx_packets++; 2737 card->stats.tx_packets++;
@@ -2737,6 +2759,9 @@ static int qeth_l3_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
2737 if (ctx != NULL) 2759 if (ctx != NULL)
2738 qeth_eddp_put_context(ctx); 2760 qeth_eddp_put_context(ctx);
2739 2761
2762 if (data_offset >= 0)
2763 kmem_cache_free(qeth_core_header_cache, hdr);
2764
2740 if (rc == -EBUSY) { 2765 if (rc == -EBUSY) {
2741 if (new_skb != skb) 2766 if (new_skb != skb)
2742 dev_kfree_skb_any(new_skb); 2767 dev_kfree_skb_any(new_skb);
diff --git a/drivers/scsi/arm/acornscsi-io.S b/drivers/scsi/arm/acornscsi-io.S
index 5cebe3105260..22171b2110a8 100644
--- a/drivers/scsi/arm/acornscsi-io.S
+++ b/drivers/scsi/arm/acornscsi-io.S
@@ -8,7 +8,7 @@
8#include <linux/linkage.h> 8#include <linux/linkage.h>
9 9
10#include <asm/assembler.h> 10#include <asm/assembler.h>
11#include <asm/hardware.h> 11#include <mach/hardware.h>
12 12
13#if defined(__APCS_32__) 13#if defined(__APCS_32__)
14#define LOADREGS(t,r,l...) ldm##t r, l 14#define LOADREGS(t,r,l...) ldm##t r, l
diff --git a/drivers/scsi/hptiop.c b/drivers/scsi/hptiop.c
index da876d3924be..a48e4990fe12 100644
--- a/drivers/scsi/hptiop.c
+++ b/drivers/scsi/hptiop.c
@@ -25,7 +25,6 @@
25#include <linux/delay.h> 25#include <linux/delay.h>
26#include <linux/timer.h> 26#include <linux/timer.h>
27#include <linux/spinlock.h> 27#include <linux/spinlock.h>
28#include <linux/hdreg.h>
29#include <asm/uaccess.h> 28#include <asm/uaccess.h>
30#include <asm/io.h> 29#include <asm/io.h>
31#include <asm/div64.h> 30#include <asm/div64.h>
@@ -1249,6 +1248,13 @@ static struct pci_device_id hptiop_id_table[] = {
1249 { PCI_VDEVICE(TTI, 0x3522), (kernel_ulong_t)&hptiop_itl_ops }, 1248 { PCI_VDEVICE(TTI, 0x3522), (kernel_ulong_t)&hptiop_itl_ops },
1250 { PCI_VDEVICE(TTI, 0x3410), (kernel_ulong_t)&hptiop_itl_ops }, 1249 { PCI_VDEVICE(TTI, 0x3410), (kernel_ulong_t)&hptiop_itl_ops },
1251 { PCI_VDEVICE(TTI, 0x3540), (kernel_ulong_t)&hptiop_itl_ops }, 1250 { PCI_VDEVICE(TTI, 0x3540), (kernel_ulong_t)&hptiop_itl_ops },
1251 { PCI_VDEVICE(TTI, 0x3530), (kernel_ulong_t)&hptiop_itl_ops },
1252 { PCI_VDEVICE(TTI, 0x3560), (kernel_ulong_t)&hptiop_itl_ops },
1253 { PCI_VDEVICE(TTI, 0x4322), (kernel_ulong_t)&hptiop_itl_ops },
1254 { PCI_VDEVICE(TTI, 0x4210), (kernel_ulong_t)&hptiop_itl_ops },
1255 { PCI_VDEVICE(TTI, 0x4211), (kernel_ulong_t)&hptiop_itl_ops },
1256 { PCI_VDEVICE(TTI, 0x4310), (kernel_ulong_t)&hptiop_itl_ops },
1257 { PCI_VDEVICE(TTI, 0x4311), (kernel_ulong_t)&hptiop_itl_ops },
1252 { PCI_VDEVICE(TTI, 0x3120), (kernel_ulong_t)&hptiop_mv_ops }, 1258 { PCI_VDEVICE(TTI, 0x3120), (kernel_ulong_t)&hptiop_mv_ops },
1253 { PCI_VDEVICE(TTI, 0x3122), (kernel_ulong_t)&hptiop_mv_ops }, 1259 { PCI_VDEVICE(TTI, 0x3122), (kernel_ulong_t)&hptiop_mv_ops },
1254 { PCI_VDEVICE(TTI, 0x3020), (kernel_ulong_t)&hptiop_mv_ops }, 1260 { PCI_VDEVICE(TTI, 0x3020), (kernel_ulong_t)&hptiop_mv_ops },
diff --git a/drivers/scsi/ide-scsi.c b/drivers/scsi/ide-scsi.c
index b40a673985aa..461331d3dc45 100644
--- a/drivers/scsi/ide-scsi.c
+++ b/drivers/scsi/ide-scsi.c
@@ -102,11 +102,10 @@ static struct ide_scsi_obj *ide_scsi_get(struct gendisk *disk)
102 mutex_lock(&idescsi_ref_mutex); 102 mutex_lock(&idescsi_ref_mutex);
103 scsi = ide_scsi_g(disk); 103 scsi = ide_scsi_g(disk);
104 if (scsi) { 104 if (scsi) {
105 scsi_host_get(scsi->host); 105 if (ide_device_get(scsi->drive))
106 if (ide_device_get(scsi->drive)) {
107 scsi_host_put(scsi->host);
108 scsi = NULL; 106 scsi = NULL;
109 } 107 else
108 scsi_host_get(scsi->host);
110 } 109 }
111 mutex_unlock(&idescsi_ref_mutex); 110 mutex_unlock(&idescsi_ref_mutex);
112 return scsi; 111 return scsi;
@@ -114,9 +113,11 @@ static struct ide_scsi_obj *ide_scsi_get(struct gendisk *disk)
114 113
115static void ide_scsi_put(struct ide_scsi_obj *scsi) 114static void ide_scsi_put(struct ide_scsi_obj *scsi)
116{ 115{
116 ide_drive_t *drive = scsi->drive;
117
117 mutex_lock(&idescsi_ref_mutex); 118 mutex_lock(&idescsi_ref_mutex);
118 ide_device_put(scsi->drive);
119 scsi_host_put(scsi->host); 119 scsi_host_put(scsi->host);
120 ide_device_put(drive);
120 mutex_unlock(&idescsi_ref_mutex); 121 mutex_unlock(&idescsi_ref_mutex);
121} 122}
122 123
diff --git a/drivers/scsi/scsi_transport_spi.c b/drivers/scsi/scsi_transport_spi.c
index 75a64a6cae8c..b29360ed0bdc 100644
--- a/drivers/scsi/scsi_transport_spi.c
+++ b/drivers/scsi/scsi_transport_spi.c
@@ -366,12 +366,14 @@ spi_transport_rd_attr(rti, "%d\n");
366spi_transport_rd_attr(pcomp_en, "%d\n"); 366spi_transport_rd_attr(pcomp_en, "%d\n");
367spi_transport_rd_attr(hold_mcs, "%d\n"); 367spi_transport_rd_attr(hold_mcs, "%d\n");
368 368
369/* we only care about the first child device so we return 1 */ 369/* we only care about the first child device that's a real SCSI device
370 * so we return 1 to terminate the iteration when we find it */
370static int child_iter(struct device *dev, void *data) 371static int child_iter(struct device *dev, void *data)
371{ 372{
372 struct scsi_device *sdev = to_scsi_device(dev); 373 if (!scsi_is_sdev_device(dev))
374 return 0;
373 375
374 spi_dv_device(sdev); 376 spi_dv_device(to_scsi_device(dev));
375 return 1; 377 return 1;
376} 378}
377 379
diff --git a/drivers/scsi/ses.c b/drivers/scsi/ses.c
index 0fe031f003e7..1bcf3c33d7ff 100644
--- a/drivers/scsi/ses.c
+++ b/drivers/scsi/ses.c
@@ -345,14 +345,14 @@ static int ses_enclosure_find_by_addr(struct enclosure_device *edev,
345 return 0; 345 return 0;
346} 346}
347 347
348#define VPD_INQUIRY_SIZE 512 348#define VPD_INQUIRY_SIZE 36
349 349
350static void ses_match_to_enclosure(struct enclosure_device *edev, 350static void ses_match_to_enclosure(struct enclosure_device *edev,
351 struct scsi_device *sdev) 351 struct scsi_device *sdev)
352{ 352{
353 unsigned char *buf = kmalloc(VPD_INQUIRY_SIZE, GFP_KERNEL); 353 unsigned char *buf = kmalloc(VPD_INQUIRY_SIZE, GFP_KERNEL);
354 unsigned char *desc; 354 unsigned char *desc;
355 int len; 355 u16 vpd_len;
356 struct efd efd = { 356 struct efd efd = {
357 .addr = 0, 357 .addr = 0,
358 }; 358 };
@@ -372,9 +372,19 @@ static void ses_match_to_enclosure(struct enclosure_device *edev,
372 VPD_INQUIRY_SIZE, NULL, SES_TIMEOUT, SES_RETRIES)) 372 VPD_INQUIRY_SIZE, NULL, SES_TIMEOUT, SES_RETRIES))
373 goto free; 373 goto free;
374 374
375 len = (buf[2] << 8) + buf[3]; 375 vpd_len = (buf[2] << 8) + buf[3];
376 kfree(buf);
377 buf = kmalloc(vpd_len, GFP_KERNEL);
378 if (!buf)
379 return;
380 cmd[3] = vpd_len >> 8;
381 cmd[4] = vpd_len & 0xff;
382 if (scsi_execute_req(sdev, cmd, DMA_FROM_DEVICE, buf,
383 vpd_len, NULL, SES_TIMEOUT, SES_RETRIES))
384 goto free;
385
376 desc = buf + 4; 386 desc = buf + 4;
377 while (desc < buf + len) { 387 while (desc < buf + vpd_len) {
378 enum scsi_protocol proto = desc[0] >> 4; 388 enum scsi_protocol proto = desc[0] >> 4;
379 u8 code_set = desc[0] & 0x0f; 389 u8 code_set = desc[0] & 0x0f;
380 u8 piv = desc[1] & 0x80; 390 u8 piv = desc[1] & 0x80;
diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c
index d3b8ebb83776..3d36270a8b4d 100644
--- a/drivers/scsi/sg.c
+++ b/drivers/scsi/sg.c
@@ -1747,7 +1747,7 @@ st_map_user_pages(struct scatterlist *sgl, const unsigned int max_pages,
1747 */ 1747 */
1748 flush_dcache_page(pages[i]); 1748 flush_dcache_page(pages[i]);
1749 /* ?? Is locking needed? I don't think so */ 1749 /* ?? Is locking needed? I don't think so */
1750 /* if (TestSetPageLocked(pages[i])) 1750 /* if (!trylock_page(pages[i]))
1751 goto out_unlock; */ 1751 goto out_unlock; */
1752 } 1752 }
1753 1753
diff --git a/drivers/serial/21285.c b/drivers/serial/21285.c
index 6558a4037806..f31c6698419c 100644
--- a/drivers/serial/21285.c
+++ b/drivers/serial/21285.c
@@ -19,7 +19,7 @@
19#include <asm/irq.h> 19#include <asm/irq.h>
20#include <asm/mach-types.h> 20#include <asm/mach-types.h>
21#include <asm/hardware/dec21285.h> 21#include <asm/hardware/dec21285.h>
22#include <asm/hardware.h> 22#include <mach/hardware.h>
23 23
24#define BAUD_BASE (mem_fclk_21285/64) 24#define BAUD_BASE (mem_fclk_21285/64)
25 25
diff --git a/drivers/serial/8250.c b/drivers/serial/8250.c
index a97f1ae11f78..342e12fb1c25 100644
--- a/drivers/serial/8250.c
+++ b/drivers/serial/8250.c
@@ -1885,7 +1885,7 @@ static int serial8250_startup(struct uart_port *port)
1885 * the interrupt is enabled. Delays are necessary to 1885 * the interrupt is enabled. Delays are necessary to
1886 * allow register changes to become visible. 1886 * allow register changes to become visible.
1887 */ 1887 */
1888 spin_lock(&up->port.lock); 1888 spin_lock_irqsave(&up->port.lock, flags);
1889 if (up->port.flags & UPF_SHARE_IRQ) 1889 if (up->port.flags & UPF_SHARE_IRQ)
1890 disable_irq_nosync(up->port.irq); 1890 disable_irq_nosync(up->port.irq);
1891 1891
@@ -1901,7 +1901,7 @@ static int serial8250_startup(struct uart_port *port)
1901 1901
1902 if (up->port.flags & UPF_SHARE_IRQ) 1902 if (up->port.flags & UPF_SHARE_IRQ)
1903 enable_irq(up->port.irq); 1903 enable_irq(up->port.irq);
1904 spin_unlock(&up->port.lock); 1904 spin_unlock_irqrestore(&up->port.lock, flags);
1905 1905
1906 /* 1906 /*
1907 * If the interrupt is not reasserted, setup a timer to 1907 * If the interrupt is not reasserted, setup a timer to
diff --git a/drivers/serial/Makefile b/drivers/serial/Makefile
index 3a0bbbe17aa3..7e7383e890d8 100644
--- a/drivers/serial/Makefile
+++ b/drivers/serial/Makefile
@@ -42,7 +42,6 @@ obj-$(CONFIG_SERIAL_68328) += 68328serial.o
42obj-$(CONFIG_SERIAL_68360) += 68360serial.o 42obj-$(CONFIG_SERIAL_68360) += 68360serial.o
43obj-$(CONFIG_SERIAL_COLDFIRE) += mcfserial.o 43obj-$(CONFIG_SERIAL_COLDFIRE) += mcfserial.o
44obj-$(CONFIG_SERIAL_MCF) += mcf.o 44obj-$(CONFIG_SERIAL_MCF) += mcf.o
45obj-$(CONFIG_V850E_UART) += v850e_uart.o
46obj-$(CONFIG_SERIAL_PMACZILOG) += pmac_zilog.o 45obj-$(CONFIG_SERIAL_PMACZILOG) += pmac_zilog.o
47obj-$(CONFIG_SERIAL_LH7A40X) += serial_lh7a40x.o 46obj-$(CONFIG_SERIAL_LH7A40X) += serial_lh7a40x.o
48obj-$(CONFIG_SERIAL_DZ) += dz.o 47obj-$(CONFIG_SERIAL_DZ) += dz.o
diff --git a/drivers/serial/atmel_serial.c b/drivers/serial/atmel_serial.c
index 1fee12c1f4f8..3a6da80b081c 100644
--- a/drivers/serial/atmel_serial.c
+++ b/drivers/serial/atmel_serial.c
@@ -42,11 +42,11 @@
42#include <asm/io.h> 42#include <asm/io.h>
43 43
44#include <asm/mach/serial_at91.h> 44#include <asm/mach/serial_at91.h>
45#include <asm/arch/board.h> 45#include <mach/board.h>
46 46
47#ifdef CONFIG_ARM 47#ifdef CONFIG_ARM
48#include <asm/arch/cpu.h> 48#include <mach/cpu.h>
49#include <asm/arch/gpio.h> 49#include <mach/gpio.h>
50#endif 50#endif
51 51
52#define PDC_BUFFER_SIZE 512 52#define PDC_BUFFER_SIZE 512
diff --git a/drivers/serial/bfin_5xx.c b/drivers/serial/bfin_5xx.c
index 9d8543762a30..efcd44344fb1 100644
--- a/drivers/serial/bfin_5xx.c
+++ b/drivers/serial/bfin_5xx.c
@@ -817,7 +817,7 @@ static void bfin_serial_set_ldisc(struct uart_port *port)
817 if (line >= port->info->port.tty->driver->num) 817 if (line >= port->info->port.tty->driver->num)
818 return; 818 return;
819 819
820 switch (port->info->port.tty->ldisc.num) { 820 switch (port->info->port.tty->termios->c_line) {
821 case N_IRDA: 821 case N_IRDA:
822 val = UART_GET_GCTL(&bfin_serial_ports[line]); 822 val = UART_GET_GCTL(&bfin_serial_ports[line]);
823 val |= (IREN | RPOLC); 823 val |= (IREN | RPOLC);
diff --git a/drivers/serial/clps711x.c b/drivers/serial/clps711x.c
index fc1fa9267c59..459f3420a429 100644
--- a/drivers/serial/clps711x.c
+++ b/drivers/serial/clps711x.c
@@ -39,7 +39,7 @@
39#include <linux/serial_core.h> 39#include <linux/serial_core.h>
40#include <linux/serial.h> 40#include <linux/serial.h>
41 41
42#include <asm/hardware.h> 42#include <mach/hardware.h>
43#include <asm/io.h> 43#include <asm/io.h>
44#include <asm/irq.h> 44#include <asm/irq.h>
45#include <asm/hardware/clps7111.h> 45#include <asm/hardware/clps7111.h>
diff --git a/drivers/serial/cpm_uart/cpm_uart.h b/drivers/serial/cpm_uart/cpm_uart.h
index 5c76e0ae0582..7274b527a3c1 100644
--- a/drivers/serial/cpm_uart/cpm_uart.h
+++ b/drivers/serial/cpm_uart/cpm_uart.h
@@ -50,6 +50,15 @@
50 50
51#define SCC_WAIT_CLOSING 100 51#define SCC_WAIT_CLOSING 100
52 52
53#define GPIO_CTS 0
54#define GPIO_RTS 1
55#define GPIO_DCD 2
56#define GPIO_DSR 3
57#define GPIO_DTR 4
58#define GPIO_RI 5
59
60#define NUM_GPIOS (GPIO_RI+1)
61
53struct uart_cpm_port { 62struct uart_cpm_port {
54 struct uart_port port; 63 struct uart_port port;
55 u16 rx_nrfifos; 64 u16 rx_nrfifos;
@@ -68,6 +77,7 @@ struct uart_cpm_port {
68 unsigned char *rx_buf; 77 unsigned char *rx_buf;
69 u32 flags; 78 u32 flags;
70 void (*set_lineif)(struct uart_cpm_port *); 79 void (*set_lineif)(struct uart_cpm_port *);
80 struct clk *clk;
71 u8 brg; 81 u8 brg;
72 uint dp_addr; 82 uint dp_addr;
73 void *mem_addr; 83 void *mem_addr;
@@ -82,6 +92,7 @@ struct uart_cpm_port {
82 int wait_closing; 92 int wait_closing;
83 /* value to combine with opcode to form cpm command */ 93 /* value to combine with opcode to form cpm command */
84 u32 command; 94 u32 command;
95 int gpios[NUM_GPIOS];
85}; 96};
86 97
87extern int cpm_uart_nr; 98extern int cpm_uart_nr;
diff --git a/drivers/serial/cpm_uart/cpm_uart_core.c b/drivers/serial/cpm_uart/cpm_uart_core.c
index a4f86927a74b..25efca5a7a1f 100644
--- a/drivers/serial/cpm_uart/cpm_uart_core.c
+++ b/drivers/serial/cpm_uart/cpm_uart_core.c
@@ -43,6 +43,9 @@
43#include <linux/dma-mapping.h> 43#include <linux/dma-mapping.h>
44#include <linux/fs_uart_pd.h> 44#include <linux/fs_uart_pd.h>
45#include <linux/of_platform.h> 45#include <linux/of_platform.h>
46#include <linux/gpio.h>
47#include <linux/of_gpio.h>
48#include <linux/clk.h>
46 49
47#include <asm/io.h> 50#include <asm/io.h>
48#include <asm/irq.h> 51#include <asm/irq.h>
@@ -96,13 +99,41 @@ static unsigned int cpm_uart_tx_empty(struct uart_port *port)
96 99
97static void cpm_uart_set_mctrl(struct uart_port *port, unsigned int mctrl) 100static void cpm_uart_set_mctrl(struct uart_port *port, unsigned int mctrl)
98{ 101{
99 /* Whee. Do nothing. */ 102 struct uart_cpm_port *pinfo = (struct uart_cpm_port *)port;
103
104 if (pinfo->gpios[GPIO_RTS] >= 0)
105 gpio_set_value(pinfo->gpios[GPIO_RTS], !(mctrl & TIOCM_RTS));
106
107 if (pinfo->gpios[GPIO_DTR] >= 0)
108 gpio_set_value(pinfo->gpios[GPIO_DTR], !(mctrl & TIOCM_DTR));
100} 109}
101 110
102static unsigned int cpm_uart_get_mctrl(struct uart_port *port) 111static unsigned int cpm_uart_get_mctrl(struct uart_port *port)
103{ 112{
104 /* Whee. Do nothing. */ 113 struct uart_cpm_port *pinfo = (struct uart_cpm_port *)port;
105 return TIOCM_CAR | TIOCM_DSR | TIOCM_CTS; 114 unsigned int mctrl = TIOCM_CTS | TIOCM_DSR | TIOCM_CAR;
115
116 if (pinfo->gpios[GPIO_CTS] >= 0) {
117 if (gpio_get_value(pinfo->gpios[GPIO_CTS]))
118 mctrl &= ~TIOCM_CTS;
119 }
120
121 if (pinfo->gpios[GPIO_DSR] >= 0) {
122 if (gpio_get_value(pinfo->gpios[GPIO_DSR]))
123 mctrl &= ~TIOCM_DSR;
124 }
125
126 if (pinfo->gpios[GPIO_DCD] >= 0) {
127 if (gpio_get_value(pinfo->gpios[GPIO_DCD]))
128 mctrl &= ~TIOCM_CAR;
129 }
130
131 if (pinfo->gpios[GPIO_RI] >= 0) {
132 if (!gpio_get_value(pinfo->gpios[GPIO_RI]))
133 mctrl |= TIOCM_RNG;
134 }
135
136 return mctrl;
106} 137}
107 138
108/* 139/*
@@ -566,7 +597,10 @@ static void cpm_uart_set_termios(struct uart_port *port,
566 out_be16(&sccp->scc_psmr, (sbits << 12) | scval); 597 out_be16(&sccp->scc_psmr, (sbits << 12) | scval);
567 } 598 }
568 599
569 cpm_set_brg(pinfo->brg - 1, baud); 600 if (pinfo->clk)
601 clk_set_rate(pinfo->clk, baud);
602 else
603 cpm_set_brg(pinfo->brg - 1, baud);
570 spin_unlock_irqrestore(&port->lock, flags); 604 spin_unlock_irqrestore(&port->lock, flags);
571} 605}
572 606
@@ -991,14 +1025,23 @@ static int cpm_uart_init_port(struct device_node *np,
991 void __iomem *mem, *pram; 1025 void __iomem *mem, *pram;
992 int len; 1026 int len;
993 int ret; 1027 int ret;
1028 int i;
994 1029
995 data = of_get_property(np, "fsl,cpm-brg", &len); 1030 data = of_get_property(np, "clock", NULL);
996 if (!data || len != 4) { 1031 if (data) {
997 printk(KERN_ERR "CPM UART %s has no/invalid " 1032 struct clk *clk = clk_get(NULL, (const char*)data);
998 "fsl,cpm-brg property.\n", np->name); 1033 if (!IS_ERR(clk))
999 return -EINVAL; 1034 pinfo->clk = clk;
1035 }
1036 if (!pinfo->clk) {
1037 data = of_get_property(np, "fsl,cpm-brg", &len);
1038 if (!data || len != 4) {
1039 printk(KERN_ERR "CPM UART %s has no/invalid "
1040 "fsl,cpm-brg property.\n", np->name);
1041 return -EINVAL;
1042 }
1043 pinfo->brg = *data;
1000 } 1044 }
1001 pinfo->brg = *data;
1002 1045
1003 data = of_get_property(np, "fsl,cpm-command", &len); 1046 data = of_get_property(np, "fsl,cpm-command", &len);
1004 if (!data || len != 4) { 1047 if (!data || len != 4) {
@@ -1050,6 +1093,9 @@ static int cpm_uart_init_port(struct device_node *np,
1050 goto out_pram; 1093 goto out_pram;
1051 } 1094 }
1052 1095
1096 for (i = 0; i < NUM_GPIOS; i++)
1097 pinfo->gpios[i] = of_get_gpio(np, i);
1098
1053 return cpm_uart_request_port(&pinfo->port); 1099 return cpm_uart_request_port(&pinfo->port);
1054 1100
1055out_pram: 1101out_pram:
diff --git a/drivers/serial/crisv10.c b/drivers/serial/crisv10.c
index 8249ac490559..bf94a770bb44 100644
--- a/drivers/serial/crisv10.c
+++ b/drivers/serial/crisv10.c
@@ -234,7 +234,7 @@ unsigned long r_alt_ser_baudrate_shadow = 0;
234 234
235static struct e100_serial rs_table[] = { 235static struct e100_serial rs_table[] = {
236 { .baud = DEF_BAUD, 236 { .baud = DEF_BAUD,
237 .port = (unsigned char *)R_SERIAL0_CTRL, 237 .ioport = (unsigned char *)R_SERIAL0_CTRL,
238 .irq = 1U << 12, /* uses DMA 6 and 7 */ 238 .irq = 1U << 12, /* uses DMA 6 and 7 */
239 .oclrintradr = R_DMA_CH6_CLR_INTR, 239 .oclrintradr = R_DMA_CH6_CLR_INTR,
240 .ofirstadr = R_DMA_CH6_FIRST, 240 .ofirstadr = R_DMA_CH6_FIRST,
@@ -288,7 +288,7 @@ static struct e100_serial rs_table[] = {
288}, /* ttyS0 */ 288}, /* ttyS0 */
289#ifndef CONFIG_SVINTO_SIM 289#ifndef CONFIG_SVINTO_SIM
290 { .baud = DEF_BAUD, 290 { .baud = DEF_BAUD,
291 .port = (unsigned char *)R_SERIAL1_CTRL, 291 .ioport = (unsigned char *)R_SERIAL1_CTRL,
292 .irq = 1U << 16, /* uses DMA 8 and 9 */ 292 .irq = 1U << 16, /* uses DMA 8 and 9 */
293 .oclrintradr = R_DMA_CH8_CLR_INTR, 293 .oclrintradr = R_DMA_CH8_CLR_INTR,
294 .ofirstadr = R_DMA_CH8_FIRST, 294 .ofirstadr = R_DMA_CH8_FIRST,
@@ -344,7 +344,7 @@ static struct e100_serial rs_table[] = {
344}, /* ttyS1 */ 344}, /* ttyS1 */
345 345
346 { .baud = DEF_BAUD, 346 { .baud = DEF_BAUD,
347 .port = (unsigned char *)R_SERIAL2_CTRL, 347 .ioport = (unsigned char *)R_SERIAL2_CTRL,
348 .irq = 1U << 4, /* uses DMA 2 and 3 */ 348 .irq = 1U << 4, /* uses DMA 2 and 3 */
349 .oclrintradr = R_DMA_CH2_CLR_INTR, 349 .oclrintradr = R_DMA_CH2_CLR_INTR,
350 .ofirstadr = R_DMA_CH2_FIRST, 350 .ofirstadr = R_DMA_CH2_FIRST,
@@ -398,7 +398,7 @@ static struct e100_serial rs_table[] = {
398 }, /* ttyS2 */ 398 }, /* ttyS2 */
399 399
400 { .baud = DEF_BAUD, 400 { .baud = DEF_BAUD,
401 .port = (unsigned char *)R_SERIAL3_CTRL, 401 .ioport = (unsigned char *)R_SERIAL3_CTRL,
402 .irq = 1U << 8, /* uses DMA 4 and 5 */ 402 .irq = 1U << 8, /* uses DMA 4 and 5 */
403 .oclrintradr = R_DMA_CH4_CLR_INTR, 403 .oclrintradr = R_DMA_CH4_CLR_INTR,
404 .ofirstadr = R_DMA_CH4_FIRST, 404 .ofirstadr = R_DMA_CH4_FIRST,
@@ -939,7 +939,7 @@ static const struct control_pins e100_modem_pins[NR_PORTS] =
939/* Output */ 939/* Output */
940#define E100_RTS_GET(info) ((info)->rx_ctrl & E100_RTS_MASK) 940#define E100_RTS_GET(info) ((info)->rx_ctrl & E100_RTS_MASK)
941/* Input */ 941/* Input */
942#define E100_CTS_GET(info) ((info)->port[REG_STATUS] & E100_CTS_MASK) 942#define E100_CTS_GET(info) ((info)->ioport[REG_STATUS] & E100_CTS_MASK)
943 943
944/* These are typically PA or PB and 0 means 0V, 1 means 3.3V */ 944/* These are typically PA or PB and 0 means 0V, 1 means 3.3V */
945/* Is an output */ 945/* Is an output */
@@ -1092,7 +1092,7 @@ e100_rts(struct e100_serial *info, int set)
1092 local_irq_save(flags); 1092 local_irq_save(flags);
1093 info->rx_ctrl &= ~E100_RTS_MASK; 1093 info->rx_ctrl &= ~E100_RTS_MASK;
1094 info->rx_ctrl |= (set ? 0 : E100_RTS_MASK); /* RTS is active low */ 1094 info->rx_ctrl |= (set ? 0 : E100_RTS_MASK); /* RTS is active low */
1095 info->port[REG_REC_CTRL] = info->rx_ctrl; 1095 info->ioport[REG_REC_CTRL] = info->rx_ctrl;
1096 local_irq_restore(flags); 1096 local_irq_restore(flags);
1097#ifdef SERIAL_DEBUG_IO 1097#ifdef SERIAL_DEBUG_IO
1098 printk("ser%i rts %i\n", info->line, set); 1098 printk("ser%i rts %i\n", info->line, set);
@@ -1142,7 +1142,7 @@ e100_disable_rx(struct e100_serial *info)
1142{ 1142{
1143#ifndef CONFIG_SVINTO_SIM 1143#ifndef CONFIG_SVINTO_SIM
1144 /* disable the receiver */ 1144 /* disable the receiver */
1145 info->port[REG_REC_CTRL] = 1145 info->ioport[REG_REC_CTRL] =
1146 (info->rx_ctrl &= ~IO_MASK(R_SERIAL0_REC_CTRL, rec_enable)); 1146 (info->rx_ctrl &= ~IO_MASK(R_SERIAL0_REC_CTRL, rec_enable));
1147#endif 1147#endif
1148} 1148}
@@ -1152,7 +1152,7 @@ e100_enable_rx(struct e100_serial *info)
1152{ 1152{
1153#ifndef CONFIG_SVINTO_SIM 1153#ifndef CONFIG_SVINTO_SIM
1154 /* enable the receiver */ 1154 /* enable the receiver */
1155 info->port[REG_REC_CTRL] = 1155 info->ioport[REG_REC_CTRL] =
1156 (info->rx_ctrl |= IO_MASK(R_SERIAL0_REC_CTRL, rec_enable)); 1156 (info->rx_ctrl |= IO_MASK(R_SERIAL0_REC_CTRL, rec_enable));
1157#endif 1157#endif
1158} 1158}
@@ -1490,7 +1490,7 @@ rs_stop(struct tty_struct *tty)
1490 xoff |= IO_STATE(R_SERIAL0_XOFF, auto_xoff, enable); 1490 xoff |= IO_STATE(R_SERIAL0_XOFF, auto_xoff, enable);
1491 } 1491 }
1492 1492
1493 *((unsigned long *)&info->port[REG_XOFF]) = xoff; 1493 *((unsigned long *)&info->ioport[REG_XOFF]) = xoff;
1494 local_irq_restore(flags); 1494 local_irq_restore(flags);
1495 } 1495 }
1496} 1496}
@@ -1513,7 +1513,7 @@ rs_start(struct tty_struct *tty)
1513 xoff |= IO_STATE(R_SERIAL0_XOFF, auto_xoff, enable); 1513 xoff |= IO_STATE(R_SERIAL0_XOFF, auto_xoff, enable);
1514 } 1514 }
1515 1515
1516 *((unsigned long *)&info->port[REG_XOFF]) = xoff; 1516 *((unsigned long *)&info->ioport[REG_XOFF]) = xoff;
1517 if (!info->uses_dma_out && 1517 if (!info->uses_dma_out &&
1518 info->xmit.head != info->xmit.tail && info->xmit.buf) 1518 info->xmit.head != info->xmit.tail && info->xmit.buf)
1519 e100_enable_serial_tx_ready_irq(info); 1519 e100_enable_serial_tx_ready_irq(info);
@@ -1888,7 +1888,7 @@ static void receive_chars_dma(struct e100_serial *info)
1888 handle_all_descr_data(info); 1888 handle_all_descr_data(info);
1889 1889
1890 /* Read the status register to detect errors */ 1890 /* Read the status register to detect errors */
1891 rstat = info->port[REG_STATUS]; 1891 rstat = info->ioport[REG_STATUS];
1892 if (rstat & IO_MASK(R_SERIAL0_STATUS, xoff_detect) ) { 1892 if (rstat & IO_MASK(R_SERIAL0_STATUS, xoff_detect) ) {
1893 DFLOW(DEBUG_LOG(info->line, "XOFF detect stat %x\n", rstat)); 1893 DFLOW(DEBUG_LOG(info->line, "XOFF detect stat %x\n", rstat));
1894 } 1894 }
@@ -1897,7 +1897,7 @@ static void receive_chars_dma(struct e100_serial *info)
1897 /* If we got an error, we must reset it by reading the 1897 /* If we got an error, we must reset it by reading the
1898 * data_in field 1898 * data_in field
1899 */ 1899 */
1900 unsigned char data = info->port[REG_DATA]; 1900 unsigned char data = info->ioport[REG_DATA];
1901 1901
1902 PROCSTAT(ser_stat[info->line].errors_cnt++); 1902 PROCSTAT(ser_stat[info->line].errors_cnt++);
1903 DEBUG_LOG(info->line, "#dERR: s d 0x%04X\n", 1903 DEBUG_LOG(info->line, "#dERR: s d 0x%04X\n",
@@ -2077,7 +2077,7 @@ static int force_eop_if_needed(struct e100_serial *info)
2077 /* We check data_avail bit to determine if data has 2077 /* We check data_avail bit to determine if data has
2078 * arrived since last time 2078 * arrived since last time
2079 */ 2079 */
2080 unsigned char rstat = info->port[REG_STATUS]; 2080 unsigned char rstat = info->ioport[REG_STATUS];
2081 2081
2082 /* error or datavail? */ 2082 /* error or datavail? */
2083 if (rstat & SER_ERROR_MASK) { 2083 if (rstat & SER_ERROR_MASK) {
@@ -2096,7 +2096,7 @@ static int force_eop_if_needed(struct e100_serial *info)
2096 TIMERD(DEBUG_LOG(info->line, "timeout: rstat 0x%03X\n", 2096 TIMERD(DEBUG_LOG(info->line, "timeout: rstat 0x%03X\n",
2097 rstat | (info->line << 8))); 2097 rstat | (info->line << 8)));
2098 /* Read data to clear status flags */ 2098 /* Read data to clear status flags */
2099 (void)info->port[REG_DATA]; 2099 (void)info->ioport[REG_DATA];
2100 2100
2101 info->forced_eop = 0; 2101 info->forced_eop = 0;
2102 START_FLUSH_FAST_TIMER(info, "magic"); 2102 START_FLUSH_FAST_TIMER(info, "magic");
@@ -2296,7 +2296,7 @@ struct e100_serial * handle_ser_rx_interrupt_no_dma(struct e100_serial *info)
2296 } 2296 }
2297 2297
2298 /* Read data and status at the same time */ 2298 /* Read data and status at the same time */
2299 data_read = *((unsigned long *)&info->port[REG_DATA_STATUS32]); 2299 data_read = *((unsigned long *)&info->ioport[REG_DATA_STATUS32]);
2300more_data: 2300more_data:
2301 if (data_read & IO_MASK(R_SERIAL0_READ, xoff_detect) ) { 2301 if (data_read & IO_MASK(R_SERIAL0_READ, xoff_detect) ) {
2302 DFLOW(DEBUG_LOG(info->line, "XOFF detect\n", 0)); 2302 DFLOW(DEBUG_LOG(info->line, "XOFF detect\n", 0));
@@ -2391,7 +2391,7 @@ more_data:
2391 2391
2392 2392
2393 info->icount.rx++; 2393 info->icount.rx++;
2394 data_read = *((unsigned long *)&info->port[REG_DATA_STATUS32]); 2394 data_read = *((unsigned long *)&info->ioport[REG_DATA_STATUS32]);
2395 if (data_read & IO_MASK(R_SERIAL0_READ, data_avail)) { 2395 if (data_read & IO_MASK(R_SERIAL0_READ, data_avail)) {
2396 DEBUG_LOG(info->line, "ser_rx %c in loop\n", IO_EXTRACT(R_SERIAL0_READ, data_in, data_read)); 2396 DEBUG_LOG(info->line, "ser_rx %c in loop\n", IO_EXTRACT(R_SERIAL0_READ, data_in, data_read));
2397 goto more_data; 2397 goto more_data;
@@ -2413,7 +2413,7 @@ static struct e100_serial* handle_ser_rx_interrupt(struct e100_serial *info)
2413 return handle_ser_rx_interrupt_no_dma(info); 2413 return handle_ser_rx_interrupt_no_dma(info);
2414 } 2414 }
2415 /* DMA is used */ 2415 /* DMA is used */
2416 rstat = info->port[REG_STATUS]; 2416 rstat = info->ioport[REG_STATUS];
2417 if (rstat & IO_MASK(R_SERIAL0_STATUS, xoff_detect) ) { 2417 if (rstat & IO_MASK(R_SERIAL0_STATUS, xoff_detect) ) {
2418 DFLOW(DEBUG_LOG(info->line, "XOFF detect\n", 0)); 2418 DFLOW(DEBUG_LOG(info->line, "XOFF detect\n", 0));
2419 } 2419 }
@@ -2426,7 +2426,7 @@ static struct e100_serial* handle_ser_rx_interrupt(struct e100_serial *info)
2426 /* If we got an error, we must reset it by reading the 2426 /* If we got an error, we must reset it by reading the
2427 * data_in field 2427 * data_in field
2428 */ 2428 */
2429 data = info->port[REG_DATA]; 2429 data = info->ioport[REG_DATA];
2430 DINTR1(DEBUG_LOG(info->line, "ser_rx! %c\n", data)); 2430 DINTR1(DEBUG_LOG(info->line, "ser_rx! %c\n", data));
2431 DINTR1(DEBUG_LOG(info->line, "ser_rx err stat %02X\n", rstat)); 2431 DINTR1(DEBUG_LOG(info->line, "ser_rx err stat %02X\n", rstat));
2432 if (!data && (rstat & SER_FRAMING_ERR_MASK)) { 2432 if (!data && (rstat & SER_FRAMING_ERR_MASK)) {
@@ -2528,10 +2528,10 @@ static void handle_ser_tx_interrupt(struct e100_serial *info)
2528 unsigned char rstat; 2528 unsigned char rstat;
2529 DFLOW(DEBUG_LOG(info->line, "tx_int: xchar 0x%02X\n", info->x_char)); 2529 DFLOW(DEBUG_LOG(info->line, "tx_int: xchar 0x%02X\n", info->x_char));
2530 local_irq_save(flags); 2530 local_irq_save(flags);
2531 rstat = info->port[REG_STATUS]; 2531 rstat = info->ioport[REG_STATUS];
2532 DFLOW(DEBUG_LOG(info->line, "stat %x\n", rstat)); 2532 DFLOW(DEBUG_LOG(info->line, "stat %x\n", rstat));
2533 2533
2534 info->port[REG_TR_DATA] = info->x_char; 2534 info->ioport[REG_TR_DATA] = info->x_char;
2535 info->icount.tx++; 2535 info->icount.tx++;
2536 info->x_char = 0; 2536 info->x_char = 0;
2537 /* We must enable since it is disabled in ser_interrupt */ 2537 /* We must enable since it is disabled in ser_interrupt */
@@ -2545,7 +2545,7 @@ static void handle_ser_tx_interrupt(struct e100_serial *info)
2545 /* We only use normal tx interrupt when sending x_char */ 2545 /* We only use normal tx interrupt when sending x_char */
2546 DFLOW(DEBUG_LOG(info->line, "tx_int: xchar sent\n", 0)); 2546 DFLOW(DEBUG_LOG(info->line, "tx_int: xchar sent\n", 0));
2547 local_irq_save(flags); 2547 local_irq_save(flags);
2548 rstat = info->port[REG_STATUS]; 2548 rstat = info->ioport[REG_STATUS];
2549 DFLOW(DEBUG_LOG(info->line, "stat %x\n", rstat)); 2549 DFLOW(DEBUG_LOG(info->line, "stat %x\n", rstat));
2550 e100_disable_serial_tx_ready_irq(info); 2550 e100_disable_serial_tx_ready_irq(info);
2551 if (info->port.tty->stopped) 2551 if (info->port.tty->stopped)
@@ -2573,7 +2573,7 @@ static void handle_ser_tx_interrupt(struct e100_serial *info)
2573 DINTR2(DEBUG_LOG(info->line, "tx_int %c\n", info->xmit.buf[info->xmit.tail])); 2573 DINTR2(DEBUG_LOG(info->line, "tx_int %c\n", info->xmit.buf[info->xmit.tail]));
2574 /* Send a byte, rs485 timing is critical so turn of ints */ 2574 /* Send a byte, rs485 timing is critical so turn of ints */
2575 local_irq_save(flags); 2575 local_irq_save(flags);
2576 info->port[REG_TR_DATA] = info->xmit.buf[info->xmit.tail]; 2576 info->ioport[REG_TR_DATA] = info->xmit.buf[info->xmit.tail];
2577 info->xmit.tail = (info->xmit.tail + 1) & (SERIAL_XMIT_SIZE-1); 2577 info->xmit.tail = (info->xmit.tail + 1) & (SERIAL_XMIT_SIZE-1);
2578 info->icount.tx++; 2578 info->icount.tx++;
2579 if (info->xmit.head == info->xmit.tail) { 2579 if (info->xmit.head == info->xmit.tail) {
@@ -2848,7 +2848,7 @@ startup(struct e100_serial * info)
2848 2848
2849 /* dummy read to reset any serial errors */ 2849 /* dummy read to reset any serial errors */
2850 2850
2851 (void)info->port[REG_DATA]; 2851 (void)info->ioport[REG_DATA];
2852 2852
2853 /* enable the interrupts */ 2853 /* enable the interrupts */
2854 if (info->uses_dma_out) 2854 if (info->uses_dma_out)
@@ -2897,7 +2897,7 @@ shutdown(struct e100_serial * info)
2897 /* shut down the transmitter and receiver */ 2897 /* shut down the transmitter and receiver */
2898 DFLOW(DEBUG_LOG(info->line, "shutdown %i\n", info->line)); 2898 DFLOW(DEBUG_LOG(info->line, "shutdown %i\n", info->line));
2899 e100_disable_rx(info); 2899 e100_disable_rx(info);
2900 info->port[REG_TR_CTRL] = (info->tx_ctrl &= ~0x40); 2900 info->ioport[REG_TR_CTRL] = (info->tx_ctrl &= ~0x40);
2901 2901
2902 /* disable interrupts, reset dma channels */ 2902 /* disable interrupts, reset dma channels */
2903 if (info->uses_dma_in) { 2903 if (info->uses_dma_in) {
@@ -2968,7 +2968,7 @@ change_speed(struct e100_serial *info)
2968 2968
2969 if (!info->port.tty || !info->port.tty->termios) 2969 if (!info->port.tty || !info->port.tty->termios)
2970 return; 2970 return;
2971 if (!info->port) 2971 if (!info->ioport)
2972 return; 2972 return;
2973 2973
2974 cflag = info->port.tty->termios->c_cflag; 2974 cflag = info->port.tty->termios->c_cflag;
@@ -3037,7 +3037,7 @@ change_speed(struct e100_serial *info)
3037 3037
3038 info->baud = cflag_to_baud(cflag); 3038 info->baud = cflag_to_baud(cflag);
3039#ifndef CONFIG_SVINTO_SIM 3039#ifndef CONFIG_SVINTO_SIM
3040 info->port[REG_BAUD] = cflag_to_etrax_baud(cflag); 3040 info->ioport[REG_BAUD] = cflag_to_etrax_baud(cflag);
3041#endif /* CONFIG_SVINTO_SIM */ 3041#endif /* CONFIG_SVINTO_SIM */
3042 } 3042 }
3043 3043
@@ -3097,8 +3097,8 @@ change_speed(struct e100_serial *info)
3097 3097
3098 /* actually write the control regs to the hardware */ 3098 /* actually write the control regs to the hardware */
3099 3099
3100 info->port[REG_TR_CTRL] = info->tx_ctrl; 3100 info->ioport[REG_TR_CTRL] = info->tx_ctrl;
3101 info->port[REG_REC_CTRL] = info->rx_ctrl; 3101 info->ioport[REG_REC_CTRL] = info->rx_ctrl;
3102 xoff = IO_FIELD(R_SERIAL0_XOFF, xoff_char, STOP_CHAR(info->port.tty)); 3102 xoff = IO_FIELD(R_SERIAL0_XOFF, xoff_char, STOP_CHAR(info->port.tty));
3103 xoff |= IO_STATE(R_SERIAL0_XOFF, tx_stop, enable); 3103 xoff |= IO_STATE(R_SERIAL0_XOFF, tx_stop, enable);
3104 if (info->port.tty->termios->c_iflag & IXON ) { 3104 if (info->port.tty->termios->c_iflag & IXON ) {
@@ -3107,7 +3107,7 @@ change_speed(struct e100_serial *info)
3107 xoff |= IO_STATE(R_SERIAL0_XOFF, auto_xoff, enable); 3107 xoff |= IO_STATE(R_SERIAL0_XOFF, auto_xoff, enable);
3108 } 3108 }
3109 3109
3110 *((unsigned long *)&info->port[REG_XOFF]) = xoff; 3110 *((unsigned long *)&info->ioport[REG_XOFF]) = xoff;
3111 local_irq_restore(flags); 3111 local_irq_restore(flags);
3112#endif /* !CONFIG_SVINTO_SIM */ 3112#endif /* !CONFIG_SVINTO_SIM */
3113 3113
@@ -3156,7 +3156,7 @@ static int rs_raw_write(struct tty_struct *tty,
3156#ifdef SERIAL_DEBUG_DATA 3156#ifdef SERIAL_DEBUG_DATA
3157 if (info->line == SERIAL_DEBUG_LINE) 3157 if (info->line == SERIAL_DEBUG_LINE)
3158 printk("rs_raw_write (%d), status %d\n", 3158 printk("rs_raw_write (%d), status %d\n",
3159 count, info->port[REG_STATUS]); 3159 count, info->ioport[REG_STATUS]);
3160#endif 3160#endif
3161 3161
3162#ifdef CONFIG_SVINTO_SIM 3162#ifdef CONFIG_SVINTO_SIM
@@ -3427,7 +3427,7 @@ get_serial_info(struct e100_serial * info,
3427 memset(&tmp, 0, sizeof(tmp)); 3427 memset(&tmp, 0, sizeof(tmp));
3428 tmp.type = info->type; 3428 tmp.type = info->type;
3429 tmp.line = info->line; 3429 tmp.line = info->line;
3430 tmp.port = (int)info->port; 3430 tmp.port = (int)info->ioport;
3431 tmp.irq = info->irq; 3431 tmp.irq = info->irq;
3432 tmp.flags = info->flags; 3432 tmp.flags = info->flags;
3433 tmp.baud_base = info->baud_base; 3433 tmp.baud_base = info->baud_base;
@@ -3557,14 +3557,14 @@ char *get_control_state_str(int MLines, char *s)
3557} 3557}
3558#endif 3558#endif
3559 3559
3560static void 3560static int
3561rs_break(struct tty_struct *tty, int break_state) 3561rs_break(struct tty_struct *tty, int break_state)
3562{ 3562{
3563 struct e100_serial *info = (struct e100_serial *)tty->driver_data; 3563 struct e100_serial *info = (struct e100_serial *)tty->driver_data;
3564 unsigned long flags; 3564 unsigned long flags;
3565 3565
3566 if (!info->port) 3566 if (!info->ioport)
3567 return; 3567 return -EIO;
3568 3568
3569 local_irq_save(flags); 3569 local_irq_save(flags);
3570 if (break_state == -1) { 3570 if (break_state == -1) {
@@ -3575,8 +3575,9 @@ rs_break(struct tty_struct *tty, int break_state)
3575 /* Set bit 7 (txd) and 6 (tr_enable) */ 3575 /* Set bit 7 (txd) and 6 (tr_enable) */
3576 info->tx_ctrl |= (0x80 | 0x40); 3576 info->tx_ctrl |= (0x80 | 0x40);
3577 } 3577 }
3578 info->port[REG_TR_CTRL] = info->tx_ctrl; 3578 info->ioport[REG_TR_CTRL] = info->tx_ctrl;
3579 local_irq_restore(flags); 3579 local_irq_restore(flags);
3580 return 0;
3580} 3581}
3581 3582
3582static int 3583static int
@@ -4231,9 +4232,9 @@ static int line_info(char *buf, struct e100_serial *info)
4231 unsigned long tmp; 4232 unsigned long tmp;
4232 4233
4233 ret = sprintf(buf, "%d: uart:E100 port:%lX irq:%d", 4234 ret = sprintf(buf, "%d: uart:E100 port:%lX irq:%d",
4234 info->line, (unsigned long)info->port, info->irq); 4235 info->line, (unsigned long)info->ioport, info->irq);
4235 4236
4236 if (!info->port || (info->type == PORT_UNKNOWN)) { 4237 if (!info->ioport || (info->type == PORT_UNKNOWN)) {
4237 ret += sprintf(buf+ret, "\n"); 4238 ret += sprintf(buf+ret, "\n");
4238 return ret; 4239 return ret;
4239 } 4240 }
@@ -4281,7 +4282,7 @@ static int line_info(char *buf, struct e100_serial *info)
4281 } 4282 }
4282 4283
4283 { 4284 {
4284 unsigned char rstat = info->port[REG_STATUS]; 4285 unsigned char rstat = info->ioport[REG_STATUS];
4285 if (rstat & IO_MASK(R_SERIAL0_STATUS, xoff_detect) ) 4286 if (rstat & IO_MASK(R_SERIAL0_STATUS, xoff_detect) )
4286 ret += sprintf(buf+ret, " xoff_detect:1"); 4287 ret += sprintf(buf+ret, " xoff_detect:1");
4287 } 4288 }
@@ -4502,7 +4503,7 @@ rs_init(void)
4502 4503
4503 if (info->enabled) { 4504 if (info->enabled) {
4504 printk(KERN_INFO "%s%d at 0x%x is a builtin UART with DMA\n", 4505 printk(KERN_INFO "%s%d at 0x%x is a builtin UART with DMA\n",
4505 serial_driver->name, info->line, (unsigned int)info->port); 4506 serial_driver->name, info->line, (unsigned int)info->ioport);
4506 } 4507 }
4507 } 4508 }
4508#ifdef CONFIG_ETRAX_FAST_TIMER 4509#ifdef CONFIG_ETRAX_FAST_TIMER
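
The rs_break() change above follows the tty layer's int-returning break_ctl prototype: return -EIO when the hardware is unreachable, 0 once the break state has been programmed. A minimal sketch of a conforming handler; the driver structure, register write and bit values are illustrative assumptions, not taken from the patch:

#include <linux/tty.h>
#include <linux/errno.h>
#include <linux/io.h>

/* Hypothetical per-port state, only for this example. */
struct ex_serial {
	void __iomem *regs;		/* NULL until the port is mapped */
	u8 tx_ctrl;			/* shadow of the transmit control register */
};

static int ex_break_ctl(struct tty_struct *tty, int break_state)
{
	struct ex_serial *info = tty->driver_data;

	if (!info->regs)
		return -EIO;		/* hardware not available */

	if (break_state == -1)
		info->tx_ctrl &= ~0xc0;	/* assumed txd/tr_enable bits: start break */
	else
		info->tx_ctrl |= 0xc0;	/* stop break */
	writeb(info->tx_ctrl, info->regs);

	return 0;			/* the tty core propagates this value */
}
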
diff --git a/drivers/serial/crisv10.h b/drivers/serial/crisv10.h
index ccd0f32b7372..e3c5c8c3c09b 100644
--- a/drivers/serial/crisv10.h
+++ b/drivers/serial/crisv10.h
@@ -36,8 +36,9 @@ struct etrax_recv_buffer {
36}; 36};
37 37
38struct e100_serial { 38struct e100_serial {
39 struct tty_port port;
39 int baud; 40 int baud;
40 volatile u8 *port; /* R_SERIALx_CTRL */ 41 volatile u8 *ioport; /* R_SERIALx_CTRL */
41 u32 irq; /* bitnr in R_IRQ_MASK2 for dmaX_descr */ 42 u32 irq; /* bitnr in R_IRQ_MASK2 for dmaX_descr */
42 43
43 /* Output registers */ 44 /* Output registers */
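
With the header change above, generic tty state lives in an embedded struct tty_port while the memory-mapped control registers are reached through the renamed ioport pointer. A minimal sketch of that split; the register offset and helper names are assumptions for illustration:

#include <linux/tty.h>

#define EX_REG_STATUS	1		/* assumed register offset */

struct ex_e100_serial {
	struct tty_port port;		/* tty-layer state: port.tty, flags, ... */
	int baud;
	volatile u8 *ioport;		/* base of the R_SERIALx_CTRL registers */
};

static inline u8 ex_read_status(struct ex_e100_serial *info)
{
	/* hardware accesses go through ioport[] ... */
	return info->ioport[EX_REG_STATUS];
}

static inline int ex_tty_stopped(struct ex_e100_serial *info)
{
	/* ... while tty state is reached via the embedded tty_port */
	return info->port.tty && info->port.tty->stopped;
}
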
diff --git a/drivers/serial/imx.c b/drivers/serial/imx.c
index e0da4dc7bbf6..6a29f9330a73 100644
--- a/drivers/serial/imx.c
+++ b/drivers/serial/imx.c
@@ -44,8 +44,8 @@
44 44
45#include <asm/io.h> 45#include <asm/io.h>
46#include <asm/irq.h> 46#include <asm/irq.h>
47#include <asm/hardware.h> 47#include <mach/hardware.h>
48#include <asm/arch/imx-uart.h> 48#include <mach/imx-uart.h>
49 49
50/* Register definitions */ 50/* Register definitions */
51#define URXD0 0x0 /* Receiver Register */ 51#define URXD0 0x0 /* Receiver Register */
diff --git a/drivers/serial/netx-serial.c b/drivers/serial/netx-serial.c
index 9f8ccb735c19..3f489329e8d3 100644
--- a/drivers/serial/netx-serial.c
+++ b/drivers/serial/netx-serial.c
@@ -35,8 +35,8 @@
35 35
36#include <asm/io.h> 36#include <asm/io.h>
37#include <asm/irq.h> 37#include <asm/irq.h>
38#include <asm/hardware.h> 38#include <mach/hardware.h>
39#include <asm/arch/netx-regs.h> 39#include <mach/netx-regs.h>
40 40
41/* We've been assigned a range on the "Low-density serial ports" major */ 41/* We've been assigned a range on the "Low-density serial ports" major */
42#define SERIAL_NX_MAJOR 204 42#define SERIAL_NX_MAJOR 204
diff --git a/drivers/serial/pxa.c b/drivers/serial/pxa.c
index b9a93f326fb8..f7a0d37c4221 100644
--- a/drivers/serial/pxa.c
+++ b/drivers/serial/pxa.c
@@ -45,9 +45,9 @@
45#include <linux/clk.h> 45#include <linux/clk.h>
46 46
47#include <asm/io.h> 47#include <asm/io.h>
48#include <asm/hardware.h> 48#include <mach/hardware.h>
49#include <asm/irq.h> 49#include <asm/irq.h>
50#include <asm/arch/pxa-regs.h> 50#include <mach/pxa-regs.h>
51 51
52 52
53struct uart_pxa_port { 53struct uart_pxa_port {
diff --git a/drivers/serial/s3c2400.c b/drivers/serial/s3c2400.c
index a1102053e553..c8b4266ac35f 100644
--- a/drivers/serial/s3c2400.c
+++ b/drivers/serial/s3c2400.c
@@ -17,10 +17,10 @@
17 17
18#include <asm/irq.h> 18#include <asm/irq.h>
19 19
20#include <asm/hardware.h> 20#include <mach/hardware.h>
21 21
22#include <asm/plat-s3c/regs-serial.h> 22#include <asm/plat-s3c/regs-serial.h>
23#include <asm/arch/regs-gpio.h> 23#include <mach/regs-gpio.h>
24 24
25#include "samsung.h" 25#include "samsung.h"
26 26
diff --git a/drivers/serial/s3c2410.c b/drivers/serial/s3c2410.c
index c5f03f41686f..40a2531b5541 100644
--- a/drivers/serial/s3c2410.c
+++ b/drivers/serial/s3c2410.c
@@ -19,10 +19,10 @@
19#include <linux/serial.h> 19#include <linux/serial.h>
20 20
21#include <asm/irq.h> 21#include <asm/irq.h>
22#include <asm/hardware.h> 22#include <mach/hardware.h>
23 23
24#include <asm/plat-s3c/regs-serial.h> 24#include <asm/plat-s3c/regs-serial.h>
25#include <asm/arch/regs-gpio.h> 25#include <mach/regs-gpio.h>
26 26
27#include "samsung.h" 27#include "samsung.h"
28 28
diff --git a/drivers/serial/s3c2412.c b/drivers/serial/s3c2412.c
index ce0c220e3e92..d0170319c729 100644
--- a/drivers/serial/s3c2412.c
+++ b/drivers/serial/s3c2412.c
@@ -19,10 +19,10 @@
19#include <linux/serial.h> 19#include <linux/serial.h>
20 20
21#include <asm/irq.h> 21#include <asm/irq.h>
22#include <asm/hardware.h> 22#include <mach/hardware.h>
23 23
24#include <asm/plat-s3c/regs-serial.h> 24#include <asm/plat-s3c/regs-serial.h>
25#include <asm/arch/regs-gpio.h> 25#include <mach/regs-gpio.h>
26 26
27#include "samsung.h" 27#include "samsung.h"
28 28
diff --git a/drivers/serial/s3c2440.c b/drivers/serial/s3c2440.c
index 38f954bd39c6..d4a2b17b2498 100644
--- a/drivers/serial/s3c2440.c
+++ b/drivers/serial/s3c2440.c
@@ -19,10 +19,10 @@
19#include <linux/serial.h> 19#include <linux/serial.h>
20 20
21#include <asm/irq.h> 21#include <asm/irq.h>
22#include <asm/hardware.h> 22#include <mach/hardware.h>
23 23
24#include <asm/plat-s3c/regs-serial.h> 24#include <asm/plat-s3c/regs-serial.h>
25#include <asm/arch/regs-gpio.h> 25#include <mach/regs-gpio.h>
26 26
27#include "samsung.h" 27#include "samsung.h"
28 28
diff --git a/drivers/serial/sa1100.c b/drivers/serial/sa1100.c
index a5e76cc18073..b24a25ea6bc5 100644
--- a/drivers/serial/sa1100.c
+++ b/drivers/serial/sa1100.c
@@ -39,7 +39,7 @@
39 39
40#include <asm/io.h> 40#include <asm/io.h>
41#include <asm/irq.h> 41#include <asm/irq.h>
42#include <asm/hardware.h> 42#include <mach/hardware.h>
43#include <asm/mach/serial_sa1100.h> 43#include <asm/mach/serial_sa1100.h>
44 44
45/* We've been assigned a range on the "Low-density serial ports" major */ 45/* We've been assigned a range on the "Low-density serial ports" major */
diff --git a/drivers/serial/samsung.c b/drivers/serial/samsung.c
index d852f83f8900..5a88b3f9fe9b 100644
--- a/drivers/serial/samsung.c
+++ b/drivers/serial/samsung.c
@@ -45,10 +45,10 @@
45 45
46#include <asm/irq.h> 46#include <asm/irq.h>
47 47
48#include <asm/hardware.h> 48#include <mach/hardware.h>
49 49
50#include <asm/plat-s3c/regs-serial.h> 50#include <asm/plat-s3c/regs-serial.h>
51#include <asm/arch/regs-gpio.h> 51#include <mach/regs-gpio.h>
52 52
53#include "samsung.h" 53#include "samsung.h"
54 54
diff --git a/drivers/serial/serial_ks8695.c b/drivers/serial/serial_ks8695.c
index 0edbc5dd378b..b9cbfc87f616 100644
--- a/drivers/serial/serial_ks8695.c
+++ b/drivers/serial/serial_ks8695.c
@@ -26,8 +26,8 @@
26#include <asm/irq.h> 26#include <asm/irq.h>
27#include <asm/mach/irq.h> 27#include <asm/mach/irq.h>
28 28
29#include <asm/arch/regs-uart.h> 29#include <mach/regs-uart.h>
30#include <asm/arch/regs-irq.h> 30#include <mach/regs-irq.h>
31 31
32#if defined(CONFIG_SERIAL_KS8695_CONSOLE) && defined(CONFIG_MAGIC_SYSRQ) 32#if defined(CONFIG_SERIAL_KS8695_CONSOLE) && defined(CONFIG_MAGIC_SYSRQ)
33#define SUPPORT_SYSRQ 33#define SUPPORT_SYSRQ
diff --git a/drivers/serial/sh-sci.h b/drivers/serial/sh-sci.h
index cd728df6a01a..8a0749e34ca3 100644
--- a/drivers/serial/sh-sci.h
+++ b/drivers/serial/sh-sci.h
@@ -451,19 +451,21 @@ SCIx_FNS(SCxSR, 0x08, 8, 0x10, 8, 0x08, 16, 0x10, 16, 0x04, 8)
451SCIx_FNS(SCxRDR, 0x0a, 8, 0x14, 8, 0x0A, 8, 0x14, 8, 0x05, 8) 451SCIx_FNS(SCxRDR, 0x0a, 8, 0x14, 8, 0x0A, 8, 0x14, 8, 0x05, 8)
452SCIF_FNS(SCFCR, 0x0c, 8, 0x18, 16) 452SCIF_FNS(SCFCR, 0x0c, 8, 0x18, 16)
453#if defined(CONFIG_CPU_SUBTYPE_SH7760) || \ 453#if defined(CONFIG_CPU_SUBTYPE_SH7760) || \
454 defined(CONFIG_CPU_SUBTYPE_SH7763) || \
455 defined(CONFIG_CPU_SUBTYPE_SH7780) || \ 454 defined(CONFIG_CPU_SUBTYPE_SH7780) || \
456 defined(CONFIG_CPU_SUBTYPE_SH7785) 455 defined(CONFIG_CPU_SUBTYPE_SH7785)
456SCIF_FNS(SCFDR, 0x0e, 16, 0x1C, 16)
457SCIF_FNS(SCTFDR, 0x0e, 16, 0x1C, 16) 457SCIF_FNS(SCTFDR, 0x0e, 16, 0x1C, 16)
458SCIF_FNS(SCRFDR, 0x0e, 16, 0x20, 16) 458SCIF_FNS(SCRFDR, 0x0e, 16, 0x20, 16)
459SCIF_FNS(SCSPTR, 0, 0, 0x24, 16) 459SCIF_FNS(SCSPTR, 0, 0, 0x24, 16)
460SCIF_FNS(SCLSR, 0, 0, 0x28, 16) 460SCIF_FNS(SCLSR, 0, 0, 0x28, 16)
461#if defined(CONFIG_CPU_SUBTYPE_SH7763) 461#elif defined(CONFIG_CPU_SUBTYPE_SH7763)
462/* SH7763 SCIF2 */
463SCIF_FNS(SCFDR, 0, 0, 0x1C, 16) 462SCIF_FNS(SCFDR, 0, 0, 0x1C, 16)
464SCIF_FNS(SCSPTR2, 0, 0, 0x20, 16) 463SCIF_FNS(SCSPTR2, 0, 0, 0x20, 16)
465SCIF_FNS(SCLSR2, 0, 0, 0x24, 16) 464SCIF_FNS(SCLSR2, 0, 0, 0x24, 16)
466#endif /* CONFIG_CPU_SUBTYPE_SH7763 */ 465SCIF_FNS(SCTFDR, 0x0e, 16, 0x1C, 16)
466SCIF_FNS(SCRFDR, 0x0e, 16, 0x20, 16)
467SCIF_FNS(SCSPTR, 0, 0, 0x24, 16)
468SCIF_FNS(SCLSR, 0, 0, 0x28, 16)
467#else 469#else
468SCIF_FNS(SCFDR, 0x0e, 16, 0x1C, 16) 470SCIF_FNS(SCFDR, 0x0e, 16, 0x1C, 16)
469#if defined(CONFIG_CPU_SUBTYPE_SH7722) 471#if defined(CONFIG_CPU_SUBTYPE_SH7722)
diff --git a/drivers/serial/v850e_uart.c b/drivers/serial/v850e_uart.c
deleted file mode 100644
index 5acf061b6cd2..000000000000
--- a/drivers/serial/v850e_uart.c
+++ /dev/null
@@ -1,548 +0,0 @@
1/*
2 * drivers/serial/v850e_uart.c -- Serial I/O using V850E on-chip UART or UARTB
3 *
4 * Copyright (C) 2001,02,03 NEC Electronics Corporation
5 * Copyright (C) 2001,02,03 Miles Bader <miles@gnu.org>
6 *
7 * This file is subject to the terms and conditions of the GNU General
8 * Public License. See the file COPYING in the main directory of this
9 * archive for more details.
10 *
11 * Written by Miles Bader <miles@gnu.org>
12 */
13
14/* This driver supports both the original V850E UART interface (called
15 merely `UART' in the docs) and the newer `UARTB' interface, which is
16 roughly a superset of the first one. The selection is made at
17 configure time -- if CONFIG_V850E_UARTB is defined, then UARTB is
18 presumed, otherwise the old UART -- as these are on-CPU UARTS, a system
19 can never have both.
20
21 The UARTB interface also has a 16-entry FIFO mode, which is not
22 yet supported by this driver. */
23
24#include <linux/kernel.h>
25#include <linux/init.h>
26#include <linux/module.h>
27#include <linux/console.h>
28#include <linux/tty.h>
29#include <linux/tty_flip.h>
30#include <linux/serial.h>
31#include <linux/serial_core.h>
32
33#include <asm/v850e_uart.h>
34
35/* Initial UART state. This may be overridden by machine-dependent headers. */
36#ifndef V850E_UART_INIT_BAUD
37#define V850E_UART_INIT_BAUD 115200
38#endif
39#ifndef V850E_UART_INIT_CFLAGS
40#define V850E_UART_INIT_CFLAGS (B115200 | CS8 | CREAD)
41#endif
42
43/* A string used for prefixing printed descriptions; since the same UART
44 macro is actually used on other chips than the V850E. This must be a
45 constant string. */
46#ifndef V850E_UART_CHIP_NAME
47#define V850E_UART_CHIP_NAME "V850E"
48#endif
49
50#define V850E_UART_MINOR_BASE 64 /* First tty minor number */
51
52
53/* Low-level UART functions. */
54
55/* Configure and turn on uart channel CHAN, using the termios `control
56 modes' bits in CFLAGS, and a baud-rate of BAUD. */
57void v850e_uart_configure (unsigned chan, unsigned cflags, unsigned baud)
58{
59 int flags;
60 v850e_uart_speed_t old_speed;
61 v850e_uart_config_t old_config;
62 v850e_uart_speed_t new_speed = v850e_uart_calc_speed (baud);
63 v850e_uart_config_t new_config = v850e_uart_calc_config (cflags);
64
65 /* Disable interrupts while we're twiddling the hardware. */
66 local_irq_save (flags);
67
68#ifdef V850E_UART_PRE_CONFIGURE
69 V850E_UART_PRE_CONFIGURE (chan, cflags, baud);
70#endif
71
72 old_config = V850E_UART_CONFIG (chan);
73 old_speed = v850e_uart_speed (chan);
74
75 if (! v850e_uart_speed_eq (old_speed, new_speed)) {
76 /* The baud rate has changed. First, disable the UART. */
77 V850E_UART_CONFIG (chan) = V850E_UART_CONFIG_FINI;
78 old_config = 0; /* Force the uart to be re-initialized. */
79
80 /* Reprogram the baud-rate generator. */
81 v850e_uart_set_speed (chan, new_speed);
82 }
83
84 if (! (old_config & V850E_UART_CONFIG_ENABLED)) {
85 /* If we are using the uart for the first time, start by
86 enabling it, which must be done before turning on any
87 other bits. */
88 V850E_UART_CONFIG (chan) = V850E_UART_CONFIG_INIT;
89 /* See the initial state. */
90 old_config = V850E_UART_CONFIG (chan);
91 }
92
93 if (new_config != old_config) {
94 /* Which of the TXE/RXE bits we'll temporarily turn off
95 before changing other control bits. */
96 unsigned temp_disable = 0;
97 /* Which of the TXE/RXE bits will be enabled. */
98 unsigned enable = 0;
99 unsigned changed_bits = new_config ^ old_config;
100
101 /* Which of RX/TX will be enabled in the new configuration. */
102 if (new_config & V850E_UART_CONFIG_RX_BITS)
103 enable |= (new_config & V850E_UART_CONFIG_RX_ENABLE);
104 if (new_config & V850E_UART_CONFIG_TX_BITS)
105 enable |= (new_config & V850E_UART_CONFIG_TX_ENABLE);
106
107 /* Figure out which of RX/TX needs to be disabled; note
108 that this will only happen if they're not already
109 disabled. */
110 if (changed_bits & V850E_UART_CONFIG_RX_BITS)
111 temp_disable
112 |= (old_config & V850E_UART_CONFIG_RX_ENABLE);
113 if (changed_bits & V850E_UART_CONFIG_TX_BITS)
114 temp_disable
115 |= (old_config & V850E_UART_CONFIG_TX_ENABLE);
116
117 /* We have to turn off RX and/or TX mode before changing
118 any associated control bits. */
119 if (temp_disable)
120 V850E_UART_CONFIG (chan) = old_config & ~temp_disable;
121
122 /* Write the new control bits, while RX/TX are disabled. */
123 if (changed_bits & ~enable)
124 V850E_UART_CONFIG (chan) = new_config & ~enable;
125
126 v850e_uart_config_delay (new_config, new_speed);
127
128 /* Write the final version, with enable bits turned on. */
129 V850E_UART_CONFIG (chan) = new_config;
130 }
131
132 local_irq_restore (flags);
133}
134
135
136/* Low-level console. */
137
138#ifdef CONFIG_V850E_UART_CONSOLE
139
140static void v850e_uart_cons_write (struct console *co,
141 const char *s, unsigned count)
142{
143 if (count > 0) {
144 unsigned chan = co->index;
145 unsigned irq = V850E_UART_TX_IRQ (chan);
146 int irq_was_enabled, irq_was_pending, flags;
147
148 /* We don't want to get `transmission completed'
149 interrupts, since we're busy-waiting, so we disable them
150 while sending (we don't disable interrupts entirely
151 because sending over a serial line is really slow). We
152 save the status of the tx interrupt and restore it when
153 we're done so that using printk doesn't interfere with
154 normal serial transmission (other than interleaving the
155 output, of course!). This should work correctly even if
156 this function is interrupted and the interrupt printks
157 something. */
158
159 /* Disable interrupts while fiddling with tx interrupt. */
160 local_irq_save (flags);
161 /* Get current tx interrupt status. */
162 irq_was_enabled = v850e_intc_irq_enabled (irq);
163 irq_was_pending = v850e_intc_irq_pending (irq);
164 /* Disable tx interrupt if necessary. */
165 if (irq_was_enabled)
166 v850e_intc_disable_irq (irq);
167 /* Turn interrupts back on. */
168 local_irq_restore (flags);
169
170 /* Send characters. */
171 while (count > 0) {
172 int ch = *s++;
173
174 if (ch == '\n') {
175 /* We don't have the benefit of a tty
176 driver, so translate NL into CR LF. */
177 v850e_uart_wait_for_xmit_ok (chan);
178 v850e_uart_putc (chan, '\r');
179 }
180
181 v850e_uart_wait_for_xmit_ok (chan);
182 v850e_uart_putc (chan, ch);
183
184 count--;
185 }
186
187 /* Restore saved tx interrupt status. */
188 if (irq_was_enabled) {
189 /* Wait for the last character we sent to be
190 completely transmitted (as we'll get an
191 interrupt interrupt at that point). */
192 v850e_uart_wait_for_xmit_done (chan);
193 /* Clear pending interrupts received due
194 to our transmission, unless there was already
195 one pending, in which case we want the
196 handler to be called. */
197 if (! irq_was_pending)
198 v850e_intc_clear_pending_irq (irq);
199 /* ... and then turn back on handling. */
200 v850e_intc_enable_irq (irq);
201 }
202 }
203}
204
205extern struct uart_driver v850e_uart_driver;
206static struct console v850e_uart_cons =
207{
208 .name = "ttyS",
209 .write = v850e_uart_cons_write,
210 .device = uart_console_device,
211 .flags = CON_PRINTBUFFER,
212 .cflag = V850E_UART_INIT_CFLAGS,
213 .index = -1,
214 .data = &v850e_uart_driver,
215};
216
217void v850e_uart_cons_init (unsigned chan)
218{
219 v850e_uart_configure (chan, V850E_UART_INIT_CFLAGS,
220 V850E_UART_INIT_BAUD);
221 v850e_uart_cons.index = chan;
222 register_console (&v850e_uart_cons);
223 printk ("Console: %s on-chip UART channel %d\n",
224 V850E_UART_CHIP_NAME, chan);
225}
226
227/* This is what the init code actually calls. */
228static int v850e_uart_console_init (void)
229{
230 v850e_uart_cons_init (V850E_UART_CONSOLE_CHANNEL);
231 return 0;
232}
233console_initcall(v850e_uart_console_init);
234
235#define V850E_UART_CONSOLE &v850e_uart_cons
236
237#else /* !CONFIG_V850E_UART_CONSOLE */
238#define V850E_UART_CONSOLE 0
239#endif /* CONFIG_V850E_UART_CONSOLE */
240
241/* TX/RX interrupt handlers. */
242
243static void v850e_uart_stop_tx (struct uart_port *port);
244
245void v850e_uart_tx (struct uart_port *port)
246{
247 struct circ_buf *xmit = &port->info->xmit;
248 int stopped = uart_tx_stopped (port);
249
250 if (v850e_uart_xmit_ok (port->line)) {
251 int tx_ch;
252
253 if (port->x_char) {
254 tx_ch = port->x_char;
255 port->x_char = 0;
256 } else if (!uart_circ_empty (xmit) && !stopped) {
257 tx_ch = xmit->buf[xmit->tail];
258 xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1);
259 } else
260 goto no_xmit;
261
262 v850e_uart_putc (port->line, tx_ch);
263 port->icount.tx++;
264
265 if (uart_circ_chars_pending (xmit) < WAKEUP_CHARS)
266 uart_write_wakeup (port);
267 }
268
269 no_xmit:
270 if (uart_circ_empty (xmit) || stopped)
271 v850e_uart_stop_tx (port, stopped);
272}
273
274static irqreturn_t v850e_uart_tx_irq(int irq, void *data)
275{
276 struct uart_port *port = data;
277 v850e_uart_tx (port);
278 return IRQ_HANDLED;
279}
280
281static irqreturn_t v850e_uart_rx_irq(int irq, void *data)
282{
283 struct uart_port *port = data;
284 unsigned ch_stat = TTY_NORMAL;
285 unsigned ch = v850e_uart_getc (port->line);
286 unsigned err = v850e_uart_err (port->line);
287
288 if (err) {
289 if (err & V850E_UART_ERR_OVERRUN) {
290 ch_stat = TTY_OVERRUN;
291 port->icount.overrun++;
292 } else if (err & V850E_UART_ERR_FRAME) {
293 ch_stat = TTY_FRAME;
294 port->icount.frame++;
295 } else if (err & V850E_UART_ERR_PARITY) {
296 ch_stat = TTY_PARITY;
297 port->icount.parity++;
298 }
299 }
300
301 port->icount.rx++;
302
303 tty_insert_flip_char (port->info->port.tty, ch, ch_stat);
304 tty_schedule_flip (port->info->port.tty);
305
306 return IRQ_HANDLED;
307}
308
309
310/* Control functions for the serial framework. */
311
312static void v850e_uart_nop (struct uart_port *port) { }
313static int v850e_uart_success (struct uart_port *port) { return 0; }
314
315static unsigned v850e_uart_tx_empty (struct uart_port *port)
316{
317 return TIOCSER_TEMT; /* Can't detect. */
318}
319
320static void v850e_uart_set_mctrl (struct uart_port *port, unsigned mctrl)
321{
322#ifdef V850E_UART_SET_RTS
323 V850E_UART_SET_RTS (port->line, (mctrl & TIOCM_RTS));
324#endif
325}
326
327static unsigned v850e_uart_get_mctrl (struct uart_port *port)
328{
329 /* We don't support DCD or DSR, so consider them permanently active. */
330 int mctrl = TIOCM_CAR | TIOCM_DSR;
331
332 /* We may support CTS. */
333#ifdef V850E_UART_CTS
334 mctrl |= V850E_UART_CTS(port->line) ? TIOCM_CTS : 0;
335#else
336 mctrl |= TIOCM_CTS;
337#endif
338
339 return mctrl;
340}
341
342static void v850e_uart_start_tx (struct uart_port *port)
343{
344 v850e_intc_disable_irq (V850E_UART_TX_IRQ (port->line));
345 v850e_uart_tx (port);
346 v850e_intc_enable_irq (V850E_UART_TX_IRQ (port->line));
347}
348
349static void v850e_uart_stop_tx (struct uart_port *port)
350{
351 v850e_intc_disable_irq (V850E_UART_TX_IRQ (port->line));
352}
353
354static void v850e_uart_start_rx (struct uart_port *port)
355{
356 v850e_intc_enable_irq (V850E_UART_RX_IRQ (port->line));
357}
358
359static void v850e_uart_stop_rx (struct uart_port *port)
360{
361 v850e_intc_disable_irq (V850E_UART_RX_IRQ (port->line));
362}
363
364static void v850e_uart_break_ctl (struct uart_port *port, int break_ctl)
365{
366 /* Umm, do this later. */
367}
368
369static int v850e_uart_startup (struct uart_port *port)
370{
371 int err;
372
373 /* Alloc RX irq. */
374 err = request_irq (V850E_UART_RX_IRQ (port->line), v850e_uart_rx_irq,
375 IRQF_DISABLED, "v850e_uart", port);
376 if (err)
377 return err;
378
379 /* Alloc TX irq. */
380 err = request_irq (V850E_UART_TX_IRQ (port->line), v850e_uart_tx_irq,
381 IRQF_DISABLED, "v850e_uart", port);
382 if (err) {
383 free_irq (V850E_UART_RX_IRQ (port->line), port);
384 return err;
385 }
386
387 v850e_uart_start_rx (port);
388
389 return 0;
390}
391
392static void v850e_uart_shutdown (struct uart_port *port)
393{
394 /* Disable port interrupts. */
395 free_irq (V850E_UART_TX_IRQ (port->line), port);
396 free_irq (V850E_UART_RX_IRQ (port->line), port);
397
398 /* Turn off xmit/recv enable bits. */
399 V850E_UART_CONFIG (port->line)
400 &= ~(V850E_UART_CONFIG_TX_ENABLE
401 | V850E_UART_CONFIG_RX_ENABLE);
402 /* Then reset the channel. */
403 V850E_UART_CONFIG (port->line) = 0;
404}
405
406static void
407v850e_uart_set_termios (struct uart_port *port, struct ktermios *termios,
408 struct ktermios *old)
409{
410 unsigned cflags = termios->c_cflag;
411
412 /* Restrict flags to legal values. */
413 if ((cflags & CSIZE) != CS7 && (cflags & CSIZE) != CS8)
414 /* The new value of CSIZE is invalid, use the old value. */
415 cflags = (cflags & ~CSIZE)
416 | (old ? (old->c_cflag & CSIZE) : CS8);
417
418 termios->c_cflag = cflags;
419
420 v850e_uart_configure (port->line, cflags,
421 uart_get_baud_rate (port, termios, old,
422 v850e_uart_min_baud(),
423 v850e_uart_max_baud()));
424}
425
426static const char *v850e_uart_type (struct uart_port *port)
427{
428 return port->type == PORT_V850E_UART ? "v850e_uart" : 0;
429}
430
431static void v850e_uart_config_port (struct uart_port *port, int flags)
432{
433 if (flags & UART_CONFIG_TYPE)
434 port->type = PORT_V850E_UART;
435}
436
437static int
438v850e_uart_verify_port (struct uart_port *port, struct serial_struct *ser)
439{
440 if (ser->type != PORT_UNKNOWN && ser->type != PORT_V850E_UART)
441 return -EINVAL;
442 if (ser->irq != V850E_UART_TX_IRQ (port->line))
443 return -EINVAL;
444 return 0;
445}
446
447static struct uart_ops v850e_uart_ops = {
448 .tx_empty = v850e_uart_tx_empty,
449 .get_mctrl = v850e_uart_get_mctrl,
450 .set_mctrl = v850e_uart_set_mctrl,
451 .start_tx = v850e_uart_start_tx,
452 .stop_tx = v850e_uart_stop_tx,
453 .stop_rx = v850e_uart_stop_rx,
454 .enable_ms = v850e_uart_nop,
455 .break_ctl = v850e_uart_break_ctl,
456 .startup = v850e_uart_startup,
457 .shutdown = v850e_uart_shutdown,
458 .set_termios = v850e_uart_set_termios,
459 .type = v850e_uart_type,
460 .release_port = v850e_uart_nop,
461 .request_port = v850e_uart_success,
462 .config_port = v850e_uart_config_port,
463 .verify_port = v850e_uart_verify_port,
464};
465
466/* Initialization and cleanup. */
467
468static struct uart_driver v850e_uart_driver = {
469 .owner = THIS_MODULE,
470 .driver_name = "v850e_uart",
471 .dev_name = "ttyS",
472 .major = TTY_MAJOR,
473 .minor = V850E_UART_MINOR_BASE,
474 .nr = V850E_UART_NUM_CHANNELS,
475 .cons = V850E_UART_CONSOLE,
476};
477
478
479static struct uart_port v850e_uart_ports[V850E_UART_NUM_CHANNELS];
480
481static int __init v850e_uart_init (void)
482{
483 int rval;
484
485 printk (KERN_INFO "%s on-chip UART\n", V850E_UART_CHIP_NAME);
486
487 rval = uart_register_driver (&v850e_uart_driver);
488 if (rval == 0) {
489 unsigned chan;
490
491 for (chan = 0; chan < V850E_UART_NUM_CHANNELS; chan++) {
492 struct uart_port *port = &v850e_uart_ports[chan];
493
494 memset (port, 0, sizeof *port);
495
496 port->ops = &v850e_uart_ops;
497 port->line = chan;
498 port->iotype = UPIO_MEM;
499 port->flags = UPF_BOOT_AUTOCONF;
500
501 /* We actually use multiple IRQs, but the serial
502 framework seems to mainly use this for
503 informational purposes anyway. Here we use the TX
504 irq. */
505 port->irq = V850E_UART_TX_IRQ (chan);
506
507 /* The serial framework doesn't really use these
508 membase/mapbase fields for anything useful, but
509 it requires that they be something non-zero to
510 consider the port `valid', and also uses them
511 for informational purposes. */
512 port->membase = (void *)V850E_UART_BASE_ADDR (chan);
513 port->mapbase = V850E_UART_BASE_ADDR (chan);
514
515 /* The framework insists on knowing the uart's master
516 clock freq, though it doesn't seem to do anything
517 useful for us with it. We must make it at least
518 higher than (the maximum baud rate * 16), otherwise
519 the framework will puke during its internal
520 calculations, and force the baud rate to be 9600.
521 To be accurate though, just repeat the calculation
522 we use when actually setting the speed. */
523 port->uartclk = v850e_uart_max_clock() * 16;
524
525 uart_add_one_port (&v850e_uart_driver, port);
526 }
527 }
528
529 return rval;
530}
531
532static void __exit v850e_uart_exit (void)
533{
534 unsigned chan;
535
536 for (chan = 0; chan < V850E_UART_NUM_CHANNELS; chan++)
537 uart_remove_one_port (&v850e_uart_driver,
538 &v850e_uart_ports[chan]);
539
540 uart_unregister_driver (&v850e_uart_driver);
541}
542
543module_init (v850e_uart_init);
544module_exit (v850e_uart_exit);
545
546MODULE_AUTHOR ("Miles Bader");
547MODULE_DESCRIPTION ("NEC " V850E_UART_CHIP_NAME " on-chip UART");
548MODULE_LICENSE ("GPL");
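
The init path of the removed driver above also spells out what serial_core expects before it will accept a port: a non-zero membase/mapbase pair, an irq recorded mainly for informational purposes, and a uartclk of at least sixteen times the fastest supported baud rate. A minimal sketch of that registration pattern; every name, address and number below is an assumption for illustration, and the uart_ops handlers are left out:

#include <linux/init.h>
#include <linux/major.h>
#include <linux/module.h>
#include <linux/serial_core.h>
#include <linux/tty.h>

#define EX_BASE_ADDR	0xfff30000UL	/* assumed register base */
#define EX_MAX_BAUD	115200

static struct uart_ops ex_uart_ops;	/* startup/shutdown/... omitted here */

static struct uart_driver ex_uart_driver = {
	.owner		= THIS_MODULE,
	.driver_name	= "ex_uart",
	.dev_name	= "ttyEX",
	.major		= TTY_MAJOR,
	.minor		= 64,		/* first tty minor */
	.nr		= 1,
};

static struct uart_port ex_port;

static int __init ex_uart_init(void)
{
	int ret = uart_register_driver(&ex_uart_driver);

	if (ret)
		return ret;

	ex_port.ops	= &ex_uart_ops;
	ex_port.line	= 0;
	ex_port.iotype	= UPIO_MEM;
	ex_port.flags	= UPF_BOOT_AUTOCONF;
	ex_port.membase	= (void __iomem *)EX_BASE_ADDR;	/* must be non-zero */
	ex_port.mapbase	= EX_BASE_ADDR;
	ex_port.irq	= 42;				/* assumed IRQ number */
	ex_port.uartclk	= EX_MAX_BAUD * 16;		/* at least max baud * 16 */

	return uart_add_one_port(&ex_uart_driver, &ex_port);
}

static void __exit ex_uart_exit(void)
{
	uart_remove_one_port(&ex_uart_driver, &ex_port);
	uart_unregister_driver(&ex_uart_driver);
}

module_init(ex_uart_init);
module_exit(ex_uart_exit);
MODULE_LICENSE("GPL");
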
diff --git a/drivers/sh/maple/maple.c b/drivers/sh/maple/maple.c
index 617efb1640b1..d1812d32f47d 100644
--- a/drivers/sh/maple/maple.c
+++ b/drivers/sh/maple/maple.c
@@ -2,6 +2,7 @@
2 * Core maple bus functionality 2 * Core maple bus functionality
3 * 3 *
4 * Copyright (C) 2007, 2008 Adrian McMenamin 4 * Copyright (C) 2007, 2008 Adrian McMenamin
5 * Copyright (C) 2001 - 2008 Paul Mundt
5 * 6 *
6 * Based on 2.4 code by: 7 * Based on 2.4 code by:
7 * 8 *
@@ -24,15 +25,14 @@
24#include <linux/slab.h> 25#include <linux/slab.h>
25#include <linux/maple.h> 26#include <linux/maple.h>
26#include <linux/dma-mapping.h> 27#include <linux/dma-mapping.h>
28#include <linux/delay.h>
27#include <asm/cacheflush.h> 29#include <asm/cacheflush.h>
28#include <asm/dma.h> 30#include <asm/dma.h>
29#include <asm/io.h> 31#include <asm/io.h>
30#include <asm/mach/dma.h> 32#include <mach/dma.h>
31#include <asm/mach/sysasic.h> 33#include <mach/sysasic.h>
32#include <asm/mach/maple.h>
33#include <linux/delay.h>
34 34
35MODULE_AUTHOR("Yaegshi Takeshi, Paul Mundt, M.R. Brown, Adrian McMenamin"); 35MODULE_AUTHOR("Yaegashi Takeshi, Paul Mundt, M. R. Brown, Adrian McMenamin");
36MODULE_DESCRIPTION("Maple bus driver for Dreamcast"); 36MODULE_DESCRIPTION("Maple bus driver for Dreamcast");
37MODULE_LICENSE("GPL v2"); 37MODULE_LICENSE("GPL v2");
38MODULE_SUPPORTED_DEVICE("{{SEGA, Dreamcast/Maple}}"); 38MODULE_SUPPORTED_DEVICE("{{SEGA, Dreamcast/Maple}}");
@@ -46,14 +46,15 @@ static DECLARE_WORK(maple_vblank_process, maple_vblank_handler);
46static LIST_HEAD(maple_waitq); 46static LIST_HEAD(maple_waitq);
47static LIST_HEAD(maple_sentq); 47static LIST_HEAD(maple_sentq);
48 48
49static DEFINE_MUTEX(maple_list_lock); 49/* mutex to protect queue of waiting packets */
50static DEFINE_MUTEX(maple_wlist_lock);
50 51
51static struct maple_driver maple_dummy_driver; 52static struct maple_driver maple_dummy_driver;
52static struct device maple_bus; 53static struct device maple_bus;
53static int subdevice_map[MAPLE_PORTS]; 54static int subdevice_map[MAPLE_PORTS];
54static unsigned long *maple_sendbuf, *maple_sendptr, *maple_lastptr; 55static unsigned long *maple_sendbuf, *maple_sendptr, *maple_lastptr;
55static unsigned long maple_pnp_time; 56static unsigned long maple_pnp_time;
56static int started, scanning, liststatus, fullscan; 57static int started, scanning, fullscan;
57static struct kmem_cache *maple_queue_cache; 58static struct kmem_cache *maple_queue_cache;
58 59
59struct maple_device_specify { 60struct maple_device_specify {
@@ -65,19 +66,36 @@ static bool checked[4];
65static struct maple_device *baseunits[4]; 66static struct maple_device *baseunits[4];
66 67
67/** 68/**
68 * maple_driver_register - register a device driver 69 * maple_driver_register - register a maple driver
69 * automatically makes the driver bus a maple bus 70 * @drv: maple driver to be registered.
70 * @drv: the driver to be registered 71 *
72 * Registers the passed in @drv, while updating the bus type.
73 * Devices with matching function IDs will be automatically probed.
71 */ 74 */
72int maple_driver_register(struct device_driver *drv) 75int maple_driver_register(struct maple_driver *drv)
73{ 76{
74 if (!drv) 77 if (!drv)
75 return -EINVAL; 78 return -EINVAL;
76 drv->bus = &maple_bus_type; 79
77 return driver_register(drv); 80 drv->drv.bus = &maple_bus_type;
81
82 return driver_register(&drv->drv);
78} 83}
79EXPORT_SYMBOL_GPL(maple_driver_register); 84EXPORT_SYMBOL_GPL(maple_driver_register);
80 85
86/**
87 * maple_driver_unregister - unregister a maple driver.
88 * @drv: maple driver to unregister.
89 *
90 * Cleans up after maple_driver_register(). To be invoked in the exit
91 * path of any module drivers.
92 */
93void maple_driver_unregister(struct maple_driver *drv)
94{
95 driver_unregister(&drv->drv);
96}
97EXPORT_SYMBOL_GPL(maple_driver_unregister);
98
81/* set hardware registers to enable next round of dma */ 99/* set hardware registers to enable next round of dma */
82static void maplebus_dma_reset(void) 100static void maplebus_dma_reset(void)
83{ 101{
@@ -131,33 +149,123 @@ static void maple_release_device(struct device *dev)
131 149
132/** 150/**
133 * maple_add_packet - add a single instruction to the queue 151 * maple_add_packet - add a single instruction to the queue
134 * @mq: instruction to add to waiting queue 152 * @mdev: maple device
153 * @function: function on device being queried
154 * @command: maple command to add
155 * @length: length of command string (in 32 bit words)
156 * @data: remainder of command string
135 */ 157 */
136void maple_add_packet(struct mapleq *mq) 158int maple_add_packet(struct maple_device *mdev, u32 function, u32 command,
159 size_t length, void *data)
137{ 160{
138 mutex_lock(&maple_list_lock); 161 int locking, ret = 0;
139 list_add(&mq->list, &maple_waitq); 162 void *sendbuf = NULL;
140 mutex_unlock(&maple_list_lock); 163
164 mutex_lock(&maple_wlist_lock);
165 /* bounce if device already locked */
166 locking = mutex_is_locked(&mdev->mq->mutex);
167 if (locking) {
168 ret = -EBUSY;
169 goto out;
170 }
171
172 mutex_lock(&mdev->mq->mutex);
173
174 if (length) {
175 sendbuf = kmalloc(length * 4, GFP_KERNEL);
176 if (!sendbuf) {
177 mutex_unlock(&mdev->mq->mutex);
178 ret = -ENOMEM;
179 goto out;
180 }
181 ((__be32 *)sendbuf)[0] = cpu_to_be32(function);
182 }
183
184 mdev->mq->command = command;
185 mdev->mq->length = length;
186 if (length > 1)
187 memcpy(sendbuf + 4, data, (length - 1) * 4);
188 mdev->mq->sendbuf = sendbuf;
189
190 list_add(&mdev->mq->list, &maple_waitq);
191out:
192 mutex_unlock(&maple_wlist_lock);
193 return ret;
141} 194}
142EXPORT_SYMBOL_GPL(maple_add_packet); 195EXPORT_SYMBOL_GPL(maple_add_packet);
143 196
197/**
198 * maple_add_packet_sleeps - add a single instruction to the queue
199 * @mdev: maple device
200 * @function: function on device being queried
201 * @command: maple command to add
202 * @length: length of command string (in 32 bit words)
203 * @data: remainder of command string
204 *
205 * Same as maple_add_packet(), but waits for the lock to become free.
206 */
207int maple_add_packet_sleeps(struct maple_device *mdev, u32 function,
208 u32 command, size_t length, void *data)
209{
210 int locking, ret = 0;
211 void *sendbuf = NULL;
212
213 locking = mutex_lock_interruptible(&mdev->mq->mutex);
214 if (locking) {
215 ret = -EIO;
216 goto out;
217 }
218
219 if (length) {
220 sendbuf = kmalloc(length * 4, GFP_KERNEL);
221 if (!sendbuf) {
222 mutex_unlock(&mdev->mq->mutex);
223 ret = -ENOMEM;
224 goto out;
225 }
226 ((__be32 *)sendbuf)[0] = cpu_to_be32(function);
227 }
228
229 mdev->mq->command = command;
230 mdev->mq->length = length;
231 if (length > 1)
232 memcpy(sendbuf + 4, data, (length - 1) * 4);
233 mdev->mq->sendbuf = sendbuf;
234
235 mutex_lock(&maple_wlist_lock);
236 list_add(&mdev->mq->list, &maple_waitq);
237 mutex_unlock(&maple_wlist_lock);
238out:
239 return ret;
240}
241EXPORT_SYMBOL_GPL(maple_add_packet_sleeps);
242
144static struct mapleq *maple_allocq(struct maple_device *mdev) 243static struct mapleq *maple_allocq(struct maple_device *mdev)
145{ 244{
146 struct mapleq *mq; 245 struct mapleq *mq;
147 246
148 mq = kmalloc(sizeof(*mq), GFP_KERNEL); 247 mq = kmalloc(sizeof(*mq), GFP_KERNEL);
149 if (!mq) 248 if (!mq)
150 return NULL; 249 goto failed_nomem;
151 250
152 mq->dev = mdev; 251 mq->dev = mdev;
153 mq->recvbufdcsp = kmem_cache_zalloc(maple_queue_cache, GFP_KERNEL); 252 mq->recvbufdcsp = kmem_cache_zalloc(maple_queue_cache, GFP_KERNEL);
154 mq->recvbuf = (void *) P2SEGADDR(mq->recvbufdcsp); 253 mq->recvbuf = (void *) P2SEGADDR(mq->recvbufdcsp);
155 if (!mq->recvbuf) { 254 if (!mq->recvbuf)
156 kfree(mq); 255 goto failed_p2;
157 return NULL; 256 /*
158 } 257 * most devices do not need the mutex - but
258 * anything that injects block reads or writes
259 * will rely on it
260 */
261 mutex_init(&mq->mutex);
159 262
160 return mq; 263 return mq;
264
265failed_p2:
266 kfree(mq);
267failed_nomem:
268 return NULL;
161} 269}
162 270
163static struct maple_device *maple_alloc_dev(int port, int unit) 271static struct maple_device *maple_alloc_dev(int port, int unit)
@@ -178,7 +286,6 @@ static struct maple_device *maple_alloc_dev(int port, int unit)
178 } 286 }
179 mdev->dev.bus = &maple_bus_type; 287 mdev->dev.bus = &maple_bus_type;
180 mdev->dev.parent = &maple_bus; 288 mdev->dev.parent = &maple_bus;
181 mdev->function = 0;
182 return mdev; 289 return mdev;
183} 290}
184 291
@@ -216,7 +323,6 @@ static void maple_build_block(struct mapleq *mq)
216 *maple_sendptr++ = PHYSADDR(mq->recvbuf); 323 *maple_sendptr++ = PHYSADDR(mq->recvbuf);
217 *maple_sendptr++ = 324 *maple_sendptr++ =
218 mq->command | (to << 8) | (from << 16) | (len << 24); 325 mq->command | (to << 8) | (from << 16) | (len << 24);
219
220 while (len-- > 0) 326 while (len-- > 0)
221 *maple_sendptr++ = *lsendbuf++; 327 *maple_sendptr++ = *lsendbuf++;
222} 328}
@@ -224,22 +330,27 @@ static void maple_build_block(struct mapleq *mq)
224/* build up command queue */ 330/* build up command queue */
225static void maple_send(void) 331static void maple_send(void)
226{ 332{
227 int i; 333 int i, maple_packets = 0;
228 int maple_packets;
229 struct mapleq *mq, *nmq; 334 struct mapleq *mq, *nmq;
230 335
231 if (!list_empty(&maple_sentq)) 336 if (!list_empty(&maple_sentq))
232 return; 337 return;
233 if (list_empty(&maple_waitq) || !maple_dma_done()) 338 mutex_lock(&maple_wlist_lock);
339 if (list_empty(&maple_waitq) || !maple_dma_done()) {
340 mutex_unlock(&maple_wlist_lock);
234 return; 341 return;
235 maple_packets = 0; 342 }
236 maple_sendptr = maple_lastptr = maple_sendbuf; 343 mutex_unlock(&maple_wlist_lock);
344 maple_lastptr = maple_sendbuf;
345 maple_sendptr = maple_sendbuf;
346 mutex_lock(&maple_wlist_lock);
237 list_for_each_entry_safe(mq, nmq, &maple_waitq, list) { 347 list_for_each_entry_safe(mq, nmq, &maple_waitq, list) {
238 maple_build_block(mq); 348 maple_build_block(mq);
239 list_move(&mq->list, &maple_sentq); 349 list_move(&mq->list, &maple_sentq);
240 if (maple_packets++ > MAPLE_MAXPACKETS) 350 if (maple_packets++ > MAPLE_MAXPACKETS)
241 break; 351 break;
242 } 352 }
353 mutex_unlock(&maple_wlist_lock);
243 if (maple_packets > 0) { 354 if (maple_packets > 0) {
244 for (i = 0; i < (1 << MAPLE_DMA_PAGES); i++) 355 for (i = 0; i < (1 << MAPLE_DMA_PAGES); i++)
245 dma_cache_sync(0, maple_sendbuf + i * PAGE_SIZE, 356 dma_cache_sync(0, maple_sendbuf + i * PAGE_SIZE,
@@ -247,7 +358,8 @@ static void maple_send(void)
247 } 358 }
248} 359}
249 360
250static int attach_matching_maple_driver(struct device_driver *driver, 361/* check if there is a driver registered likely to match this device */
362static int check_matching_maple_driver(struct device_driver *driver,
251 void *devptr) 363 void *devptr)
252{ 364{
253 struct maple_driver *maple_drv; 365 struct maple_driver *maple_drv;
@@ -255,12 +367,8 @@ static int attach_matching_maple_driver(struct device_driver *driver,
255 367
256 mdev = devptr; 368 mdev = devptr;
257 maple_drv = to_maple_driver(driver); 369 maple_drv = to_maple_driver(driver);
258 if (mdev->devinfo.function & be32_to_cpu(maple_drv->function)) { 370 if (mdev->devinfo.function & cpu_to_be32(maple_drv->function))
259 if (maple_drv->connect(mdev) == 0) { 371 return 1;
260 mdev->driver = maple_drv;
261 return 1;
262 }
263 }
264 return 0; 372 return 0;
265} 373}
266 374
@@ -268,11 +376,6 @@ static void maple_detach_driver(struct maple_device *mdev)
268{ 376{
269 if (!mdev) 377 if (!mdev)
270 return; 378 return;
271 if (mdev->driver) {
272 if (mdev->driver->disconnect)
273 mdev->driver->disconnect(mdev);
274 }
275 mdev->driver = NULL;
276 device_unregister(&mdev->dev); 379 device_unregister(&mdev->dev);
277 mdev = NULL; 380 mdev = NULL;
278} 381}
@@ -328,8 +431,8 @@ static void maple_attach_driver(struct maple_device *mdev)
328 mdev->port, mdev->unit, function); 431 mdev->port, mdev->unit, function);
329 432
330 matched = 433 matched =
331 bus_for_each_drv(&maple_bus_type, NULL, mdev, 434 bus_for_each_drv(&maple_bus_type, NULL, mdev,
332 attach_matching_maple_driver); 435 check_matching_maple_driver);
333 436
334 if (matched == 0) { 437 if (matched == 0) {
335 /* Driver does not exist yet */ 438 /* Driver does not exist yet */
@@ -373,45 +476,48 @@ static int detach_maple_device(struct device *device, void *portptr)
373 476
374static int setup_maple_commands(struct device *device, void *ignored) 477static int setup_maple_commands(struct device *device, void *ignored)
375{ 478{
479 int add;
376 struct maple_device *maple_dev = to_maple_dev(device); 480 struct maple_device *maple_dev = to_maple_dev(device);
377 481
378 if ((maple_dev->interval > 0) 482 if ((maple_dev->interval > 0)
379 && time_after(jiffies, maple_dev->when)) { 483 && time_after(jiffies, maple_dev->when)) {
380 maple_dev->when = jiffies + maple_dev->interval; 484 /* bounce if we cannot lock */
381 maple_dev->mq->command = MAPLE_COMMAND_GETCOND; 485 add = maple_add_packet(maple_dev,
382 maple_dev->mq->sendbuf = &maple_dev->function; 486 be32_to_cpu(maple_dev->devinfo.function),
383 maple_dev->mq->length = 1; 487 MAPLE_COMMAND_GETCOND, 1, NULL);
384 maple_add_packet(maple_dev->mq); 488 if (!add)
385 liststatus++; 489 maple_dev->when = jiffies + maple_dev->interval;
386 } else { 490 } else {
387 if (time_after(jiffies, maple_pnp_time)) { 491 if (time_after(jiffies, maple_pnp_time))
388 maple_dev->mq->command = MAPLE_COMMAND_DEVINFO; 492 /* This will also bounce */
389 maple_dev->mq->length = 0; 493 maple_add_packet(maple_dev, 0,
390 maple_add_packet(maple_dev->mq); 494 MAPLE_COMMAND_DEVINFO, 0, NULL);
391 liststatus++;
392 }
393 } 495 }
394
395 return 0; 496 return 0;
396} 497}
397 498
398/* VBLANK bottom half - implemented via workqueue */ 499/* VBLANK bottom half - implemented via workqueue */
399static void maple_vblank_handler(struct work_struct *work) 500static void maple_vblank_handler(struct work_struct *work)
400{ 501{
401 if (!maple_dma_done()) 502 if (!list_empty(&maple_sentq) || !maple_dma_done())
402 return;
403 if (!list_empty(&maple_sentq))
404 return; 503 return;
504
405 ctrl_outl(0, MAPLE_ENABLE); 505 ctrl_outl(0, MAPLE_ENABLE);
406 liststatus = 0; 506
407 bus_for_each_dev(&maple_bus_type, NULL, NULL, 507 bus_for_each_dev(&maple_bus_type, NULL, NULL,
408 setup_maple_commands); 508 setup_maple_commands);
509
409 if (time_after(jiffies, maple_pnp_time)) 510 if (time_after(jiffies, maple_pnp_time))
410 maple_pnp_time = jiffies + MAPLE_PNP_INTERVAL; 511 maple_pnp_time = jiffies + MAPLE_PNP_INTERVAL;
411 if (liststatus && list_empty(&maple_sentq)) { 512
412 INIT_LIST_HEAD(&maple_sentq); 513 mutex_lock(&maple_wlist_lock);
514 if (!list_empty(&maple_waitq) && list_empty(&maple_sentq)) {
515 mutex_unlock(&maple_wlist_lock);
413 maple_send(); 516 maple_send();
517 } else {
518 mutex_unlock(&maple_wlist_lock);
414 } 519 }
520
415 maplebus_dma_reset(); 521 maplebus_dma_reset();
416} 522}
417 523
@@ -422,8 +528,8 @@ static void maple_map_subunits(struct maple_device *mdev, int submask)
422 struct maple_device *mdev_add; 528 struct maple_device *mdev_add;
423 struct maple_device_specify ds; 529 struct maple_device_specify ds;
424 530
531 ds.port = mdev->port;
425 for (k = 0; k < 5; k++) { 532 for (k = 0; k < 5; k++) {
426 ds.port = mdev->port;
427 ds.unit = k + 1; 533 ds.unit = k + 1;
428 retval = 534 retval =
429 bus_for_each_dev(&maple_bus_type, NULL, &ds, 535 bus_for_each_dev(&maple_bus_type, NULL, &ds,
@@ -437,9 +543,9 @@ static void maple_map_subunits(struct maple_device *mdev, int submask)
437 mdev_add = maple_alloc_dev(mdev->port, k + 1); 543 mdev_add = maple_alloc_dev(mdev->port, k + 1);
438 if (!mdev_add) 544 if (!mdev_add)
439 return; 545 return;
440 mdev_add->mq->command = MAPLE_COMMAND_DEVINFO; 546 maple_add_packet(mdev_add, 0, MAPLE_COMMAND_DEVINFO,
441 mdev_add->mq->length = 0; 547 0, NULL);
442 maple_add_packet(mdev_add->mq); 548 /* mark that we are checking sub devices */
443 scanning = 1; 549 scanning = 1;
444 } 550 }
445 submask = submask >> 1; 551 submask = submask >> 1;
@@ -505,6 +611,28 @@ static void maple_response_devinfo(struct maple_device *mdev,
505 } 611 }
506} 612}
507 613
614static void maple_port_rescan(void)
615{
616 int i;
617 struct maple_device *mdev;
618
619 fullscan = 1;
620 for (i = 0; i < MAPLE_PORTS; i++) {
621 if (checked[i] == false) {
622 fullscan = 0;
623 mdev = baseunits[i];
624 /*
625 * test lock in case scan has failed
626 * but device is still locked
627 */
628 if (mutex_is_locked(&mdev->mq->mutex))
629 mutex_unlock(&mdev->mq->mutex);
630 maple_add_packet(mdev, 0, MAPLE_COMMAND_DEVINFO,
631 0, NULL);
632 }
633 }
634}
635
508/* maple dma end bottom half - implemented via workqueue */ 636/* maple dma end bottom half - implemented via workqueue */
509static void maple_dma_handler(struct work_struct *work) 637static void maple_dma_handler(struct work_struct *work)
510{ 638{
@@ -512,7 +640,6 @@ static void maple_dma_handler(struct work_struct *work)
512 struct maple_device *dev; 640 struct maple_device *dev;
513 char *recvbuf; 641 char *recvbuf;
514 enum maple_code code; 642 enum maple_code code;
515 int i;
516 643
517 if (!maple_dma_done()) 644 if (!maple_dma_done())
518 return; 645 return;
@@ -522,6 +649,10 @@ static void maple_dma_handler(struct work_struct *work)
522 recvbuf = mq->recvbuf; 649 recvbuf = mq->recvbuf;
523 code = recvbuf[0]; 650 code = recvbuf[0];
524 dev = mq->dev; 651 dev = mq->dev;
652 kfree(mq->sendbuf);
653 mutex_unlock(&mq->mutex);
654 list_del_init(&mq->list);
655
525 switch (code) { 656 switch (code) {
526 case MAPLE_RESPONSE_NONE: 657 case MAPLE_RESPONSE_NONE:
527 maple_response_none(dev, mq); 658 maple_response_none(dev, mq);
@@ -558,26 +689,16 @@ static void maple_dma_handler(struct work_struct *work)
558 break; 689 break;
559 } 690 }
560 } 691 }
561 INIT_LIST_HEAD(&maple_sentq); 692 /* if scanning is 1 then we have subdevices to check */
562 if (scanning == 1) { 693 if (scanning == 1) {
563 maple_send(); 694 maple_send();
564 scanning = 2; 695 scanning = 2;
565 } else 696 } else
566 scanning = 0; 697 scanning = 0;
567 698 /*check if we have actually tested all ports yet */
568 if (!fullscan) { 699 if (!fullscan)
569 fullscan = 1; 700 maple_port_rescan();
570 for (i = 0; i < MAPLE_PORTS; i++) { 701 /* mark that we have been through the first scan */
571 if (checked[i] == false) {
572 fullscan = 0;
573 dev = baseunits[i];
574 dev->mq->command =
575 MAPLE_COMMAND_DEVINFO;
576 dev->mq->length = 0;
577 maple_add_packet(dev->mq);
578 }
579 }
580 }
581 if (started == 0) 702 if (started == 0)
582 started = 1; 703 started = 1;
583 } 704 }
@@ -622,16 +743,14 @@ static int maple_get_dma_buffer(void)
622static int match_maple_bus_driver(struct device *devptr, 743static int match_maple_bus_driver(struct device *devptr,
623 struct device_driver *drvptr) 744 struct device_driver *drvptr)
624{ 745{
625 struct maple_driver *maple_drv; 746 struct maple_driver *maple_drv = to_maple_driver(drvptr);
626 struct maple_device *maple_dev; 747 struct maple_device *maple_dev = to_maple_dev(devptr);
627 748
628 maple_drv = container_of(drvptr, struct maple_driver, drv);
629 maple_dev = container_of(devptr, struct maple_device, dev);
630 /* Trap empty port case */ 749 /* Trap empty port case */
631 if (maple_dev->devinfo.function == 0xFFFFFFFF) 750 if (maple_dev->devinfo.function == 0xFFFFFFFF)
632 return 0; 751 return 0;
633 else if (maple_dev->devinfo.function & 752 else if (maple_dev->devinfo.function &
634 be32_to_cpu(maple_drv->function)) 753 cpu_to_be32(maple_drv->function))
635 return 1; 754 return 1;
636 return 0; 755 return 0;
637} 756}
@@ -713,6 +832,9 @@ static int __init maple_bus_init(void)
713 if (!maple_queue_cache) 832 if (!maple_queue_cache)
714 goto cleanup_bothirqs; 833 goto cleanup_bothirqs;
715 834
835 INIT_LIST_HEAD(&maple_waitq);
836 INIT_LIST_HEAD(&maple_sentq);
837
716 /* setup maple ports */ 838 /* setup maple ports */
717 for (i = 0; i < MAPLE_PORTS; i++) { 839 for (i = 0; i < MAPLE_PORTS; i++) {
718 checked[i] = false; 840 checked[i] = false;
@@ -723,9 +845,7 @@ static int __init maple_bus_init(void)
723 maple_free_dev(mdev[i]); 845 maple_free_dev(mdev[i]);
724 goto cleanup_cache; 846 goto cleanup_cache;
725 } 847 }
726 mdev[i]->mq->command = MAPLE_COMMAND_DEVINFO; 848 maple_add_packet(mdev[i], 0, MAPLE_COMMAND_DEVINFO, 0, NULL);
727 mdev[i]->mq->length = 0;
728 maple_add_packet(mdev[i]->mq);
729 subdevice_map[i] = 0; 849 subdevice_map[i] = 0;
730 } 850 }
731 851
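
The reworked maple core above now owns packet construction: sub-drivers register a struct maple_driver and queue commands through maple_add_packet(mdev, function, command, length, data), where length counts 32-bit words including the leading function word. A minimal sketch of a caller; the function-ID constant, names and polling policy are assumptions, not taken from the patch:

#include <linux/errno.h>
#include <linux/module.h>
#include <linux/maple.h>

#define EX_MAPLE_FUNC	0x001		/* assumed function ID for the example */

/* Queue a GETCOND for the function the device advertised at probe time. */
static int ex_poll_device(struct maple_device *mdev)
{
	int ret;

	ret = maple_add_packet(mdev, be32_to_cpu(mdev->devinfo.function),
			       MAPLE_COMMAND_GETCOND, 1, NULL);
	if (ret == -EBUSY)
		return 0;	/* queue entry still locked; retry on the next pass */
	return ret;		/* 0 on success, -ENOMEM if allocation failed */
}

static struct maple_driver ex_maple_driver = {
	.function = EX_MAPLE_FUNC,
	.drv = {
		.name = "ex_maple",
	},
};

static int __init ex_maple_init(void)
{
	return maple_driver_register(&ex_maple_driver);
}

static void __exit ex_maple_exit(void)
{
	maple_driver_unregister(&ex_maple_driver);
}

module_init(ex_maple_init);
module_exit(ex_maple_exit);
MODULE_LICENSE("GPL");
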
diff --git a/drivers/spi/Kconfig b/drivers/spi/Kconfig
index 2303521b4f09..b9d0efb6803f 100644
--- a/drivers/spi/Kconfig
+++ b/drivers/spi/Kconfig
@@ -149,6 +149,12 @@ config SPI_OMAP24XX
149 SPI master controller for OMAP24xx/OMAP34xx Multichannel SPI 149 SPI master controller for OMAP24xx/OMAP34xx Multichannel SPI
150 (McSPI) modules. 150 (McSPI) modules.
151 151
152config SPI_ORION
153 tristate "Orion SPI master (EXPERIMENTAL)"
154 depends on PLAT_ORION && EXPERIMENTAL
155 help
156 This enables using the SPI master controller on the Orion chips.
157
152config SPI_PXA2XX 158config SPI_PXA2XX
153 tristate "PXA2xx SSP SPI master" 159 tristate "PXA2xx SSP SPI master"
154 depends on ARCH_PXA && EXPERIMENTAL 160 depends on ARCH_PXA && EXPERIMENTAL
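
Once a master driver such as the one this option enables is in place, boards describe their slaves through the generic spi_board_info mechanism; an illustrative sketch, in which the slave modalias, speed, bus and chip-select numbers are all assumptions:

#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/spi/spi.h>

static struct spi_board_info ex_spi_slaves[] __initdata = {
	{
		.modalias	= "m25p80",	/* assumed flash slave driver */
		.max_speed_hz	= 20 * 1000 * 1000,
		.bus_num	= 0,		/* master controller bus number */
		.chip_select	= 0,
	},
};

static int __init ex_board_spi_init(void)
{
	/* registered once from board setup code, before the master probes */
	return spi_register_board_info(ex_spi_slaves, ARRAY_SIZE(ex_spi_slaves));
}
arch_initcall(ex_board_spi_init);
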
diff --git a/drivers/spi/Makefile b/drivers/spi/Makefile
index 7fca043ce723..ccf18de34e1e 100644
--- a/drivers/spi/Makefile
+++ b/drivers/spi/Makefile
@@ -21,6 +21,7 @@ obj-$(CONFIG_SPI_LM70_LLP) += spi_lm70llp.o
21obj-$(CONFIG_SPI_PXA2XX) += pxa2xx_spi.o 21obj-$(CONFIG_SPI_PXA2XX) += pxa2xx_spi.o
22obj-$(CONFIG_SPI_OMAP_UWIRE) += omap_uwire.o 22obj-$(CONFIG_SPI_OMAP_UWIRE) += omap_uwire.o
23obj-$(CONFIG_SPI_OMAP24XX) += omap2_mcspi.o 23obj-$(CONFIG_SPI_OMAP24XX) += omap2_mcspi.o
24obj-$(CONFIG_SPI_ORION) += orion_spi.o
24obj-$(CONFIG_SPI_MPC52xx_PSC) += mpc52xx_psc_spi.o 25obj-$(CONFIG_SPI_MPC52xx_PSC) += mpc52xx_psc_spi.o
25obj-$(CONFIG_SPI_MPC83xx) += spi_mpc83xx.o 26obj-$(CONFIG_SPI_MPC83xx) += spi_mpc83xx.o
26obj-$(CONFIG_SPI_S3C24XX_GPIO) += spi_s3c24xx_gpio.o 27obj-$(CONFIG_SPI_S3C24XX_GPIO) += spi_s3c24xx_gpio.o
diff --git a/drivers/spi/atmel_spi.c b/drivers/spi/atmel_spi.c
index 0c7165660853..02f9320f3efc 100644
--- a/drivers/spi/atmel_spi.c
+++ b/drivers/spi/atmel_spi.c
@@ -20,9 +20,9 @@
20#include <linux/spi/spi.h> 20#include <linux/spi/spi.h>
21 21
22#include <asm/io.h> 22#include <asm/io.h>
23#include <asm/arch/board.h> 23#include <mach/board.h>
24#include <asm/arch/gpio.h> 24#include <mach/gpio.h>
25#include <asm/arch/cpu.h> 25#include <mach/cpu.h>
26 26
27#include "atmel_spi.h" 27#include "atmel_spi.h"
28 28
@@ -184,7 +184,8 @@ static void atmel_spi_next_xfer(struct spi_master *master,
184{ 184{
185 struct atmel_spi *as = spi_master_get_devdata(master); 185 struct atmel_spi *as = spi_master_get_devdata(master);
186 struct spi_transfer *xfer; 186 struct spi_transfer *xfer;
187 u32 len, remaining, total; 187 u32 len, remaining;
188 u32 ieval;
188 dma_addr_t tx_dma, rx_dma; 189 dma_addr_t tx_dma, rx_dma;
189 190
190 if (!as->current_transfer) 191 if (!as->current_transfer)
@@ -197,6 +198,8 @@ static void atmel_spi_next_xfer(struct spi_master *master,
197 xfer = NULL; 198 xfer = NULL;
198 199
199 if (xfer) { 200 if (xfer) {
201 spi_writel(as, PTCR, SPI_BIT(RXTDIS) | SPI_BIT(TXTDIS));
202
200 len = xfer->len; 203 len = xfer->len;
201 atmel_spi_next_xfer_data(master, xfer, &tx_dma, &rx_dma, &len); 204 atmel_spi_next_xfer_data(master, xfer, &tx_dma, &rx_dma, &len);
202 remaining = xfer->len - len; 205 remaining = xfer->len - len;
@@ -234,6 +237,8 @@ static void atmel_spi_next_xfer(struct spi_master *master,
234 as->next_transfer = xfer; 237 as->next_transfer = xfer;
235 238
236 if (xfer) { 239 if (xfer) {
240 u32 total;
241
237 total = len; 242 total = len;
238 atmel_spi_next_xfer_data(master, xfer, &tx_dma, &rx_dma, &len); 243 atmel_spi_next_xfer_data(master, xfer, &tx_dma, &rx_dma, &len);
239 as->next_remaining_bytes = total - len; 244 as->next_remaining_bytes = total - len;
@@ -250,9 +255,11 @@ static void atmel_spi_next_xfer(struct spi_master *master,
250 " next xfer %p: len %u tx %p/%08x rx %p/%08x\n", 255 " next xfer %p: len %u tx %p/%08x rx %p/%08x\n",
251 xfer, xfer->len, xfer->tx_buf, xfer->tx_dma, 256 xfer, xfer->len, xfer->tx_buf, xfer->tx_dma,
252 xfer->rx_buf, xfer->rx_dma); 257 xfer->rx_buf, xfer->rx_dma);
258 ieval = SPI_BIT(ENDRX) | SPI_BIT(OVRES);
253 } else { 259 } else {
254 spi_writel(as, RNCR, 0); 260 spi_writel(as, RNCR, 0);
255 spi_writel(as, TNCR, 0); 261 spi_writel(as, TNCR, 0);
262 ieval = SPI_BIT(RXBUFF) | SPI_BIT(ENDRX) | SPI_BIT(OVRES);
256 } 263 }
257 264
258 /* REVISIT: We're waiting for ENDRX before we start the next 265 /* REVISIT: We're waiting for ENDRX before we start the next
@@ -265,7 +272,7 @@ static void atmel_spi_next_xfer(struct spi_master *master,
265 * 272 *
266 * It should be doable, though. Just not now... 273 * It should be doable, though. Just not now...
267 */ 274 */
268 spi_writel(as, IER, SPI_BIT(ENDRX) | SPI_BIT(OVRES)); 275 spi_writel(as, IER, ieval);
269 spi_writel(as, PTCR, SPI_BIT(TXTEN) | SPI_BIT(RXTEN)); 276 spi_writel(as, PTCR, SPI_BIT(TXTEN) | SPI_BIT(RXTEN));
270} 277}
271 278
@@ -396,7 +403,7 @@ atmel_spi_interrupt(int irq, void *dev_id)
396 403
397 ret = IRQ_HANDLED; 404 ret = IRQ_HANDLED;
398 405
399 spi_writel(as, IDR, (SPI_BIT(ENDTX) | SPI_BIT(ENDRX) 406 spi_writel(as, IDR, (SPI_BIT(RXBUFF) | SPI_BIT(ENDRX)
400 | SPI_BIT(OVRES))); 407 | SPI_BIT(OVRES)));
401 408
402 /* 409 /*
@@ -418,7 +425,7 @@ atmel_spi_interrupt(int irq, void *dev_id)
418 if (xfer->delay_usecs) 425 if (xfer->delay_usecs)
419 udelay(xfer->delay_usecs); 426 udelay(xfer->delay_usecs);
420 427
421 dev_warn(master->dev.parent, "fifo overrun (%u/%u remaining)\n", 428 dev_warn(master->dev.parent, "overrun (%u/%u remaining)\n",
422 spi_readl(as, TCR), spi_readl(as, RCR)); 429 spi_readl(as, TCR), spi_readl(as, RCR));
423 430
424 /* 431 /*
@@ -442,7 +449,7 @@ atmel_spi_interrupt(int irq, void *dev_id)
442 spi_readl(as, SR); 449 spi_readl(as, SR);
443 450
444 atmel_spi_msg_done(master, as, msg, -EIO, 0); 451 atmel_spi_msg_done(master, as, msg, -EIO, 0);
445 } else if (pending & SPI_BIT(ENDRX)) { 452 } else if (pending & (SPI_BIT(RXBUFF) | SPI_BIT(ENDRX))) {
446 ret = IRQ_HANDLED; 453 ret = IRQ_HANDLED;
447 454
448 spi_writel(as, IDR, pending); 455 spi_writel(as, IDR, pending);
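For readers following the atmel_spi change above: the new ieval variable simply selects a different interrupt mask depending on whether a follow-up buffer has already been queued into the PDC "next" registers (ENDRX only) or the current buffer is the last one (RXBUFF as well). The fragment below is a self-contained userspace sketch of that selection only, not driver code; the SPI_* bit positions are placeholders, not the real AT91 SPI_IER layout.

#include <stdbool.h>
#include <stdio.h>

#define SPI_ENDRX	(1u << 0)	/* placeholder bit positions, not */
#define SPI_RXBUFF	(1u << 1)	/* the real AT91 SPI_IER layout   */
#define SPI_OVRES	(1u << 2)

/* Pick the interrupt-enable mask the way the patched driver does. */
static unsigned int atmel_ier_mask(bool next_buffer_queued)
{
	unsigned int ieval = SPI_ENDRX | SPI_OVRES;

	if (!next_buffer_queued)
		ieval |= SPI_RXBUFF;	/* last buffer: wait for full completion */

	return ieval;
}

int main(void)
{
	printf("chained: %#x, last: %#x\n",
	       atmel_ier_mask(true), atmel_ier_mask(false));
	return 0;
}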
diff --git a/drivers/spi/omap2_mcspi.c b/drivers/spi/omap2_mcspi.c
index f6f987bb71ca..9d2186fd74aa 100644
--- a/drivers/spi/omap2_mcspi.c
+++ b/drivers/spi/omap2_mcspi.c
@@ -35,8 +35,8 @@
35 35
36#include <linux/spi/spi.h> 36#include <linux/spi/spi.h>
37 37
38#include <asm/arch/dma.h> 38#include <mach/dma.h>
39#include <asm/arch/clock.h> 39#include <mach/clock.h>
40 40
41 41
42#define OMAP2_MCSPI_MAX_FREQ 48000000 42#define OMAP2_MCSPI_MAX_FREQ 48000000
diff --git a/drivers/spi/omap_uwire.c b/drivers/spi/omap_uwire.c
index d9ae111c27ae..5515eb97d7c5 100644
--- a/drivers/spi/omap_uwire.c
+++ b/drivers/spi/omap_uwire.c
@@ -47,12 +47,12 @@
47 47
48#include <asm/system.h> 48#include <asm/system.h>
49#include <asm/irq.h> 49#include <asm/irq.h>
50#include <asm/hardware.h> 50#include <mach/hardware.h>
51#include <asm/io.h> 51#include <asm/io.h>
52#include <asm/mach-types.h> 52#include <asm/mach-types.h>
53 53
54#include <asm/arch/mux.h> 54#include <mach/mux.h>
55#include <asm/arch/omap730.h> /* OMAP730_IO_CONF registers */ 55#include <mach/omap730.h> /* OMAP730_IO_CONF registers */
56 56
57 57
58/* FIXME address is now a platform device resource, 58/* FIXME address is now a platform device resource,
diff --git a/drivers/spi/orion_spi.c b/drivers/spi/orion_spi.c
new file mode 100644
index 000000000000..c4eaacd6e553
--- /dev/null
+++ b/drivers/spi/orion_spi.c
@@ -0,0 +1,574 @@
1/*
2 * orion_spi.c -- Marvell Orion SPI controller driver
3 *
4 * Author: Shadi Ammouri <shadi@marvell.com>
5 * Copyright (C) 2007-2008 Marvell Ltd.
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as
9 * published by the Free Software Foundation.
10 */
11
12#include <linux/init.h>
13#include <linux/interrupt.h>
14#include <linux/delay.h>
15#include <linux/platform_device.h>
16#include <linux/err.h>
17#include <linux/io.h>
18#include <linux/spi/spi.h>
19#include <linux/spi/orion_spi.h>
20#include <asm/unaligned.h>
21
22#define DRIVER_NAME "orion_spi"
23
24#define ORION_NUM_CHIPSELECTS 1 /* only one slave is supported */
25#define ORION_SPI_WAIT_RDY_MAX_LOOP 2000 /* in usec */
26
27#define ORION_SPI_IF_CTRL_REG 0x00
28#define ORION_SPI_IF_CONFIG_REG 0x04
29#define ORION_SPI_DATA_OUT_REG 0x08
30#define ORION_SPI_DATA_IN_REG 0x0c
31#define ORION_SPI_INT_CAUSE_REG 0x10
32
33#define ORION_SPI_IF_8_16_BIT_MODE (1 << 5)
34#define ORION_SPI_CLK_PRESCALE_MASK 0x1F
35
36struct orion_spi {
37 struct work_struct work;
38
39 /* Lock access to transfer list. */
40 spinlock_t lock;
41
42 struct list_head msg_queue;
43 struct spi_master *master;
44 void __iomem *base;
45 unsigned int max_speed;
46 unsigned int min_speed;
47 struct orion_spi_info *spi_info;
48};
49
50static struct workqueue_struct *orion_spi_wq;
51
52static inline void __iomem *spi_reg(struct orion_spi *orion_spi, u32 reg)
53{
54 return orion_spi->base + reg;
55}
56
57static inline void
58orion_spi_setbits(struct orion_spi *orion_spi, u32 reg, u32 mask)
59{
60 void __iomem *reg_addr = spi_reg(orion_spi, reg);
61 u32 val;
62
63 val = readl(reg_addr);
64 val |= mask;
65 writel(val, reg_addr);
66}
67
68static inline void
69orion_spi_clrbits(struct orion_spi *orion_spi, u32 reg, u32 mask)
70{
71 void __iomem *reg_addr = spi_reg(orion_spi, reg);
72 u32 val;
73
74 val = readl(reg_addr);
75 val &= ~mask;
76 writel(val, reg_addr);
77}
78
79static int orion_spi_set_transfer_size(struct orion_spi *orion_spi, int size)
80{
81 if (size == 16) {
82 orion_spi_setbits(orion_spi, ORION_SPI_IF_CONFIG_REG,
83 ORION_SPI_IF_8_16_BIT_MODE);
84 } else if (size == 8) {
85 orion_spi_clrbits(orion_spi, ORION_SPI_IF_CONFIG_REG,
86 ORION_SPI_IF_8_16_BIT_MODE);
87 } else {
88 pr_debug("Bad bits per word value %d (only 8 or 16 are "
89 "allowed).\n", size);
90 return -EINVAL;
91 }
92
93 return 0;
94}
95
96static int orion_spi_baudrate_set(struct spi_device *spi, unsigned int speed)
97{
98 u32 tclk_hz;
99 u32 rate;
100 u32 prescale;
101 u32 reg;
102 struct orion_spi *orion_spi;
103
104 orion_spi = spi_master_get_devdata(spi->master);
105
106 tclk_hz = orion_spi->spi_info->tclk;
107
108 /*
109 * the supported rates are: 4,6,8...30
110 * round up as we look for equal or less speed
111 */
112 rate = DIV_ROUND_UP(tclk_hz, speed);
113 rate = roundup(rate, 2);
114
115 /* check if requested speed is too small */
116 if (rate > 30)
117 return -EINVAL;
118
119 if (rate < 4)
120 rate = 4;
121
122 /* Convert the rate to SPI clock divisor value. */
123 prescale = 0x10 + rate/2;
124
125 reg = readl(spi_reg(orion_spi, ORION_SPI_IF_CONFIG_REG));
126 reg = ((reg & ~ORION_SPI_CLK_PRESCALE_MASK) | prescale);
127 writel(reg, spi_reg(orion_spi, ORION_SPI_IF_CONFIG_REG));
128
129 return 0;
130}
131
132/*
133 * called only when no transfer is active on the bus
134 */
135static int
136orion_spi_setup_transfer(struct spi_device *spi, struct spi_transfer *t)
137{
138 struct orion_spi *orion_spi;
139 unsigned int speed = spi->max_speed_hz;
140 unsigned int bits_per_word = spi->bits_per_word;
141 int rc;
142
143 orion_spi = spi_master_get_devdata(spi->master);
144
145 if ((t != NULL) && t->speed_hz)
146 speed = t->speed_hz;
147
148 if ((t != NULL) && t->bits_per_word)
149 bits_per_word = t->bits_per_word;
150
151 rc = orion_spi_baudrate_set(spi, speed);
152 if (rc)
153 return rc;
154
155 return orion_spi_set_transfer_size(orion_spi, bits_per_word);
156}
157
158static void orion_spi_set_cs(struct orion_spi *orion_spi, int enable)
159{
160 if (enable)
161 orion_spi_setbits(orion_spi, ORION_SPI_IF_CTRL_REG, 0x1);
162 else
163 orion_spi_clrbits(orion_spi, ORION_SPI_IF_CTRL_REG, 0x1);
164}
165
166static inline int orion_spi_wait_till_ready(struct orion_spi *orion_spi)
167{
168 int i;
169
170 for (i = 0; i < ORION_SPI_WAIT_RDY_MAX_LOOP; i++) {
171 if (readl(spi_reg(orion_spi, ORION_SPI_INT_CAUSE_REG)))
172 return 1;
173 else
174 udelay(1);
175 }
176
177 return -1;
178}
179
180static inline int
181orion_spi_write_read_8bit(struct spi_device *spi,
182 const u8 **tx_buf, u8 **rx_buf)
183{
184 void __iomem *tx_reg, *rx_reg, *int_reg;
185 struct orion_spi *orion_spi;
186
187 orion_spi = spi_master_get_devdata(spi->master);
188 tx_reg = spi_reg(orion_spi, ORION_SPI_DATA_OUT_REG);
189 rx_reg = spi_reg(orion_spi, ORION_SPI_DATA_IN_REG);
190 int_reg = spi_reg(orion_spi, ORION_SPI_INT_CAUSE_REG);
191
192 /* clear the interrupt cause register */
193 writel(0x0, int_reg);
194
195 if (tx_buf && *tx_buf)
196 writel(*(*tx_buf)++, tx_reg);
197 else
198 writel(0, tx_reg);
199
200 if (orion_spi_wait_till_ready(orion_spi) < 0) {
201 dev_err(&spi->dev, "TXS timed out\n");
202 return -1;
203 }
204
205 if (rx_buf && *rx_buf)
206 *(*rx_buf)++ = readl(rx_reg);
207
208 return 1;
209}
210
211static inline int
212orion_spi_write_read_16bit(struct spi_device *spi,
213 const u16 **tx_buf, u16 **rx_buf)
214{
215 void __iomem *tx_reg, *rx_reg, *int_reg;
216 struct orion_spi *orion_spi;
217
218 orion_spi = spi_master_get_devdata(spi->master);
219 tx_reg = spi_reg(orion_spi, ORION_SPI_DATA_OUT_REG);
220 rx_reg = spi_reg(orion_spi, ORION_SPI_DATA_IN_REG);
221 int_reg = spi_reg(orion_spi, ORION_SPI_INT_CAUSE_REG);
222
223 /* clear the interrupt cause register */
224 writel(0x0, int_reg);
225
226 if (tx_buf && *tx_buf)
227 writel(__cpu_to_le16(get_unaligned((*tx_buf)++)), tx_reg);
228 else
229 writel(0, tx_reg);
230
231 if (orion_spi_wait_till_ready(orion_spi) < 0) {
232 dev_err(&spi->dev, "TXS timed out\n");
233 return -1;
234 }
235
236 if (rx_buf && *rx_buf)
237 put_unaligned(__le16_to_cpu(readl(rx_reg)), (*rx_buf)++);
238
239 return 1;
240}
241
242static unsigned int
243orion_spi_write_read(struct spi_device *spi, struct spi_transfer *xfer)
244{
245 struct orion_spi *orion_spi;
246 unsigned int count;
247 int word_len;
248
249 orion_spi = spi_master_get_devdata(spi->master);
250 word_len = spi->bits_per_word;
251 count = xfer->len;
252
253 if (word_len == 8) {
254 const u8 *tx = xfer->tx_buf;
255 u8 *rx = xfer->rx_buf;
256
257 do {
258 if (orion_spi_write_read_8bit(spi, &tx, &rx) < 0)
259 goto out;
260 count--;
261 } while (count);
262 } else if (word_len == 16) {
263 const u16 *tx = xfer->tx_buf;
264 u16 *rx = xfer->rx_buf;
265
266 do {
267 if (orion_spi_write_read_16bit(spi, &tx, &rx) < 0)
268 goto out;
269 count -= 2;
270 } while (count);
271 }
272
273out:
274 return xfer->len - count;
275}
276
277
278static void orion_spi_work(struct work_struct *work)
279{
280 struct orion_spi *orion_spi =
281 container_of(work, struct orion_spi, work);
282
283 spin_lock_irq(&orion_spi->lock);
284 while (!list_empty(&orion_spi->msg_queue)) {
285 struct spi_message *m;
286 struct spi_device *spi;
287 struct spi_transfer *t = NULL;
288 int par_override = 0;
289 int status = 0;
290 int cs_active = 0;
291
292 m = container_of(orion_spi->msg_queue.next, struct spi_message,
293 queue);
294
295 list_del_init(&m->queue);
296 spin_unlock_irq(&orion_spi->lock);
297
298 spi = m->spi;
299
300 /* Load defaults */
301 status = orion_spi_setup_transfer(spi, NULL);
302
303 if (status < 0)
304 goto msg_done;
305
306 list_for_each_entry(t, &m->transfers, transfer_list) {
307 if (par_override || t->speed_hz || t->bits_per_word) {
308 par_override = 1;
309 status = orion_spi_setup_transfer(spi, t);
310 if (status < 0)
311 break;
312 if (!t->speed_hz && !t->bits_per_word)
313 par_override = 0;
314 }
315
316 if (!cs_active) {
317 orion_spi_set_cs(orion_spi, 1);
318 cs_active = 1;
319 }
320
321 if (t->len)
322 m->actual_length +=
323 orion_spi_write_read(spi, t);
324
325 if (t->delay_usecs)
326 udelay(t->delay_usecs);
327
328 if (t->cs_change) {
329 orion_spi_set_cs(orion_spi, 0);
330 cs_active = 0;
331 }
332 }
333
334msg_done:
335 if (cs_active)
336 orion_spi_set_cs(orion_spi, 0);
337
338 m->status = status;
339 m->complete(m->context);
340
341 spin_lock_irq(&orion_spi->lock);
342 }
343
344 spin_unlock_irq(&orion_spi->lock);
345}
346
347static int __init orion_spi_reset(struct orion_spi *orion_spi)
348{
349 /* Verify that the CS is deasserted */
350 orion_spi_set_cs(orion_spi, 0);
351
352 return 0;
353}
354
355static int orion_spi_setup(struct spi_device *spi)
356{
357 struct orion_spi *orion_spi;
358
359 orion_spi = spi_master_get_devdata(spi->master);
360
361 if (spi->mode) {
362 dev_err(&spi->dev, "setup: unsupported mode bits %x\n",
363 spi->mode);
364 return -EINVAL;
365 }
366
367 if (spi->bits_per_word == 0)
368 spi->bits_per_word = 8;
369
370 if ((spi->max_speed_hz == 0)
371 || (spi->max_speed_hz > orion_spi->max_speed))
372 spi->max_speed_hz = orion_spi->max_speed;
373
374 if (spi->max_speed_hz < orion_spi->min_speed) {
375 dev_err(&spi->dev, "setup: requested speed too low %d Hz\n",
376 spi->max_speed_hz);
377 return -EINVAL;
378 }
379
380 /*
381 * baudrate & width will be set by orion_spi_setup_transfer
382 */
383 return 0;
384}
385
386static int orion_spi_transfer(struct spi_device *spi, struct spi_message *m)
387{
388 struct orion_spi *orion_spi;
389 struct spi_transfer *t = NULL;
390 unsigned long flags;
391
392 m->actual_length = 0;
393 m->status = 0;
394
395 /* reject invalid messages and transfers */
396 if (list_empty(&m->transfers) || !m->complete)
397 return -EINVAL;
398
399 orion_spi = spi_master_get_devdata(spi->master);
400
401 list_for_each_entry(t, &m->transfers, transfer_list) {
402 unsigned int bits_per_word = spi->bits_per_word;
403
404 if (t->tx_buf == NULL && t->rx_buf == NULL && t->len) {
405 dev_err(&spi->dev,
406 "message rejected : "
407 "invalid transfer data buffers\n");
408 goto msg_rejected;
409 }
410
411 if ((t != NULL) && t->bits_per_word)
412 bits_per_word = t->bits_per_word;
413
414 if ((bits_per_word != 8) && (bits_per_word != 16)) {
415 dev_err(&spi->dev,
416 "message rejected : "
417 "invalid transfer bits_per_word (%d bits)\n",
418 bits_per_word);
419 goto msg_rejected;
420 }
421 /*make sure buffer length is even when working in 16 bit mode*/
422 if ((t != NULL) && (t->bits_per_word == 16) && (t->len & 1)) {
423 dev_err(&spi->dev,
424 "message rejected : "
425 "odd data length (%d) while in 16 bit mode\n",
426 t->len);
427 goto msg_rejected;
428 }
429
430 if (t->speed_hz < orion_spi->min_speed) {
431 dev_err(&spi->dev,
432 "message rejected : "
433 "device min speed (%d Hz) exceeds "
434 "required transfer speed (%d Hz)\n",
435 orion_spi->min_speed, t->speed_hz);
436 goto msg_rejected;
437 }
438 }
439
440
441 spin_lock_irqsave(&orion_spi->lock, flags);
442 list_add_tail(&m->queue, &orion_spi->msg_queue);
443 queue_work(orion_spi_wq, &orion_spi->work);
444 spin_unlock_irqrestore(&orion_spi->lock, flags);
445
446 return 0;
447msg_rejected:
448 /* Message rejected and not queued */
449 m->status = -EINVAL;
450 if (m->complete)
451 m->complete(m->context);
452 return -EINVAL;
453}
454
455static int __init orion_spi_probe(struct platform_device *pdev)
456{
457 struct spi_master *master;
458 struct orion_spi *spi;
459 struct resource *r;
460 struct orion_spi_info *spi_info;
461 int status = 0;
462
463 spi_info = pdev->dev.platform_data;
464
465 master = spi_alloc_master(&pdev->dev, sizeof *spi);
466 if (master == NULL) {
467 dev_dbg(&pdev->dev, "master allocation failed\n");
468 return -ENOMEM;
469 }
470
471 if (pdev->id != -1)
472 master->bus_num = pdev->id;
473
474 master->setup = orion_spi_setup;
475 master->transfer = orion_spi_transfer;
476 master->num_chipselect = ORION_NUM_CHIPSELECTS;
477
478 dev_set_drvdata(&pdev->dev, master);
479
480 spi = spi_master_get_devdata(master);
481 spi->master = master;
482 spi->spi_info = spi_info;
483
484 spi->max_speed = DIV_ROUND_UP(spi_info->tclk, 4);
485 spi->min_speed = DIV_ROUND_UP(spi_info->tclk, 30);
486
487 r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
488 if (r == NULL) {
489 status = -ENODEV;
490 goto out;
491 }
492
493 if (!request_mem_region(r->start, (r->end - r->start) + 1,
494 pdev->dev.bus_id)) {
495 status = -EBUSY;
496 goto out;
497 }
498 spi->base = ioremap(r->start, SZ_1K);
499
500 INIT_WORK(&spi->work, orion_spi_work);
501
502 spin_lock_init(&spi->lock);
503 INIT_LIST_HEAD(&spi->msg_queue);
504
505 if (orion_spi_reset(spi) < 0)
506 goto out_rel_mem;
507
508 status = spi_register_master(master);
509 if (status < 0)
510 goto out_rel_mem;
511
512 return status;
513
514out_rel_mem:
515 release_mem_region(r->start, (r->end - r->start) + 1);
516
517out:
518 spi_master_put(master);
519 return status;
520}
521
522
523static int __exit orion_spi_remove(struct platform_device *pdev)
524{
525 struct spi_master *master;
526 struct orion_spi *spi;
527 struct resource *r;
528
529 master = dev_get_drvdata(&pdev->dev);
530 spi = spi_master_get_devdata(master);
531
532 cancel_work_sync(&spi->work);
533
534 r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
535 release_mem_region(r->start, (r->end - r->start) + 1);
536
537 spi_unregister_master(master);
538
539 return 0;
540}
541
542MODULE_ALIAS("platform:" DRIVER_NAME);
543
544static struct platform_driver orion_spi_driver = {
545 .driver = {
546 .name = DRIVER_NAME,
547 .owner = THIS_MODULE,
548 },
549 .remove = __exit_p(orion_spi_remove),
550};
551
552static int __init orion_spi_init(void)
553{
554 orion_spi_wq = create_singlethread_workqueue(
555 orion_spi_driver.driver.name);
556 if (orion_spi_wq == NULL)
557 return -ENOMEM;
558
559 return platform_driver_probe(&orion_spi_driver, orion_spi_probe);
560}
561module_init(orion_spi_init);
562
563static void __exit orion_spi_exit(void)
564{
565 flush_workqueue(orion_spi_wq);
566 platform_driver_unregister(&orion_spi_driver);
567
568 destroy_workqueue(orion_spi_wq);
569}
570module_exit(orion_spi_exit);
571
572MODULE_DESCRIPTION("Orion SPI driver");
573MODULE_AUTHOR("Shadi Ammouri <shadi@marvell.com>");
574MODULE_LICENSE("GPL");
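The baudrate helper in the new Orion driver above supports only even divisors 4, 6, ..., 30 of TCLK and packs the result as 0x10 + rate/2. Below is a minimal userspace sketch of that calculation, not the driver itself; DIV_ROUND_UP()/roundup() are re-implemented locally and the TCLK and speed values in main() are made-up examples, not taken from any board.

#include <stdio.h>

#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))
#define ROUNDUP(x, y)		(DIV_ROUND_UP(x, y) * (y))

/* Return the IF_CONFIG prescale field for the requested speed, or -1
 * when the speed is below tclk/30 and cannot be reached. */
static int orion_prescale(unsigned int tclk_hz, unsigned int speed_hz)
{
	unsigned int rate = DIV_ROUND_UP(tclk_hz, speed_hz);

	rate = ROUNDUP(rate, 2);	/* only even divisors 4,6,...,30 exist */
	if (rate > 30)
		return -1;		/* requested speed too low */
	if (rate < 4)
		rate = 4;		/* fastest divisor the block supports */

	return 0x10 + rate / 2;		/* encoding used by the driver above */
}

int main(void)
{
	/* made-up example: 166 MHz TCLK, 10 MHz requested -> divisor 18, field 0x19 */
	printf("prescale field = 0x%x\n",
	       (unsigned int)orion_prescale(166000000, 10000000));
	return 0;
}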
diff --git a/drivers/spi/pxa2xx_spi.c b/drivers/spi/pxa2xx_spi.c
index 067299d6d192..34c7c9875681 100644
--- a/drivers/spi/pxa2xx_spi.c
+++ b/drivers/spi/pxa2xx_spi.c
@@ -31,15 +31,14 @@
31 31
32#include <asm/io.h> 32#include <asm/io.h>
33#include <asm/irq.h> 33#include <asm/irq.h>
34#include <asm/hardware.h>
35#include <asm/delay.h> 34#include <asm/delay.h>
36#include <asm/dma.h> 35#include <asm/dma.h>
37 36
38#include <asm/arch/hardware.h> 37#include <mach/hardware.h>
39#include <asm/arch/pxa-regs.h> 38#include <mach/pxa-regs.h>
40#include <asm/arch/regs-ssp.h> 39#include <mach/regs-ssp.h>
41#include <asm/arch/ssp.h> 40#include <mach/ssp.h>
42#include <asm/arch/pxa2xx_spi.h> 41#include <mach/pxa2xx_spi.h>
43 42
44MODULE_AUTHOR("Stephen Street"); 43MODULE_AUTHOR("Stephen Street");
45MODULE_DESCRIPTION("PXA2xx SSP SPI Controller"); 44MODULE_DESCRIPTION("PXA2xx SSP SPI Controller");
diff --git a/drivers/spi/spi_imx.c b/drivers/spi/spi_imx.c
index 6fb77fcc4971..61ba147e384d 100644
--- a/drivers/spi/spi_imx.c
+++ b/drivers/spi/spi_imx.c
@@ -33,12 +33,11 @@
33 33
34#include <asm/io.h> 34#include <asm/io.h>
35#include <asm/irq.h> 35#include <asm/irq.h>
36#include <asm/hardware.h>
37#include <asm/delay.h> 36#include <asm/delay.h>
38 37
39#include <asm/arch/hardware.h> 38#include <mach/hardware.h>
40#include <asm/arch/imx-dma.h> 39#include <mach/imx-dma.h>
41#include <asm/arch/spi_imx.h> 40#include <mach/spi_imx.h>
42 41
43/*-------------------------------------------------------------------------*/ 42/*-------------------------------------------------------------------------*/
44/* SPI Registers offsets from peripheral base address */ 43/* SPI Registers offsets from peripheral base address */
diff --git a/drivers/spi/spi_s3c24xx.c b/drivers/spi/spi_s3c24xx.c
index 1c643c9e1f15..98abc73c1a1d 100644
--- a/drivers/spi/spi_s3c24xx.c
+++ b/drivers/spi/spi_s3c24xx.c
@@ -25,11 +25,11 @@
25 25
26#include <asm/io.h> 26#include <asm/io.h>
27#include <asm/dma.h> 27#include <asm/dma.h>
28#include <asm/hardware.h> 28#include <mach/hardware.h>
29 29
30#include <asm/arch/regs-gpio.h> 30#include <mach/regs-gpio.h>
31#include <asm/plat-s3c24xx/regs-spi.h> 31#include <asm/plat-s3c24xx/regs-spi.h>
32#include <asm/arch/spi.h> 32#include <mach/spi.h>
33 33
34struct s3c24xx_spi { 34struct s3c24xx_spi {
35 /* bitbang has to be first */ 35 /* bitbang has to be first */
@@ -236,6 +236,19 @@ static irqreturn_t s3c24xx_spi_irq(int irq, void *dev)
236 return IRQ_HANDLED; 236 return IRQ_HANDLED;
237} 237}
238 238
239static void s3c24xx_spi_initialsetup(struct s3c24xx_spi *hw)
240{
241 /* for the moment, permanently enable the clock */
242
243 clk_enable(hw->clk);
244
245 /* program defaults into the registers */
246
247 writeb(0xff, hw->regs + S3C2410_SPPRE);
248 writeb(SPPIN_DEFAULT, hw->regs + S3C2410_SPPIN);
249 writeb(SPCON_DEFAULT, hw->regs + S3C2410_SPCON);
250}
251
239static int __init s3c24xx_spi_probe(struct platform_device *pdev) 252static int __init s3c24xx_spi_probe(struct platform_device *pdev)
240{ 253{
241 struct s3c2410_spi_info *pdata; 254 struct s3c2410_spi_info *pdata;
@@ -327,15 +340,7 @@ static int __init s3c24xx_spi_probe(struct platform_device *pdev)
327 goto err_no_clk; 340 goto err_no_clk;
328 } 341 }
329 342
330 /* for the moment, permanently enable the clock */ 343 s3c24xx_spi_initialsetup(hw);
331
332 clk_enable(hw->clk);
333
334 /* program defaults into the registers */
335
336 writeb(0xff, hw->regs + S3C2410_SPPRE);
337 writeb(SPPIN_DEFAULT, hw->regs + S3C2410_SPPIN);
338 writeb(SPCON_DEFAULT, hw->regs + S3C2410_SPCON);
339 344
340 /* setup any gpio we can */ 345 /* setup any gpio we can */
341 346
@@ -415,7 +420,7 @@ static int s3c24xx_spi_resume(struct platform_device *pdev)
415{ 420{
416 struct s3c24xx_spi *hw = platform_get_drvdata(pdev); 421 struct s3c24xx_spi *hw = platform_get_drvdata(pdev);
417 422
418 clk_enable(hw->clk); 423 s3c24xx_spi_initialsetup(hw);
419 return 0; 424 return 0;
420} 425}
421 426
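The spi_s3c24xx change above funnels both probe and resume through one s3c24xx_spi_initialsetup() helper, presumably because the controller's registers are not preserved across suspend and clk_enable() alone is not enough on resume. A toy userspace model of that probe/resume sharing follows; the registers are reduced to a plain struct and the default values are invented stand-ins, not the real SPPIN_DEFAULT/SPCON_DEFAULT.

#include <stdio.h>
#include <string.h>

struct spi_hw {
	unsigned char sppre, sppin, spcon;	/* model of the three registers */
	int clk_on;
};

/* Shared by "probe" and "resume", as in the patch above. */
static void spi_initialsetup(struct spi_hw *hw)
{
	hw->clk_on = 1;		/* clk_enable() */
	hw->sppre = 0xff;	/* prescaler default */
	hw->sppin = 0x02;	/* stand-in for SPPIN_DEFAULT */
	hw->spcon = 0x18;	/* stand-in for SPCON_DEFAULT */
}

static void spi_suspend(struct spi_hw *hw)
{
	memset(hw, 0, sizeof(*hw));	/* model: all register state is lost */
}

int main(void)
{
	struct spi_hw hw;

	spi_initialsetup(&hw);	/* probe */
	spi_suspend(&hw);
	spi_initialsetup(&hw);	/* resume reuses the same helper */
	printf("sppre after resume: %#x\n", (unsigned int)hw.sppre);
	return 0;
}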
diff --git a/drivers/spi/spi_s3c24xx_gpio.c b/drivers/spi/spi_s3c24xx_gpio.c
index e33f6145c560..cc1f647f579b 100644
--- a/drivers/spi/spi_s3c24xx_gpio.c
+++ b/drivers/spi/spi_s3c24xx_gpio.c
@@ -21,9 +21,9 @@
21#include <linux/spi/spi.h> 21#include <linux/spi/spi.h>
22#include <linux/spi/spi_bitbang.h> 22#include <linux/spi/spi_bitbang.h>
23 23
24#include <asm/arch/regs-gpio.h> 24#include <mach/regs-gpio.h>
25#include <asm/arch/spi-gpio.h> 25#include <mach/spi-gpio.h>
26#include <asm/hardware.h> 26#include <mach/hardware.h>
27 27
28struct s3c2410_spigpio { 28struct s3c2410_spigpio {
29 struct spi_bitbang bitbang; 29 struct spi_bitbang bitbang;
diff --git a/drivers/usb/core/message.c b/drivers/usb/core/message.c
index 2fcc06eb5e60..586d6f1376cf 100644
--- a/drivers/usb/core/message.c
+++ b/drivers/usb/core/message.c
@@ -389,7 +389,6 @@ int usb_sg_init(struct usb_sg_request *io, struct usb_device *dev,
389 if (io->entries <= 0) 389 if (io->entries <= 0)
390 return io->entries; 390 return io->entries;
391 391
392 io->count = io->entries;
393 io->urbs = kmalloc(io->entries * sizeof *io->urbs, mem_flags); 392 io->urbs = kmalloc(io->entries * sizeof *io->urbs, mem_flags);
394 if (!io->urbs) 393 if (!io->urbs)
395 goto nomem; 394 goto nomem;
@@ -458,6 +457,7 @@ int usb_sg_init(struct usb_sg_request *io, struct usb_device *dev,
458 io->urbs[--i]->transfer_flags &= ~URB_NO_INTERRUPT; 457 io->urbs[--i]->transfer_flags &= ~URB_NO_INTERRUPT;
459 458
460 /* transaction state */ 459 /* transaction state */
460 io->count = io->entries;
461 io->status = 0; 461 io->status = 0;
462 io->bytes = 0; 462 io->bytes = 0;
463 init_completion(&io->complete); 463 init_completion(&io->complete);
diff --git a/drivers/usb/gadget/at91_udc.c b/drivers/usb/gadget/at91_udc.c
index e2d8a5d86c40..a8a1de413321 100644
--- a/drivers/usb/gadget/at91_udc.c
+++ b/drivers/usb/gadget/at91_udc.c
@@ -40,16 +40,15 @@
40#include <linux/usb/gadget.h> 40#include <linux/usb/gadget.h>
41 41
42#include <asm/byteorder.h> 42#include <asm/byteorder.h>
43#include <asm/hardware.h> 43#include <mach/hardware.h>
44#include <asm/io.h> 44#include <asm/io.h>
45#include <asm/irq.h> 45#include <asm/irq.h>
46#include <asm/system.h> 46#include <asm/system.h>
47#include <asm/mach-types.h>
48#include <asm/gpio.h> 47#include <asm/gpio.h>
49 48
50#include <asm/arch/board.h> 49#include <mach/board.h>
51#include <asm/arch/cpu.h> 50#include <mach/cpu.h>
52#include <asm/arch/at91sam9261_matrix.h> 51#include <mach/at91sam9261_matrix.h>
53 52
54#include "at91_udc.h" 53#include "at91_udc.h"
55 54
diff --git a/drivers/usb/gadget/atmel_usba_udc.c b/drivers/usb/gadget/atmel_usba_udc.c
index 07e5a0b5dcda..ae30ab1d264f 100644
--- a/drivers/usb/gadget/atmel_usba_udc.c
+++ b/drivers/usb/gadget/atmel_usba_udc.c
@@ -22,7 +22,7 @@
22#include <linux/delay.h> 22#include <linux/delay.h>
23 23
24#include <asm/gpio.h> 24#include <asm/gpio.h>
25#include <asm/arch/board.h> 25#include <mach/board.h>
26 26
27#include "atmel_usba_udc.h" 27#include "atmel_usba_udc.h"
28 28
@@ -334,7 +334,7 @@ static void toggle_bias(int is_on)
334 334
335#elif defined(CONFIG_ARCH_AT91) 335#elif defined(CONFIG_ARCH_AT91)
336 336
337#include <asm/arch/at91_pmc.h> 337#include <mach/at91_pmc.h>
338 338
339static void toggle_bias(int is_on) 339static void toggle_bias(int is_on)
340{ 340{
diff --git a/drivers/usb/gadget/lh7a40x_udc.h b/drivers/usb/gadget/lh7a40x_udc.h
index 1ecfd6366b9a..ca861203a301 100644
--- a/drivers/usb/gadget/lh7a40x_udc.h
+++ b/drivers/usb/gadget/lh7a40x_udc.h
@@ -47,7 +47,7 @@
47#include <asm/irq.h> 47#include <asm/irq.h>
48#include <asm/system.h> 48#include <asm/system.h>
49#include <asm/unaligned.h> 49#include <asm/unaligned.h>
50#include <asm/hardware.h> 50#include <mach/hardware.h>
51 51
52#include <linux/usb/ch9.h> 52#include <linux/usb/ch9.h>
53#include <linux/usb/gadget.h> 53#include <linux/usb/gadget.h>
diff --git a/drivers/usb/gadget/m66592-udc.c b/drivers/usb/gadget/m66592-udc.c
index 8da7535c0c70..77b44fb48f0a 100644
--- a/drivers/usb/gadget/m66592-udc.c
+++ b/drivers/usb/gadget/m66592-udc.c
@@ -1593,7 +1593,7 @@ static int __init m66592_probe(struct platform_device *pdev)
1593 1593
1594 m66592->gadget.ops = &m66592_gadget_ops; 1594 m66592->gadget.ops = &m66592_gadget_ops;
1595 device_initialize(&m66592->gadget.dev); 1595 device_initialize(&m66592->gadget.dev);
1596 dev_set_name(&m66592->gadget, "gadget"); 1596 dev_set_name(&m66592->gadget.dev, "gadget");
1597 m66592->gadget.is_dualspeed = 1; 1597 m66592->gadget.is_dualspeed = 1;
1598 m66592->gadget.dev.parent = &pdev->dev; 1598 m66592->gadget.dev.parent = &pdev->dev;
1599 m66592->gadget.dev.dma_mask = pdev->dev.dma_mask; 1599 m66592->gadget.dev.dma_mask = pdev->dev.dma_mask;
diff --git a/drivers/usb/gadget/omap_udc.c b/drivers/usb/gadget/omap_udc.c
index 395bd1844482..376e80c07530 100644
--- a/drivers/usb/gadget/omap_udc.c
+++ b/drivers/usb/gadget/omap_udc.c
@@ -52,8 +52,8 @@
52#include <asm/unaligned.h> 52#include <asm/unaligned.h>
53#include <asm/mach-types.h> 53#include <asm/mach-types.h>
54 54
55#include <asm/arch/dma.h> 55#include <mach/dma.h>
56#include <asm/arch/usb.h> 56#include <mach/usb.h>
57 57
58#include "omap_udc.h" 58#include "omap_udc.h"
59 59
diff --git a/drivers/usb/gadget/pxa25x_udc.c b/drivers/usb/gadget/pxa25x_udc.c
index 7e6725d89976..da6e93c201d2 100644
--- a/drivers/usb/gadget/pxa25x_udc.c
+++ b/drivers/usb/gadget/pxa25x_udc.c
@@ -61,7 +61,7 @@
61 * This driver is PXA25x only. Grab the right register definitions. 61 * This driver is PXA25x only. Grab the right register definitions.
62 */ 62 */
63#ifdef CONFIG_ARCH_PXA 63#ifdef CONFIG_ARCH_PXA
64#include <asm/arch/pxa25x-udc.h> 64#include <mach/pxa25x-udc.h>
65#endif 65#endif
66 66
67#include <asm/mach/udc_pxa2xx.h> 67#include <asm/mach/udc_pxa2xx.h>
diff --git a/drivers/usb/gadget/pxa25x_udc.h b/drivers/usb/gadget/pxa25x_udc.h
index c8a13215e02c..1d51aa21e6eb 100644
--- a/drivers/usb/gadget/pxa25x_udc.h
+++ b/drivers/usb/gadget/pxa25x_udc.h
@@ -139,7 +139,7 @@ struct pxa25x_udc {
139/*-------------------------------------------------------------------------*/ 139/*-------------------------------------------------------------------------*/
140 140
141#ifdef CONFIG_ARCH_LUBBOCK 141#ifdef CONFIG_ARCH_LUBBOCK
142#include <asm/arch/lubbock.h> 142#include <mach/lubbock.h>
143/* lubbock can also report usb connect/disconnect irqs */ 143/* lubbock can also report usb connect/disconnect irqs */
144#endif 144#endif
145 145
diff --git a/drivers/usb/gadget/pxa27x_udc.c b/drivers/usb/gadget/pxa27x_udc.c
index 9d447d8cfc0c..a28513ecbe5b 100644
--- a/drivers/usb/gadget/pxa27x_udc.c
+++ b/drivers/usb/gadget/pxa27x_udc.c
@@ -33,13 +33,13 @@
33#include <linux/irq.h> 33#include <linux/irq.h>
34 34
35#include <asm/byteorder.h> 35#include <asm/byteorder.h>
36#include <asm/hardware.h> 36#include <mach/hardware.h>
37 37
38#include <linux/usb.h> 38#include <linux/usb.h>
39#include <linux/usb/ch9.h> 39#include <linux/usb/ch9.h>
40#include <linux/usb/gadget.h> 40#include <linux/usb/gadget.h>
41#include <asm/arch/pxa2xx-regs.h> /* FIXME: for PSSR */ 41#include <mach/pxa2xx-regs.h> /* FIXME: for PSSR */
42#include <asm/arch/udc.h> 42#include <mach/udc.h>
43 43
44#include "pxa27x_udc.h" 44#include "pxa27x_udc.h"
45 45
diff --git a/drivers/usb/gadget/s3c2410_udc.c b/drivers/usb/gadget/s3c2410_udc.c
index 6b1ef488043b..538807384592 100644
--- a/drivers/usb/gadget/s3c2410_udc.c
+++ b/drivers/usb/gadget/s3c2410_udc.c
@@ -49,15 +49,14 @@
49#include <asm/irq.h> 49#include <asm/irq.h>
50#include <asm/system.h> 50#include <asm/system.h>
51#include <asm/unaligned.h> 51#include <asm/unaligned.h>
52#include <asm/arch/irqs.h> 52#include <mach/irqs.h>
53 53
54#include <asm/arch/hardware.h> 54#include <mach/hardware.h>
55#include <asm/arch/regs-gpio.h> 55#include <mach/regs-gpio.h>
56 56
57#include <asm/plat-s3c24xx/regs-udc.h> 57#include <asm/plat-s3c24xx/regs-udc.h>
58#include <asm/plat-s3c24xx/udc.h> 58#include <asm/plat-s3c24xx/udc.h>
59 59
60#include <asm/mach-types.h>
61 60
62#include "s3c2410_udc.h" 61#include "s3c2410_udc.h"
63 62
@@ -888,7 +887,7 @@ static void s3c2410_udc_handle_ep(struct s3c2410_ep *ep)
888 } 887 }
889} 888}
890 889
891#include <asm/arch/regs-irq.h> 890#include <mach/regs-irq.h>
892 891
893/* 892/*
894 * s3c2410_udc_irq - interrupt handler 893 * s3c2410_udc_irq - interrupt handler
diff --git a/drivers/usb/host/ehci-q.c b/drivers/usb/host/ehci-q.c
index 2622b6596d7c..3712b925b315 100644
--- a/drivers/usb/host/ehci-q.c
+++ b/drivers/usb/host/ehci-q.c
@@ -932,7 +932,7 @@ static struct ehci_qh *qh_append_tds (
932 932
933 list_del (&qtd->qtd_list); 933 list_del (&qtd->qtd_list);
934 list_add (&dummy->qtd_list, qtd_list); 934 list_add (&dummy->qtd_list, qtd_list);
935 __list_splice (qtd_list, qh->qtd_list.prev); 935 list_splice_tail(qtd_list, &qh->qtd_list);
936 936
937 ehci_qtd_init(ehci, qtd, qtd->qtd_dma); 937 ehci_qtd_init(ehci, qtd, qtd->qtd_dma);
938 qh->dummy = qtd; 938 qh->dummy = qtd;
diff --git a/drivers/usb/host/ohci-at91.c b/drivers/usb/host/ohci-at91.c
index a5d8e550d897..6db7a2889e66 100644
--- a/drivers/usb/host/ohci-at91.c
+++ b/drivers/usb/host/ohci-at91.c
@@ -15,12 +15,11 @@
15#include <linux/clk.h> 15#include <linux/clk.h>
16#include <linux/platform_device.h> 16#include <linux/platform_device.h>
17 17
18#include <asm/mach-types.h> 18#include <mach/hardware.h>
19#include <asm/hardware.h>
20#include <asm/gpio.h> 19#include <asm/gpio.h>
21 20
22#include <asm/arch/board.h> 21#include <mach/board.h>
23#include <asm/arch/cpu.h> 22#include <mach/cpu.h>
24 23
25#ifndef CONFIG_ARCH_AT91 24#ifndef CONFIG_ARCH_AT91
26#error "CONFIG_ARCH_AT91 must be defined." 25#error "CONFIG_ARCH_AT91 must be defined."
diff --git a/drivers/usb/host/ohci-ep93xx.c b/drivers/usb/host/ohci-ep93xx.c
index 5adaf36e47d0..cb0b506f8259 100644
--- a/drivers/usb/host/ohci-ep93xx.c
+++ b/drivers/usb/host/ohci-ep93xx.c
@@ -28,8 +28,7 @@
28#include <linux/signal.h> 28#include <linux/signal.h>
29#include <linux/platform_device.h> 29#include <linux/platform_device.h>
30 30
31#include <asm/mach-types.h> 31#include <mach/hardware.h>
32#include <asm/hardware.h>
33 32
34static struct clk *usb_host_clock; 33static struct clk *usb_host_clock;
35 34
diff --git a/drivers/usb/host/ohci-lh7a404.c b/drivers/usb/host/ohci-lh7a404.c
index 1ef5d482c145..9e31d440d115 100644
--- a/drivers/usb/host/ohci-lh7a404.c
+++ b/drivers/usb/host/ohci-lh7a404.c
@@ -19,7 +19,7 @@
19#include <linux/platform_device.h> 19#include <linux/platform_device.h>
20#include <linux/signal.h> 20#include <linux/signal.h>
21 21
22#include <asm/hardware.h> 22#include <mach/hardware.h>
23 23
24 24
25extern int usb_disabled(void); 25extern int usb_disabled(void);
diff --git a/drivers/usb/host/ohci-omap.c b/drivers/usb/host/ohci-omap.c
index 6e5e5f81ac90..94dfca02f7e1 100644
--- a/drivers/usb/host/ohci-omap.c
+++ b/drivers/usb/host/ohci-omap.c
@@ -19,15 +19,15 @@
19#include <linux/platform_device.h> 19#include <linux/platform_device.h>
20#include <linux/clk.h> 20#include <linux/clk.h>
21 21
22#include <asm/hardware.h> 22#include <mach/hardware.h>
23#include <asm/io.h> 23#include <asm/io.h>
24#include <asm/mach-types.h> 24#include <asm/mach-types.h>
25 25
26#include <asm/arch/mux.h> 26#include <mach/mux.h>
27#include <asm/arch/irqs.h> 27#include <mach/irqs.h>
28#include <asm/arch/gpio.h> 28#include <mach/gpio.h>
29#include <asm/arch/fpga.h> 29#include <mach/fpga.h>
30#include <asm/arch/usb.h> 30#include <mach/usb.h>
31 31
32 32
33/* OMAP-1510 OHCI has its own MMU for DMA */ 33/* OMAP-1510 OHCI has its own MMU for DMA */
diff --git a/drivers/usb/host/ohci-pnx4008.c b/drivers/usb/host/ohci-pnx4008.c
index 6ad8f2fc57b9..b02cd0761977 100644
--- a/drivers/usb/host/ohci-pnx4008.c
+++ b/drivers/usb/host/ohci-pnx4008.c
@@ -21,13 +21,12 @@
21#include <linux/platform_device.h> 21#include <linux/platform_device.h>
22#include <linux/i2c.h> 22#include <linux/i2c.h>
23 23
24#include <asm/hardware.h> 24#include <mach/hardware.h>
25#include <asm/io.h> 25#include <asm/io.h>
26#include <asm/mach-types.h>
27 26
28#include <asm/arch/platform.h> 27#include <mach/platform.h>
29#include <asm/arch/irqs.h> 28#include <mach/irqs.h>
30#include <asm/arch/gpio.h> 29#include <mach/gpio.h>
31 30
32#define USB_CTRL IO_ADDRESS(PNX4008_PWRMAN_BASE + 0x64) 31#define USB_CTRL IO_ADDRESS(PNX4008_PWRMAN_BASE + 0x64)
33 32
diff --git a/drivers/usb/host/ohci-pxa27x.c b/drivers/usb/host/ohci-pxa27x.c
index 127b15799024..8c9c4849db6e 100644
--- a/drivers/usb/host/ohci-pxa27x.c
+++ b/drivers/usb/host/ohci-pxa27x.c
@@ -24,11 +24,10 @@
24#include <linux/platform_device.h> 24#include <linux/platform_device.h>
25#include <linux/clk.h> 25#include <linux/clk.h>
26 26
27#include <asm/mach-types.h> 27#include <mach/hardware.h>
28#include <asm/hardware.h> 28#include <mach/pxa-regs.h>
29#include <asm/arch/pxa-regs.h> 29#include <mach/pxa2xx-regs.h> /* FIXME: for PSSR */
30#include <asm/arch/pxa2xx-regs.h> /* FIXME: for PSSR */ 30#include <mach/ohci.h>
31#include <asm/arch/ohci.h>
32 31
33#define PXA_UHC_MAX_PORTNUM 3 32#define PXA_UHC_MAX_PORTNUM 3
34 33
diff --git a/drivers/usb/host/ohci-s3c2410.c b/drivers/usb/host/ohci-s3c2410.c
index 3c7a740cfe0c..9e3dc4069e8b 100644
--- a/drivers/usb/host/ohci-s3c2410.c
+++ b/drivers/usb/host/ohci-s3c2410.c
@@ -22,8 +22,8 @@
22#include <linux/platform_device.h> 22#include <linux/platform_device.h>
23#include <linux/clk.h> 23#include <linux/clk.h>
24 24
25#include <asm/hardware.h> 25#include <mach/hardware.h>
26#include <asm/arch/usb-control.h> 26#include <mach/usb-control.h>
27 27
28#define valid_port(idx) ((idx) == 1 || (idx) == 2) 28#define valid_port(idx) ((idx) == 1 || (idx) == 2)
29 29
diff --git a/drivers/usb/host/ohci-sa1111.c b/drivers/usb/host/ohci-sa1111.c
index 2e9dceb9bb99..4626b002e670 100644
--- a/drivers/usb/host/ohci-sa1111.c
+++ b/drivers/usb/host/ohci-sa1111.c
@@ -13,10 +13,10 @@
13 * This file is licenced under the GPL. 13 * This file is licenced under the GPL.
14 */ 14 */
15 15
16#include <asm/hardware.h> 16#include <mach/hardware.h>
17#include <asm/mach-types.h> 17#include <asm/mach-types.h>
18#include <asm/arch/assabet.h> 18#include <mach/assabet.h>
19#include <asm/arch/badge4.h> 19#include <mach/badge4.h>
20#include <asm/hardware/sa1111.h> 20#include <asm/hardware/sa1111.h>
21 21
22#ifndef CONFIG_SA1111 22#ifndef CONFIG_SA1111
diff --git a/drivers/usb/storage/freecom.c b/drivers/usb/storage/freecom.c
index 7a4d45677227..73ac7262239e 100644
--- a/drivers/usb/storage/freecom.c
+++ b/drivers/usb/storage/freecom.c
@@ -26,8 +26,6 @@
26 * (http://www.freecom.de/) 26 * (http://www.freecom.de/)
27 */ 27 */
28 28
29#include <linux/hdreg.h>
30
31#include <scsi/scsi.h> 29#include <scsi/scsi.h>
32#include <scsi/scsi_cmnd.h> 30#include <scsi/scsi_cmnd.h>
33 31
diff --git a/drivers/video/Makefile b/drivers/video/Makefile
index 0ebc1bfd2514..a6b55297a7fb 100644
--- a/drivers/video/Makefile
+++ b/drivers/video/Makefile
@@ -118,7 +118,6 @@ obj-$(CONFIG_FB_PS3) += ps3fb.o
118obj-$(CONFIG_FB_SM501) += sm501fb.o 118obj-$(CONFIG_FB_SM501) += sm501fb.o
119obj-$(CONFIG_FB_XILINX) += xilinxfb.o 119obj-$(CONFIG_FB_XILINX) += xilinxfb.o
120obj-$(CONFIG_FB_SH_MOBILE_LCDC) += sh_mobile_lcdcfb.o 120obj-$(CONFIG_FB_SH_MOBILE_LCDC) += sh_mobile_lcdcfb.o
121obj-$(CONFIG_FB_SH7343VOU) += sh7343_voufb.o
122obj-$(CONFIG_FB_OMAP) += omap/ 121obj-$(CONFIG_FB_OMAP) += omap/
123obj-$(CONFIG_XEN_FBDEV_FRONTEND) += xen-fbfront.o 122obj-$(CONFIG_XEN_FBDEV_FRONTEND) += xen-fbfront.o
124obj-$(CONFIG_FB_CARMINE) += carminefb.o 123obj-$(CONFIG_FB_CARMINE) += carminefb.o
diff --git a/drivers/video/acornfb.c b/drivers/video/acornfb.c
index 017233d0c481..61c3d3f40fd1 100644
--- a/drivers/video/acornfb.c
+++ b/drivers/video/acornfb.c
@@ -29,7 +29,7 @@
29#include <linux/platform_device.h> 29#include <linux/platform_device.h>
30#include <linux/dma-mapping.h> 30#include <linux/dma-mapping.h>
31 31
32#include <asm/hardware.h> 32#include <mach/hardware.h>
33#include <asm/io.h> 33#include <asm/io.h>
34#include <asm/irq.h> 34#include <asm/irq.h>
35#include <asm/mach-types.h> 35#include <asm/mach-types.h>
@@ -339,7 +339,7 @@ acornfb_setcolreg(u_int regno, u_int red, u_int green, u_int blue,
339#endif 339#endif
340 340
341#ifdef HAS_VIDC20 341#ifdef HAS_VIDC20
342#include <asm/arch/acornfb.h> 342#include <mach/acornfb.h>
343 343
344#define MAX_SIZE 2*1024*1024 344#define MAX_SIZE 2*1024*1024
345 345
diff --git a/drivers/video/am200epd.c b/drivers/video/am200epd.c
index 32dd85126931..0c35b8b0160e 100644
--- a/drivers/video/am200epd.c
+++ b/drivers/video/am200epd.c
@@ -33,7 +33,7 @@
33 33
34#include <video/metronomefb.h> 34#include <video/metronomefb.h>
35 35
36#include <asm/arch/pxa-regs.h> 36#include <mach/pxa-regs.h>
37 37
38/* register offsets for gpio control */ 38/* register offsets for gpio control */
39#define LED_GPIO_PIN 51 39#define LED_GPIO_PIN 51
diff --git a/drivers/video/arkfb.c b/drivers/video/arkfb.c
index 5001bd4ef466..4bd569e479a7 100644
--- a/drivers/video/arkfb.c
+++ b/drivers/video/arkfb.c
@@ -958,20 +958,20 @@ static int __devinit ark_pci_probe(struct pci_dev *dev, const struct pci_device_
958 /* Prepare PCI device */ 958 /* Prepare PCI device */
959 rc = pci_enable_device(dev); 959 rc = pci_enable_device(dev);
960 if (rc < 0) { 960 if (rc < 0) {
961 dev_err(info->dev, "cannot enable PCI device\n"); 961 dev_err(info->device, "cannot enable PCI device\n");
962 goto err_enable_device; 962 goto err_enable_device;
963 } 963 }
964 964
965 rc = pci_request_regions(dev, "arkfb"); 965 rc = pci_request_regions(dev, "arkfb");
966 if (rc < 0) { 966 if (rc < 0) {
967 dev_err(info->dev, "cannot reserve framebuffer region\n"); 967 dev_err(info->device, "cannot reserve framebuffer region\n");
968 goto err_request_regions; 968 goto err_request_regions;
969 } 969 }
970 970
971 par->dac = ics5342_init(ark_dac_read_regs, ark_dac_write_regs, info); 971 par->dac = ics5342_init(ark_dac_read_regs, ark_dac_write_regs, info);
972 if (! par->dac) { 972 if (! par->dac) {
973 rc = -ENOMEM; 973 rc = -ENOMEM;
974 dev_err(info->dev, "RAMDAC initialization failed\n"); 974 dev_err(info->device, "RAMDAC initialization failed\n");
975 goto err_dac; 975 goto err_dac;
976 } 976 }
977 977
@@ -982,7 +982,7 @@ static int __devinit ark_pci_probe(struct pci_dev *dev, const struct pci_device_
982 info->screen_base = pci_iomap(dev, 0, 0); 982 info->screen_base = pci_iomap(dev, 0, 0);
983 if (! info->screen_base) { 983 if (! info->screen_base) {
984 rc = -ENOMEM; 984 rc = -ENOMEM;
985 dev_err(info->dev, "iomap for framebuffer failed\n"); 985 dev_err(info->device, "iomap for framebuffer failed\n");
986 goto err_iomap; 986 goto err_iomap;
987 } 987 }
988 988
@@ -1004,19 +1004,19 @@ static int __devinit ark_pci_probe(struct pci_dev *dev, const struct pci_device_
1004 rc = fb_find_mode(&(info->var), info, mode_option, NULL, 0, NULL, 8); 1004 rc = fb_find_mode(&(info->var), info, mode_option, NULL, 0, NULL, 8);
1005 if (! ((rc == 1) || (rc == 2))) { 1005 if (! ((rc == 1) || (rc == 2))) {
1006 rc = -EINVAL; 1006 rc = -EINVAL;
1007 dev_err(info->dev, "mode %s not found\n", mode_option); 1007 dev_err(info->device, "mode %s not found\n", mode_option);
1008 goto err_find_mode; 1008 goto err_find_mode;
1009 } 1009 }
1010 1010
1011 rc = fb_alloc_cmap(&info->cmap, 256, 0); 1011 rc = fb_alloc_cmap(&info->cmap, 256, 0);
1012 if (rc < 0) { 1012 if (rc < 0) {
1013 dev_err(info->dev, "cannot allocate colormap\n"); 1013 dev_err(info->device, "cannot allocate colormap\n");
1014 goto err_alloc_cmap; 1014 goto err_alloc_cmap;
1015 } 1015 }
1016 1016
1017 rc = register_framebuffer(info); 1017 rc = register_framebuffer(info);
1018 if (rc < 0) { 1018 if (rc < 0) {
1019 dev_err(info->dev, "cannot register framebugger\n"); 1019 dev_err(info->device, "cannot register framebugger\n");
1020 goto err_reg_fb; 1020 goto err_reg_fb;
1021 } 1021 }
1022 1022
@@ -1090,7 +1090,7 @@ static int ark_pci_suspend (struct pci_dev* dev, pm_message_t state)
1090 struct fb_info *info = pci_get_drvdata(dev); 1090 struct fb_info *info = pci_get_drvdata(dev);
1091 struct arkfb_info *par = info->par; 1091 struct arkfb_info *par = info->par;
1092 1092
1093 dev_info(info->dev, "suspend\n"); 1093 dev_info(info->device, "suspend\n");
1094 1094
1095 acquire_console_sem(); 1095 acquire_console_sem();
1096 mutex_lock(&(par->open_lock)); 1096 mutex_lock(&(par->open_lock));
@@ -1121,16 +1121,13 @@ static int ark_pci_resume (struct pci_dev* dev)
1121 struct fb_info *info = pci_get_drvdata(dev); 1121 struct fb_info *info = pci_get_drvdata(dev);
1122 struct arkfb_info *par = info->par; 1122 struct arkfb_info *par = info->par;
1123 1123
1124 dev_info(info->dev, "resume\n"); 1124 dev_info(info->device, "resume\n");
1125 1125
1126 acquire_console_sem(); 1126 acquire_console_sem();
1127 mutex_lock(&(par->open_lock)); 1127 mutex_lock(&(par->open_lock));
1128 1128
1129 if (par->ref_count == 0) { 1129 if (par->ref_count == 0)
1130 mutex_unlock(&(par->open_lock)); 1130 goto fail;
1131 release_console_sem();
1132 return 0;
1133 }
1134 1131
1135 pci_set_power_state(dev, PCI_D0); 1132 pci_set_power_state(dev, PCI_D0);
1136 pci_restore_state(dev); 1133 pci_restore_state(dev);
@@ -1143,8 +1140,8 @@ static int ark_pci_resume (struct pci_dev* dev)
1143 arkfb_set_par(info); 1140 arkfb_set_par(info);
1144 fb_set_suspend(info, 0); 1141 fb_set_suspend(info, 0);
1145 1142
1146 mutex_unlock(&(par->open_lock));
1147fail: 1143fail:
1144 mutex_unlock(&(par->open_lock));
1148 release_console_sem(); 1145 release_console_sem();
1149 return 0; 1146 return 0;
1150} 1147}
diff --git a/drivers/video/atmel_lcdfb.c b/drivers/video/atmel_lcdfb.c
index 5b3a15dffb5f..e7018a2f56af 100644
--- a/drivers/video/atmel_lcdfb.c
+++ b/drivers/video/atmel_lcdfb.c
@@ -18,9 +18,9 @@
18#include <linux/delay.h> 18#include <linux/delay.h>
19#include <linux/backlight.h> 19#include <linux/backlight.h>
20 20
21#include <asm/arch/board.h> 21#include <mach/board.h>
22#include <asm/arch/cpu.h> 22#include <mach/cpu.h>
23#include <asm/arch/gpio.h> 23#include <mach/gpio.h>
24 24
25#include <video/atmel_lcdc.h> 25#include <video/atmel_lcdc.h>
26 26
diff --git a/drivers/video/aty/atyfb_base.c b/drivers/video/aty/atyfb_base.c
index 620ba8120368..cc6b470073da 100644
--- a/drivers/video/aty/atyfb_base.c
+++ b/drivers/video/aty/atyfb_base.c
@@ -244,7 +244,7 @@ static int atyfb_sync(struct fb_info *info);
244 */ 244 */
245 245
246static int aty_init(struct fb_info *info); 246static int aty_init(struct fb_info *info);
247static void aty_resume_chip(struct fb_info *info); 247
248#ifdef CONFIG_ATARI 248#ifdef CONFIG_ATARI
249static int store_video_par(char *videopar, unsigned char m64_num); 249static int store_video_par(char *videopar, unsigned char m64_num);
250#endif 250#endif
@@ -2023,6 +2023,20 @@ static int atyfb_pci_suspend(struct pci_dev *pdev, pm_message_t state)
2023 return 0; 2023 return 0;
2024} 2024}
2025 2025
2026static void aty_resume_chip(struct fb_info *info)
2027{
2028 struct atyfb_par *par = info->par;
2029
2030 aty_st_le32(MEM_CNTL, par->mem_cntl, par);
2031
2032 if (par->pll_ops->resume_pll)
2033 par->pll_ops->resume_pll(info, &par->pll);
2034
2035 if (par->aux_start)
2036 aty_st_le32(BUS_CNTL,
2037 aty_ld_le32(BUS_CNTL, par) | BUS_APER_REG_DIS, par);
2038}
2039
2026static int atyfb_pci_resume(struct pci_dev *pdev) 2040static int atyfb_pci_resume(struct pci_dev *pdev)
2027{ 2041{
2028 struct fb_info *info = pci_get_drvdata(pdev); 2042 struct fb_info *info = pci_get_drvdata(pdev);
@@ -2659,19 +2673,6 @@ aty_init_exit:
2659 return ret; 2673 return ret;
2660} 2674}
2661 2675
2662static void aty_resume_chip(struct fb_info *info)
2663{
2664 struct atyfb_par *par = info->par;
2665
2666 aty_st_le32(MEM_CNTL, par->mem_cntl, par);
2667
2668 if (par->pll_ops->resume_pll)
2669 par->pll_ops->resume_pll(info, &par->pll);
2670
2671 if (par->aux_start)
2672 aty_st_le32(BUS_CNTL, aty_ld_le32(BUS_CNTL, par) | BUS_APER_REG_DIS, par);
2673}
2674
2675#ifdef CONFIG_ATARI 2676#ifdef CONFIG_ATARI
2676static int __devinit store_video_par(char *video_str, unsigned char m64_num) 2677static int __devinit store_video_par(char *video_str, unsigned char m64_num)
2677{ 2678{
diff --git a/drivers/video/aty/radeon_accel.c b/drivers/video/aty/radeon_accel.c
index 3ca27cb13caa..4d13f68436e6 100644
--- a/drivers/video/aty/radeon_accel.c
+++ b/drivers/video/aty/radeon_accel.c
@@ -241,8 +241,8 @@ void radeonfb_engine_reset(struct radeonfb_info *rinfo)
241 INREG(HOST_PATH_CNTL); 241 INREG(HOST_PATH_CNTL);
242 OUTREG(HOST_PATH_CNTL, host_path_cntl); 242 OUTREG(HOST_PATH_CNTL, host_path_cntl);
243 243
244 if (rinfo->family != CHIP_FAMILY_R300 || 244 if (rinfo->family != CHIP_FAMILY_R300 &&
245 rinfo->family != CHIP_FAMILY_R350 || 245 rinfo->family != CHIP_FAMILY_R350 &&
246 rinfo->family != CHIP_FAMILY_RV350) 246 rinfo->family != CHIP_FAMILY_RV350)
247 OUTREG(RBBM_SOFT_RESET, rbbm_soft_reset); 247 OUTREG(RBBM_SOFT_RESET, rbbm_soft_reset);
248 248
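The radeon_accel fix above is a classic De Morgan slip: a chain of "!=" tests joined by "||" can never be false, so the soft reset was written even on R300-class chips where it must be skipped. A tiny standalone demonstration:

#include <stdio.h>

enum chip { R100, R300, R350, RV350 };

int main(void)
{
	enum chip family = R300;

	/* old condition: true for every family, reset always written */
	int buggy = (family != R300 || family != R350 || family != RV350);
	/* fixed condition: false for R300/R350/RV350, true otherwise */
	int fixed = (family != R300 && family != R350 && family != RV350);

	printf("buggy=%d fixed=%d\n", buggy, fixed);	/* prints buggy=1 fixed=0 */
	return 0;
}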
diff --git a/drivers/video/aty/radeon_i2c.c b/drivers/video/aty/radeon_i2c.c
index f9e7c29ad9bf..8c8fa35f1b7c 100644
--- a/drivers/video/aty/radeon_i2c.c
+++ b/drivers/video/aty/radeon_i2c.c
@@ -69,7 +69,8 @@ static int radeon_setup_i2c_bus(struct radeon_i2c_chan *chan, const char *name)
69{ 69{
70 int rc; 70 int rc;
71 71
72 strcpy(chan->adapter.name, name); 72 snprintf(chan->adapter.name, sizeof(chan->adapter.name),
73 "radeonfb %s", name);
73 chan->adapter.owner = THIS_MODULE; 74 chan->adapter.owner = THIS_MODULE;
74 chan->adapter.id = I2C_HW_B_RADEON; 75 chan->adapter.id = I2C_HW_B_RADEON;
75 chan->adapter.algo_data = &chan->algo; 76 chan->adapter.algo_data = &chan->algo;
diff --git a/drivers/video/backlight/hp680_bl.c b/drivers/video/backlight/hp680_bl.c
index fbea2bd129c7..6fa0b9d5559a 100644
--- a/drivers/video/backlight/hp680_bl.c
+++ b/drivers/video/backlight/hp680_bl.c
@@ -18,7 +18,7 @@
18#include <linux/fb.h> 18#include <linux/fb.h>
19#include <linux/backlight.h> 19#include <linux/backlight.h>
20 20
21#include <asm/cpu/dac.h> 21#include <cpu/dac.h>
22#include <asm/hp6xx.h> 22#include <asm/hp6xx.h>
23#include <asm/hd64461.h> 23#include <asm/hd64461.h>
24 24
diff --git a/drivers/video/backlight/omap1_bl.c b/drivers/video/backlight/omap1_bl.c
index 891875d53a49..cbad67e89826 100644
--- a/drivers/video/backlight/omap1_bl.c
+++ b/drivers/video/backlight/omap1_bl.c
@@ -25,9 +25,9 @@
25#include <linux/fb.h> 25#include <linux/fb.h>
26#include <linux/backlight.h> 26#include <linux/backlight.h>
27 27
28#include <asm/arch/hardware.h> 28#include <mach/hardware.h>
29#include <asm/arch/board.h> 29#include <mach/board.h>
30#include <asm/arch/mux.h> 30#include <mach/mux.h>
31 31
32#define OMAPBL_MAX_INTENSITY 0xff 32#define OMAPBL_MAX_INTENSITY 0xff
33 33
diff --git a/drivers/video/backlight/platform_lcd.c b/drivers/video/backlight/platform_lcd.c
index 72d44dbfce82..738694d23889 100644
--- a/drivers/video/backlight/platform_lcd.c
+++ b/drivers/video/backlight/platform_lcd.c
@@ -92,7 +92,7 @@ static int __devinit platform_lcd_probe(struct platform_device *pdev)
92 92
93 plcd->us = dev; 93 plcd->us = dev;
94 plcd->pdata = pdata; 94 plcd->pdata = pdata;
95 plcd->lcd = lcd_device_register("platform-lcd", dev, 95 plcd->lcd = lcd_device_register(dev_name(dev), dev,
96 plcd, &platform_lcd_ops); 96 plcd, &platform_lcd_ops);
97 if (IS_ERR(plcd->lcd)) { 97 if (IS_ERR(plcd->lcd)) {
98 dev_err(dev, "cannot register lcd device\n"); 98 dev_err(dev, "cannot register lcd device\n");
@@ -101,6 +101,8 @@ static int __devinit platform_lcd_probe(struct platform_device *pdev)
101 } 101 }
102 102
103 platform_set_drvdata(pdev, plcd); 103 platform_set_drvdata(pdev, plcd);
104 platform_lcd_set_power(plcd->lcd, FB_BLANK_NORMAL);
105
104 return 0; 106 return 0;
105 107
106 err_mem: 108 err_mem:
diff --git a/drivers/video/backlight/pwm_bl.c b/drivers/video/backlight/pwm_bl.c
index 6338d0e2fe07..ea07258565f0 100644
--- a/drivers/video/backlight/pwm_bl.c
+++ b/drivers/video/backlight/pwm_bl.c
@@ -68,8 +68,10 @@ static int pwm_backlight_probe(struct platform_device *pdev)
68 struct pwm_bl_data *pb; 68 struct pwm_bl_data *pb;
69 int ret; 69 int ret;
70 70
71 if (!data) 71 if (!data) {
72 dev_err(&pdev->dev, "failed to find platform data\n");
72 return -EINVAL; 73 return -EINVAL;
74 }
73 75
74 if (data->init) { 76 if (data->init) {
75 ret = data->init(&pdev->dev); 77 ret = data->init(&pdev->dev);
@@ -79,6 +81,7 @@ static int pwm_backlight_probe(struct platform_device *pdev)
79 81
80 pb = kzalloc(sizeof(*pb), GFP_KERNEL); 82 pb = kzalloc(sizeof(*pb), GFP_KERNEL);
81 if (!pb) { 83 if (!pb) {
84 dev_err(&pdev->dev, "no memory for state\n");
82 ret = -ENOMEM; 85 ret = -ENOMEM;
83 goto err_alloc; 86 goto err_alloc;
84 } 87 }
@@ -91,7 +94,8 @@ static int pwm_backlight_probe(struct platform_device *pdev)
91 dev_err(&pdev->dev, "unable to request PWM for backlight\n"); 94 dev_err(&pdev->dev, "unable to request PWM for backlight\n");
92 ret = PTR_ERR(pb->pwm); 95 ret = PTR_ERR(pb->pwm);
93 goto err_pwm; 96 goto err_pwm;
94 } 97 } else
98 dev_dbg(&pdev->dev, "got pwm for backlight\n");
95 99
96 bl = backlight_device_register(pdev->name, &pdev->dev, 100 bl = backlight_device_register(pdev->name, &pdev->dev,
97 pb, &pwm_backlight_ops); 101 pb, &pwm_backlight_ops);
@@ -183,3 +187,5 @@ module_exit(pwm_backlight_exit);
183 187
184MODULE_DESCRIPTION("PWM based Backlight Driver"); 188MODULE_DESCRIPTION("PWM based Backlight Driver");
185MODULE_LICENSE("GPL"); 189MODULE_LICENSE("GPL");
190MODULE_ALIAS("platform:pwm-backlight");
191
diff --git a/drivers/video/clps711xfb.c b/drivers/video/clps711xfb.c
index 9f8a389dc7ae..16f5db471ab5 100644
--- a/drivers/video/clps711xfb.c
+++ b/drivers/video/clps711xfb.c
@@ -27,12 +27,12 @@
27#include <linux/proc_fs.h> 27#include <linux/proc_fs.h>
28#include <linux/delay.h> 28#include <linux/delay.h>
29 29
30#include <asm/hardware.h> 30#include <mach/hardware.h>
31#include <asm/mach-types.h> 31#include <asm/mach-types.h>
32#include <linux/uaccess.h> 32#include <linux/uaccess.h>
33 33
34#include <asm/hardware/clps7111.h> 34#include <asm/hardware/clps7111.h>
35#include <asm/arch/syspld.h> 35#include <mach/syspld.h>
36 36
37struct fb_info *cfb; 37struct fb_info *cfb;
38 38
diff --git a/drivers/video/console/.gitignore b/drivers/video/console/.gitignore
new file mode 100644
index 000000000000..0c258b45439c
--- /dev/null
+++ b/drivers/video/console/.gitignore
@@ -0,0 +1,2 @@
1# conmakehash generated file
2promcon_tbl.c
diff --git a/drivers/video/console/fbcon.c b/drivers/video/console/fbcon.c
index 3ccfa76d9b2a..33859934a8e4 100644
--- a/drivers/video/console/fbcon.c
+++ b/drivers/video/console/fbcon.c
@@ -1311,6 +1311,9 @@ static void fbcon_clear(struct vc_data *vc, int sy, int sx, int height,
1311 if (!height || !width) 1311 if (!height || !width)
1312 return; 1312 return;
1313 1313
1314 if (sy < vc->vc_top && vc->vc_top == logo_lines)
1315 vc->vc_top = 0;
1316
1314 /* Split blits that cross physical y_wrap boundary */ 1317 /* Split blits that cross physical y_wrap boundary */
1315 1318
1316 y_break = p->vrows - p->yscroll; 1319 y_break = p->vrows - p->yscroll;
diff --git a/drivers/video/console/sticore.c b/drivers/video/console/sticore.c
index d7822af0e00a..ef7870f5ea08 100644
--- a/drivers/video/console/sticore.c
+++ b/drivers/video/console/sticore.c
@@ -24,6 +24,7 @@
24#include <asm/hardware.h> 24#include <asm/hardware.h>
25#include <asm/parisc-device.h> 25#include <asm/parisc-device.h>
26#include <asm/cacheflush.h> 26#include <asm/cacheflush.h>
27#include <asm/grfioctl.h>
27 28
28#include "../sticore.h" 29#include "../sticore.h"
29 30
@@ -725,6 +726,7 @@ static int __devinit sti_read_rom(int wordmode, struct sti_struct *sti,
725{ 726{
726 struct sti_cooked_rom *cooked; 727 struct sti_cooked_rom *cooked;
727 struct sti_rom *raw = NULL; 728 struct sti_rom *raw = NULL;
729 unsigned long revno;
728 730
729 cooked = kmalloc(sizeof *cooked, GFP_KERNEL); 731 cooked = kmalloc(sizeof *cooked, GFP_KERNEL);
730 if (!cooked) 732 if (!cooked)
@@ -767,9 +769,35 @@ static int __devinit sti_read_rom(int wordmode, struct sti_struct *sti,
767 sti->graphics_id[1] = raw->graphics_id[1]; 769 sti->graphics_id[1] = raw->graphics_id[1];
768 770
769 sti_dump_rom(raw); 771 sti_dump_rom(raw);
770 772
773 /* check if the ROM routines in this card are compatible */
774 if (wordmode || sti->graphics_id[1] != 0x09A02587)
775 goto ok;
776
777 revno = (raw->revno[0] << 8) | raw->revno[1];
778
779 switch (sti->graphics_id[0]) {
780 case S9000_ID_HCRX:
781 /* HyperA or HyperB ? */
782 if (revno == 0x8408 || revno == 0x840b)
783 goto msg_not_supported;
784 break;
785 case CRT_ID_THUNDER:
786 if (revno == 0x8509)
787 goto msg_not_supported;
788 break;
789 case CRT_ID_THUNDER2:
790 if (revno == 0x850c)
791 goto msg_not_supported;
792 }
793ok:
771 return 1; 794 return 1;
772 795
796msg_not_supported:
797 printk(KERN_ERR "Sorry, this GSC/STI card is not yet supported.\n");
798 printk(KERN_ERR "Please see http://parisc-linux.org/faq/"
799 "graphics-howto.html for more info.\n");
800 /* fall through */
773out_err: 801out_err:
774 kfree(raw); 802 kfree(raw);
775 kfree(cooked); 803 kfree(cooked);
diff --git a/drivers/video/cyber2000fb.c b/drivers/video/cyber2000fb.c
index d0e4cb618269..41d62632dcdb 100644
--- a/drivers/video/cyber2000fb.c
+++ b/drivers/video/cyber2000fb.c
@@ -1425,7 +1425,7 @@ static void cyberpro_common_resume(struct cfb_info *cfb)
1425 1425
1426#ifdef CONFIG_ARCH_SHARK 1426#ifdef CONFIG_ARCH_SHARK
1427 1427
1428#include <asm/arch/hardware.h> 1428#include <mach/hardware.h>
1429 1429
1430static int __devinit cyberpro_vl_probe(void) 1430static int __devinit cyberpro_vl_probe(void)
1431{ 1431{
diff --git a/drivers/video/epson1355fb.c b/drivers/video/epson1355fb.c
index cc2810ef5de5..2735b79e52a1 100644
--- a/drivers/video/epson1355fb.c
+++ b/drivers/video/epson1355fb.c
@@ -71,7 +71,7 @@ struct epson1355_par {
71#if defined(CONFIG_ARM) 71#if defined(CONFIG_ARM)
72 72
73# ifdef CONFIG_ARCH_CEIVA 73# ifdef CONFIG_ARCH_CEIVA
74# include <asm/arch/hardware.h> 74# include <mach/hardware.h>
75# define EPSON1355FB_BASE_PHYS (CEIVA_PHYS_SED1355) 75# define EPSON1355FB_BASE_PHYS (CEIVA_PHYS_SED1355)
76# endif 76# endif
77 77
diff --git a/drivers/video/fsl-diu-fb.c b/drivers/video/fsl-diu-fb.c
index 9cd36c223d33..bd320a2bfb7c 100644
--- a/drivers/video/fsl-diu-fb.c
+++ b/drivers/video/fsl-diu-fb.c
@@ -1649,8 +1649,10 @@ static int __init fsl_diu_init(void)
1649 } 1649 }
1650 1650
1651 prop = of_get_property(np, "d-cache-size", NULL); 1651 prop = of_get_property(np, "d-cache-size", NULL);
1652 if (prop == NULL) 1652 if (prop == NULL) {
1653 of_node_put(np);
1653 return -ENODEV; 1654 return -ENODEV;
1655 }
1654 1656
1655 /* Freescale PLRU requires 13/8 times the cache size to do a proper 1657 /* Freescale PLRU requires 13/8 times the cache size to do a proper
1656 displacement flush 1658 displacement flush
@@ -1659,8 +1661,10 @@ static int __init fsl_diu_init(void)
1659 coherence_data_size /= 8; 1661 coherence_data_size /= 8;
1660 1662
1661 prop = of_get_property(np, "d-cache-line-size", NULL); 1663 prop = of_get_property(np, "d-cache-line-size", NULL);
1662 if (prop == NULL) 1664 if (prop == NULL) {
1665 of_node_put(np);
1663 return -ENODEV; 1666 return -ENODEV;
1667 }
1664 d_cache_line_size = *prop; 1668 d_cache_line_size = *prop;
1665 1669
1666 of_node_put(np); 1670 of_node_put(np);
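The fsl-diu-fb hunks above add the missing of_node_put() on two early-error returns. The standalone sketch below shows the general rule being enforced, namely that every successful "get" is paired with a "put" on every exit path, using a plain refcounted struct; it is an illustration of the pattern, not kernel OF API code.

#include <stdio.h>

struct node { int refs; };

static void node_get(struct node *n) { n->refs++; }
static void node_put(struct node *n) { n->refs--; }

/* Every exit path after node_get() goes through "out" and drops the ref. */
static int probe(struct node *np, int have_prop1, int have_prop2)
{
	int ret = 0;

	node_get(np);

	if (!have_prop1) {
		ret = -1;	/* early error still releases the node */
		goto out;
	}
	if (!have_prop2) {
		ret = -1;
		goto out;
	}
	/* ... use the properties ... */
out:
	node_put(np);
	return ret;
}

int main(void)
{
	struct node n = { 0 };

	probe(&n, 1, 0);
	printf("refs after failed probe: %d\n", n.refs);	/* 0: balanced */
	return 0;
}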
diff --git a/drivers/video/gbefb.c b/drivers/video/gbefb.c
index 2e552d5bbb5d..f89c3cce1e0c 100644
--- a/drivers/video/gbefb.c
+++ b/drivers/video/gbefb.c
@@ -87,6 +87,8 @@ static int gbe_revision;
87static int ypan, ywrap; 87static int ypan, ywrap;
88 88
89static uint32_t pseudo_palette[16]; 89static uint32_t pseudo_palette[16];
90static uint32_t gbe_cmap[256];
91static int gbe_turned_on; /* 0 turned off, 1 turned on */
90 92
91static char *mode_option __initdata = NULL; 93static char *mode_option __initdata = NULL;
92 94
@@ -208,6 +210,8 @@ void gbe_turn_off(void)
208 int i; 210 int i;
209 unsigned int val, x, y, vpixen_off; 211 unsigned int val, x, y, vpixen_off;
210 212
213 gbe_turned_on = 0;
214
211 /* check if pixel counter is on */ 215 /* check if pixel counter is on */
212 val = gbe->vt_xy; 216 val = gbe->vt_xy;
213 if (GET_GBE_FIELD(VT_XY, FREEZE, val) == 1) 217 if (GET_GBE_FIELD(VT_XY, FREEZE, val) == 1)
@@ -371,6 +375,22 @@ static void gbe_turn_on(void)
371 } 375 }
372 if (i == 10000) 376 if (i == 10000)
373 printk(KERN_ERR "gbefb: turn on DMA timed out\n"); 377 printk(KERN_ERR "gbefb: turn on DMA timed out\n");
378
379 gbe_turned_on = 1;
380}
381
382static void gbe_loadcmap(void)
383{
384 int i, j;
385
386 for (i = 0; i < 256; i++) {
387 for (j = 0; j < 1000 && gbe->cm_fifo >= 63; j++)
388 udelay(10);
389 if (j == 1000)
390 printk(KERN_ERR "gbefb: cmap FIFO timeout\n");
391
392 gbe->cmap[i] = gbe_cmap[i];
393 }
374} 394}
375 395
376/* 396/*
@@ -382,6 +402,7 @@ static int gbefb_blank(int blank, struct fb_info *info)
382 switch (blank) { 402 switch (blank) {
383 case FB_BLANK_UNBLANK: /* unblank */ 403 case FB_BLANK_UNBLANK: /* unblank */
384 gbe_turn_on(); 404 gbe_turn_on();
405 gbe_loadcmap();
385 break; 406 break;
386 407
387 case FB_BLANK_NORMAL: /* blank */ 408 case FB_BLANK_NORMAL: /* blank */
@@ -796,16 +817,10 @@ static int gbefb_set_par(struct fb_info *info)
796 gbe->gmap[i] = (i << 24) | (i << 16) | (i << 8); 817 gbe->gmap[i] = (i << 24) | (i << 16) | (i << 8);
797 818
798 /* Initialize the color map */ 819 /* Initialize the color map */
799 for (i = 0; i < 256; i++) { 820 for (i = 0; i < 256; i++)
800 int j; 821 gbe_cmap[i] = (i << 8) | (i << 16) | (i << 24);
801
802 for (j = 0; j < 1000 && gbe->cm_fifo >= 63; j++)
803 udelay(10);
804 if (j == 1000)
805 printk(KERN_ERR "gbefb: cmap FIFO timeout\n");
806 822
807 gbe->cmap[i] = (i << 8) | (i << 16) | (i << 24); 823 gbe_loadcmap();
808 }
809 824
810 return 0; 825 return 0;
811} 826}
@@ -855,14 +870,17 @@ static int gbefb_setcolreg(unsigned regno, unsigned red, unsigned green,
855 blue >>= 8; 870 blue >>= 8;
856 871
857 if (info->var.bits_per_pixel <= 8) { 872 if (info->var.bits_per_pixel <= 8) {
858 /* wait for the color map FIFO to have a free entry */ 873 gbe_cmap[regno] = (red << 24) | (green << 16) | (blue << 8);
859 for (i = 0; i < 1000 && gbe->cm_fifo >= 63; i++) 874 if (gbe_turned_on) {
860 udelay(10); 875 /* wait for the color map FIFO to have a free entry */
861 if (i == 1000) { 876 for (i = 0; i < 1000 && gbe->cm_fifo >= 63; i++)
862 printk(KERN_ERR "gbefb: cmap FIFO timeout\n"); 877 udelay(10);
863 return 1; 878 if (i == 1000) {
879 printk(KERN_ERR "gbefb: cmap FIFO timeout\n");
880 return 1;
881 }
882 gbe->cmap[regno] = gbe_cmap[regno];
864 } 883 }
865 gbe->cmap[regno] = (red << 24) | (green << 16) | (blue << 8);
866 } else if (regno < 16) { 884 } else if (regno < 16) {
867 switch (info->var.bits_per_pixel) { 885 switch (info->var.bits_per_pixel) {
868 case 15: 886 case 15:
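
The gbefb changes above introduce a software copy of the palette (gbe_cmap[]) plus a gbe_turned_on flag: gbefb_setcolreg() always updates the shadow, only touches the hardware colormap FIFO while the display is running, and gbe_loadcmap() replays the whole shadow after gbe_turn_on() or an unblank. A condensed user-space sketch of that shadow-register pattern; fifo_ready() and hw_write() are hypothetical stand-ins for the cm_fifo polling and the register write, and the table is shortened from 256 entries:

#include <stdio.h>
#include <stdint.h>

#define NCOLORS 8			/* 256 in the driver */

static uint32_t shadow_cmap[NCOLORS];	/* software copy, always valid  */
static int turned_on;			/* only write hardware when set */

static int fifo_ready(void) { return 1; }	/* driver polls cm_fifo with a timeout */
static void hw_write(int i, uint32_t v)
{
	printf("cmap[%d] <- %08x\n", i, (unsigned int)v);
}

static void load_cmap(void)
{
	for (int i = 0; i < NCOLORS; i++) {
		while (!fifo_ready())
			;
		hw_write(i, shadow_cmap[i]);
	}
}

static void set_color(int regno, uint32_t rgb)
{
	shadow_cmap[regno] = rgb;	/* remember it even while blanked */
	if (turned_on && fifo_ready())
		hw_write(regno, rgb);
}

static void turn_on(void)
{
	turned_on = 1;
	load_cmap();			/* replay everything set while off */
}

int main(void)
{
	set_color(1, 0x00ff0000);	/* display still off: shadow only   */
	turn_on();
	set_color(2, 0x0000ff00);	/* display on: goes straight to hw  */
	return 0;
}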
diff --git a/drivers/video/hitfb.c b/drivers/video/hitfb.c
index 392a8be6aa76..e6467cf9f19f 100644
--- a/drivers/video/hitfb.c
+++ b/drivers/video/hitfb.c
@@ -27,7 +27,7 @@
27#include <asm/pgtable.h> 27#include <asm/pgtable.h>
28#include <asm/io.h> 28#include <asm/io.h>
29#include <asm/hd64461.h> 29#include <asm/hd64461.h>
30#include <asm/cpu/dac.h> 30#include <cpu/dac.h>
31 31
32#define WIDTH 640 32#define WIDTH 640
33 33
diff --git a/drivers/video/imxfb.c b/drivers/video/imxfb.c
index 0c5a475c1cae..ccd986140c95 100644
--- a/drivers/video/imxfb.c
+++ b/drivers/video/imxfb.c
@@ -33,9 +33,9 @@
33#include <linux/platform_device.h> 33#include <linux/platform_device.h>
34#include <linux/dma-mapping.h> 34#include <linux/dma-mapping.h>
35 35
36#include <asm/hardware.h> 36#include <mach/hardware.h>
37#include <asm/io.h> 37#include <asm/io.h>
38#include <asm/arch/imxfb.h> 38#include <mach/imxfb.h>
39 39
40/* 40/*
41 * Complain if VAR is out of range. 41 * Complain if VAR is out of range.
diff --git a/drivers/video/matrox/matroxfb_base.c b/drivers/video/matrox/matroxfb_base.c
index 54e82f35353d..c02136202792 100644
--- a/drivers/video/matrox/matroxfb_base.c
+++ b/drivers/video/matrox/matroxfb_base.c
@@ -2536,7 +2536,7 @@ module_param(fh, int, 0);
2536MODULE_PARM_DESC(fh, "Startup horizontal frequency, 0-999kHz, 1000-INF Hz"); 2536MODULE_PARM_DESC(fh, "Startup horizontal frequency, 0-999kHz, 1000-INF Hz");
2537module_param(fv, int, 0); 2537module_param(fv, int, 0);
2538MODULE_PARM_DESC(fv, "Startup vertical frequency, 0-INF Hz\n" 2538MODULE_PARM_DESC(fv, "Startup vertical frequency, 0-INF Hz\n"
2539"You should specify \"fv:max_monitor_vsync,fh:max_monitor_hsync,maxclk:max_monitor_dotclock\"\n"); 2539"You should specify \"fv:max_monitor_vsync,fh:max_monitor_hsync,maxclk:max_monitor_dotclock\"");
2540module_param(grayscale, int, 0); 2540module_param(grayscale, int, 0);
2541MODULE_PARM_DESC(grayscale, "Sets display into grayscale. Works perfectly with paletized videomode (4, 8bpp), some limitations apply to 16, 24 and 32bpp videomodes (default=nograyscale)"); 2541MODULE_PARM_DESC(grayscale, "Sets display into grayscale. Works perfectly with paletized videomode (4, 8bpp), some limitations apply to 16, 24 and 32bpp videomodes (default=nograyscale)");
2542module_param(cross4MB, int, 0); 2542module_param(cross4MB, int, 0);
diff --git a/drivers/video/omap/blizzard.c b/drivers/video/omap/blizzard.c
index 4d8ad9cd0e19..9dfcf39d3367 100644
--- a/drivers/video/omap/blizzard.c
+++ b/drivers/video/omap/blizzard.c
@@ -26,9 +26,9 @@
26#include <linux/delay.h> 26#include <linux/delay.h>
27#include <linux/clk.h> 27#include <linux/clk.h>
28 28
29#include <asm/arch/dma.h> 29#include <mach/dma.h>
30#include <asm/arch/omapfb.h> 30#include <mach/omapfb.h>
31#include <asm/arch/blizzard.h> 31#include <mach/blizzard.h>
32 32
33#include "dispc.h" 33#include "dispc.h"
34 34
diff --git a/drivers/video/omap/dispc.c b/drivers/video/omap/dispc.c
index ab77c51fe9d6..6efcf89e7fbe 100644
--- a/drivers/video/omap/dispc.c
+++ b/drivers/video/omap/dispc.c
@@ -25,9 +25,9 @@
25#include <linux/clk.h> 25#include <linux/clk.h>
26#include <linux/io.h> 26#include <linux/io.h>
27 27
28#include <asm/arch/sram.h> 28#include <mach/sram.h>
29#include <asm/arch/omapfb.h> 29#include <mach/omapfb.h>
30#include <asm/arch/board.h> 30#include <mach/board.h>
31 31
32#include "dispc.h" 32#include "dispc.h"
33 33
diff --git a/drivers/video/omap/hwa742.c b/drivers/video/omap/hwa742.c
index 1e642b7a20fe..f24df0b54e1c 100644
--- a/drivers/video/omap/hwa742.c
+++ b/drivers/video/omap/hwa742.c
@@ -26,9 +26,9 @@
26#include <linux/delay.h> 26#include <linux/delay.h>
27#include <linux/clk.h> 27#include <linux/clk.h>
28 28
29#include <asm/arch/dma.h> 29#include <mach/dma.h>
30#include <asm/arch/omapfb.h> 30#include <mach/omapfb.h>
31#include <asm/arch/hwa742.h> 31#include <mach/hwa742.h>
32 32
33#define HWA742_REV_CODE_REG 0x0 33#define HWA742_REV_CODE_REG 0x0
34#define HWA742_CONFIG_REG 0x2 34#define HWA742_CONFIG_REG 0x2
diff --git a/drivers/video/omap/lcd_h3.c b/drivers/video/omap/lcd_h3.c
index 31e978349a80..2486237ebba5 100644
--- a/drivers/video/omap/lcd_h3.c
+++ b/drivers/video/omap/lcd_h3.c
@@ -23,8 +23,8 @@
23#include <linux/platform_device.h> 23#include <linux/platform_device.h>
24#include <linux/i2c/tps65010.h> 24#include <linux/i2c/tps65010.h>
25 25
26#include <asm/arch/gpio.h> 26#include <mach/gpio.h>
27#include <asm/arch/omapfb.h> 27#include <mach/omapfb.h>
28 28
29#define MODULE_NAME "omapfb-lcd_h3" 29#define MODULE_NAME "omapfb-lcd_h3"
30 30
diff --git a/drivers/video/omap/lcd_h4.c b/drivers/video/omap/lcd_h4.c
index fd6f0eb16de1..88c19d424ef7 100644
--- a/drivers/video/omap/lcd_h4.c
+++ b/drivers/video/omap/lcd_h4.c
@@ -22,7 +22,7 @@
22#include <linux/module.h> 22#include <linux/module.h>
23#include <linux/platform_device.h> 23#include <linux/platform_device.h>
24 24
25#include <asm/arch/omapfb.h> 25#include <mach/omapfb.h>
26 26
27static int h4_panel_init(struct lcd_panel *panel, struct omapfb_device *fbdev) 27static int h4_panel_init(struct lcd_panel *panel, struct omapfb_device *fbdev)
28{ 28{
diff --git a/drivers/video/omap/lcd_inn1510.c b/drivers/video/omap/lcd_inn1510.c
index 551f385861d1..6953ed4b5820 100644
--- a/drivers/video/omap/lcd_inn1510.c
+++ b/drivers/video/omap/lcd_inn1510.c
@@ -23,8 +23,8 @@
23#include <linux/platform_device.h> 23#include <linux/platform_device.h>
24#include <linux/io.h> 24#include <linux/io.h>
25 25
26#include <asm/arch/fpga.h> 26#include <mach/fpga.h>
27#include <asm/arch/omapfb.h> 27#include <mach/omapfb.h>
28 28
29static int innovator1510_panel_init(struct lcd_panel *panel, 29static int innovator1510_panel_init(struct lcd_panel *panel,
30 struct omapfb_device *fbdev) 30 struct omapfb_device *fbdev)
diff --git a/drivers/video/omap/lcd_inn1610.c b/drivers/video/omap/lcd_inn1610.c
index 5ef119c813e0..6a42c6a0cd99 100644
--- a/drivers/video/omap/lcd_inn1610.c
+++ b/drivers/video/omap/lcd_inn1610.c
@@ -22,8 +22,8 @@
22#include <linux/module.h> 22#include <linux/module.h>
23#include <linux/platform_device.h> 23#include <linux/platform_device.h>
24 24
25#include <asm/arch/gpio.h> 25#include <mach/gpio.h>
26#include <asm/arch/omapfb.h> 26#include <mach/omapfb.h>
27 27
28#define MODULE_NAME "omapfb-lcd_h3" 28#define MODULE_NAME "omapfb-lcd_h3"
29 29
diff --git a/drivers/video/omap/lcd_osk.c b/drivers/video/omap/lcd_osk.c
index a38038840fd6..a4a725f427a4 100644
--- a/drivers/video/omap/lcd_osk.c
+++ b/drivers/video/omap/lcd_osk.c
@@ -23,9 +23,9 @@
23#include <linux/module.h> 23#include <linux/module.h>
24#include <linux/platform_device.h> 24#include <linux/platform_device.h>
25 25
26#include <asm/arch/gpio.h> 26#include <mach/gpio.h>
27#include <asm/arch/mux.h> 27#include <mach/mux.h>
28#include <asm/arch/omapfb.h> 28#include <mach/omapfb.h>
29 29
30static int osk_panel_init(struct lcd_panel *panel, struct omapfb_device *fbdev) 30static int osk_panel_init(struct lcd_panel *panel, struct omapfb_device *fbdev)
31{ 31{
diff --git a/drivers/video/omap/lcd_palmte.c b/drivers/video/omap/lcd_palmte.c
index 52bdfdac42c9..218317366e6e 100644
--- a/drivers/video/omap/lcd_palmte.c
+++ b/drivers/video/omap/lcd_palmte.c
@@ -23,8 +23,8 @@
23#include <linux/platform_device.h> 23#include <linux/platform_device.h>
24#include <linux/io.h> 24#include <linux/io.h>
25 25
26#include <asm/arch/fpga.h> 26#include <mach/fpga.h>
27#include <asm/arch/omapfb.h> 27#include <mach/omapfb.h>
28 28
29static int palmte_panel_init(struct lcd_panel *panel, 29static int palmte_panel_init(struct lcd_panel *panel,
30 struct omapfb_device *fbdev) 30 struct omapfb_device *fbdev)
diff --git a/drivers/video/omap/lcd_palmtt.c b/drivers/video/omap/lcd_palmtt.c
index 4bb349f54356..57b0f6cf6a5a 100644
--- a/drivers/video/omap/lcd_palmtt.c
+++ b/drivers/video/omap/lcd_palmtt.c
@@ -29,8 +29,8 @@ GPIO13 - screen blanking
29#include <linux/module.h> 29#include <linux/module.h>
30#include <linux/io.h> 30#include <linux/io.h>
31 31
32#include <asm/arch/gpio.h> 32#include <mach/gpio.h>
33#include <asm/arch/omapfb.h> 33#include <mach/omapfb.h>
34 34
35static int palmtt_panel_init(struct lcd_panel *panel, 35static int palmtt_panel_init(struct lcd_panel *panel,
36 struct omapfb_device *fbdev) 36 struct omapfb_device *fbdev)
diff --git a/drivers/video/omap/lcd_palmz71.c b/drivers/video/omap/lcd_palmz71.c
index ea6170ddff35..d33d78b11723 100644
--- a/drivers/video/omap/lcd_palmz71.c
+++ b/drivers/video/omap/lcd_palmz71.c
@@ -24,7 +24,7 @@
24#include <linux/platform_device.h> 24#include <linux/platform_device.h>
25#include <linux/io.h> 25#include <linux/io.h>
26 26
27#include <asm/arch/omapfb.h> 27#include <mach/omapfb.h>
28 28
29static int palmz71_panel_init(struct lcd_panel *panel, 29static int palmz71_panel_init(struct lcd_panel *panel,
30 struct omapfb_device *fbdev) 30 struct omapfb_device *fbdev)
diff --git a/drivers/video/omap/lcd_sx1.c b/drivers/video/omap/lcd_sx1.c
index c4f306a4e5c9..caa6a896cb8b 100644
--- a/drivers/video/omap/lcd_sx1.c
+++ b/drivers/video/omap/lcd_sx1.c
@@ -23,10 +23,10 @@
23#include <linux/delay.h> 23#include <linux/delay.h>
24#include <linux/io.h> 24#include <linux/io.h>
25 25
26#include <asm/arch/gpio.h> 26#include <mach/gpio.h>
27#include <asm/arch/omapfb.h> 27#include <mach/omapfb.h>
28#include <asm/arch/mcbsp.h> 28#include <mach/mcbsp.h>
29#include <asm/arch/mux.h> 29#include <mach/mux.h>
30 30
31/* 31/*
32 * OMAP310 GPIO registers 32 * OMAP310 GPIO registers
diff --git a/drivers/video/omap/lcdc.c b/drivers/video/omap/lcdc.c
index fb19ed4992db..83514f066712 100644
--- a/drivers/video/omap/lcdc.c
+++ b/drivers/video/omap/lcdc.c
@@ -29,8 +29,8 @@
29#include <linux/vmalloc.h> 29#include <linux/vmalloc.h>
30#include <linux/clk.h> 30#include <linux/clk.h>
31 31
32#include <asm/arch/dma.h> 32#include <mach/dma.h>
33#include <asm/arch/omapfb.h> 33#include <mach/omapfb.h>
34 34
35#include <asm/mach-types.h> 35#include <asm/mach-types.h>
36 36
diff --git a/drivers/video/omap/omapfb_main.c b/drivers/video/omap/omapfb_main.c
index f85af5c4fa68..51a138bd113c 100644
--- a/drivers/video/omap/omapfb_main.c
+++ b/drivers/video/omap/omapfb_main.c
@@ -28,9 +28,8 @@
28#include <linux/mm.h> 28#include <linux/mm.h>
29#include <linux/uaccess.h> 29#include <linux/uaccess.h>
30 30
31#include <asm/mach-types.h> 31#include <mach/dma.h>
32#include <asm/arch/dma.h> 32#include <mach/omapfb.h>
33#include <asm/arch/omapfb.h>
34 33
35#define MODULE_NAME "omapfb" 34#define MODULE_NAME "omapfb"
36 35
diff --git a/drivers/video/omap/rfbi.c b/drivers/video/omap/rfbi.c
index 789cfd23c36b..4a6f13d3facf 100644
--- a/drivers/video/omap/rfbi.c
+++ b/drivers/video/omap/rfbi.c
@@ -27,7 +27,7 @@
27#include <linux/clk.h> 27#include <linux/clk.h>
28#include <linux/io.h> 28#include <linux/io.h>
29 29
30#include <asm/arch/omapfb.h> 30#include <mach/omapfb.h>
31 31
32#include "dispc.h" 32#include "dispc.h"
33 33
diff --git a/drivers/video/omap/sossi.c b/drivers/video/omap/sossi.c
index fafd0f26b90f..6359353c2c67 100644
--- a/drivers/video/omap/sossi.c
+++ b/drivers/video/omap/sossi.c
@@ -24,8 +24,8 @@
24#include <linux/irq.h> 24#include <linux/irq.h>
25#include <linux/io.h> 25#include <linux/io.h>
26 26
27#include <asm/arch/dma.h> 27#include <mach/dma.h>
28#include <asm/arch/omapfb.h> 28#include <mach/omapfb.h>
29 29
30#include "lcdc.h" 30#include "lcdc.h"
31 31
diff --git a/drivers/video/pnx4008/dum.h b/drivers/video/pnx4008/dum.h
index d80a614d89ed..1234d4375d92 100644
--- a/drivers/video/pnx4008/dum.h
+++ b/drivers/video/pnx4008/dum.h
@@ -12,7 +12,7 @@
12#ifndef __PNX008_DUM_H__ 12#ifndef __PNX008_DUM_H__
13#define __PNX008_DUM_H__ 13#define __PNX008_DUM_H__
14 14
15#include <asm/arch/platform.h> 15#include <mach/platform.h>
16 16
17#define PNX4008_DUMCONF_VA_BASE IO_ADDRESS(PNX4008_DUMCONF_BASE) 17#define PNX4008_DUMCONF_VA_BASE IO_ADDRESS(PNX4008_DUMCONF_BASE)
18#define PNX4008_DUM_MAIN_VA_BASE IO_ADDRESS(PNX4008_DUM_MAINCFG_BASE) 18#define PNX4008_DUM_MAIN_VA_BASE IO_ADDRESS(PNX4008_DUM_MAINCFG_BASE)
diff --git a/drivers/video/pnx4008/sdum.c b/drivers/video/pnx4008/sdum.c
index d23bf0d659b6..2aa09bce3944 100644
--- a/drivers/video/pnx4008/sdum.c
+++ b/drivers/video/pnx4008/sdum.c
@@ -30,7 +30,7 @@
30#include <linux/dma-mapping.h> 30#include <linux/dma-mapping.h>
31#include <linux/clk.h> 31#include <linux/clk.h>
32#include <asm/uaccess.h> 32#include <asm/uaccess.h>
33#include <asm/arch/gpio.h> 33#include <mach/gpio.h>
34 34
35#include "sdum.h" 35#include "sdum.h"
36#include "fbcommon.h" 36#include "fbcommon.h"
diff --git a/drivers/video/pvr2fb.c b/drivers/video/pvr2fb.c
index 8c863a7f654b..0a0fd48a8566 100644
--- a/drivers/video/pvr2fb.c
+++ b/drivers/video/pvr2fb.c
@@ -58,18 +58,18 @@
58 58
59#ifdef CONFIG_SH_DREAMCAST 59#ifdef CONFIG_SH_DREAMCAST
60#include <asm/machvec.h> 60#include <asm/machvec.h>
61#include <asm/mach/sysasic.h> 61#include <mach-dreamcast/mach/sysasic.h>
62#endif 62#endif
63 63
64#ifdef CONFIG_SH_DMA 64#ifdef CONFIG_SH_DMA
65#include <linux/pagemap.h> 65#include <linux/pagemap.h>
66#include <asm/mach/dma.h> 66#include <mach/dma.h>
67#include <asm/dma.h> 67#include <asm/dma.h>
68#endif 68#endif
69 69
70#ifdef CONFIG_SH_STORE_QUEUES 70#ifdef CONFIG_SH_STORE_QUEUES
71#include <linux/uaccess.h> 71#include <linux/uaccess.h>
72#include <asm/cpu/sq.h> 72#include <cpu/sq.h>
73#endif 73#endif
74 74
75#ifndef PCI_DEVICE_ID_NEC_NEON250 75#ifndef PCI_DEVICE_ID_NEC_NEON250
diff --git a/drivers/video/pxafb.c b/drivers/video/pxafb.c
index 69de2fed6c58..e7aa7ae8fca8 100644
--- a/drivers/video/pxafb.c
+++ b/drivers/video/pxafb.c
@@ -45,14 +45,14 @@
45#include <linux/kthread.h> 45#include <linux/kthread.h>
46#include <linux/freezer.h> 46#include <linux/freezer.h>
47 47
48#include <asm/hardware.h> 48#include <mach/hardware.h>
49#include <asm/io.h> 49#include <asm/io.h>
50#include <asm/irq.h> 50#include <asm/irq.h>
51#include <asm/div64.h> 51#include <asm/div64.h>
52#include <asm/arch/pxa-regs.h> 52#include <mach/pxa-regs.h>
53#include <asm/arch/pxa2xx-gpio.h> 53#include <mach/pxa2xx-gpio.h>
54#include <asm/arch/bitfield.h> 54#include <mach/bitfield.h>
55#include <asm/arch/pxafb.h> 55#include <mach/pxafb.h>
56 56
57/* 57/*
58 * Complain if VAR is out of range. 58 * Complain if VAR is out of range.
diff --git a/drivers/video/s3c2410fb.c b/drivers/video/s3c2410fb.c
index f0598961c6b0..79cf0b1976aa 100644
--- a/drivers/video/s3c2410fb.c
+++ b/drivers/video/s3c2410fb.c
@@ -29,9 +29,9 @@
29#include <asm/div64.h> 29#include <asm/div64.h>
30 30
31#include <asm/mach/map.h> 31#include <asm/mach/map.h>
32#include <asm/arch/regs-lcd.h> 32#include <mach/regs-lcd.h>
33#include <asm/arch/regs-gpio.h> 33#include <mach/regs-gpio.h>
34#include <asm/arch/fb.h> 34#include <mach/fb.h>
35 35
36#ifdef CONFIG_PM 36#ifdef CONFIG_PM
37#include <linux/pm.h> 37#include <linux/pm.h>
diff --git a/drivers/video/s3fb.c b/drivers/video/s3fb.c
index 2972f112dbed..8361bd0e3df1 100644
--- a/drivers/video/s3fb.c
+++ b/drivers/video/s3fb.c
@@ -903,13 +903,13 @@ static int __devinit s3_pci_probe(struct pci_dev *dev, const struct pci_device_i
903 /* Prepare PCI device */ 903 /* Prepare PCI device */
904 rc = pci_enable_device(dev); 904 rc = pci_enable_device(dev);
905 if (rc < 0) { 905 if (rc < 0) {
906 dev_err(info->dev, "cannot enable PCI device\n"); 906 dev_err(info->device, "cannot enable PCI device\n");
907 goto err_enable_device; 907 goto err_enable_device;
908 } 908 }
909 909
910 rc = pci_request_regions(dev, "s3fb"); 910 rc = pci_request_regions(dev, "s3fb");
911 if (rc < 0) { 911 if (rc < 0) {
912 dev_err(info->dev, "cannot reserve framebuffer region\n"); 912 dev_err(info->device, "cannot reserve framebuffer region\n");
913 goto err_request_regions; 913 goto err_request_regions;
914 } 914 }
915 915
@@ -921,7 +921,7 @@ static int __devinit s3_pci_probe(struct pci_dev *dev, const struct pci_device_i
921 info->screen_base = pci_iomap(dev, 0, 0); 921 info->screen_base = pci_iomap(dev, 0, 0);
922 if (! info->screen_base) { 922 if (! info->screen_base) {
923 rc = -ENOMEM; 923 rc = -ENOMEM;
924 dev_err(info->dev, "iomap for framebuffer failed\n"); 924 dev_err(info->device, "iomap for framebuffer failed\n");
925 goto err_iomap; 925 goto err_iomap;
926 } 926 }
927 927
@@ -965,19 +965,19 @@ static int __devinit s3_pci_probe(struct pci_dev *dev, const struct pci_device_i
965 rc = fb_find_mode(&(info->var), info, mode_option, NULL, 0, NULL, 8); 965 rc = fb_find_mode(&(info->var), info, mode_option, NULL, 0, NULL, 8);
966 if (! ((rc == 1) || (rc == 2))) { 966 if (! ((rc == 1) || (rc == 2))) {
967 rc = -EINVAL; 967 rc = -EINVAL;
968 dev_err(info->dev, "mode %s not found\n", mode_option); 968 dev_err(info->device, "mode %s not found\n", mode_option);
969 goto err_find_mode; 969 goto err_find_mode;
970 } 970 }
971 971
972 rc = fb_alloc_cmap(&info->cmap, 256, 0); 972 rc = fb_alloc_cmap(&info->cmap, 256, 0);
973 if (rc < 0) { 973 if (rc < 0) {
974 dev_err(info->dev, "cannot allocate colormap\n"); 974 dev_err(info->device, "cannot allocate colormap\n");
975 goto err_alloc_cmap; 975 goto err_alloc_cmap;
976 } 976 }
977 977
978 rc = register_framebuffer(info); 978 rc = register_framebuffer(info);
979 if (rc < 0) { 979 if (rc < 0) {
980 dev_err(info->dev, "cannot register framebuffer\n"); 980 dev_err(info->device, "cannot register framebuffer\n");
981 goto err_reg_fb; 981 goto err_reg_fb;
982 } 982 }
983 983
@@ -1053,7 +1053,7 @@ static int s3_pci_suspend(struct pci_dev* dev, pm_message_t state)
1053 struct fb_info *info = pci_get_drvdata(dev); 1053 struct fb_info *info = pci_get_drvdata(dev);
1054 struct s3fb_info *par = info->par; 1054 struct s3fb_info *par = info->par;
1055 1055
1056 dev_info(info->dev, "suspend\n"); 1056 dev_info(info->device, "suspend\n");
1057 1057
1058 acquire_console_sem(); 1058 acquire_console_sem();
1059 mutex_lock(&(par->open_lock)); 1059 mutex_lock(&(par->open_lock));
@@ -1085,7 +1085,7 @@ static int s3_pci_resume(struct pci_dev* dev)
1085 struct s3fb_info *par = info->par; 1085 struct s3fb_info *par = info->par;
1086 int err; 1086 int err;
1087 1087
1088 dev_info(info->dev, "resume\n"); 1088 dev_info(info->device, "resume\n");
1089 1089
1090 acquire_console_sem(); 1090 acquire_console_sem();
1091 mutex_lock(&(par->open_lock)); 1091 mutex_lock(&(par->open_lock));
@@ -1102,7 +1102,7 @@ static int s3_pci_resume(struct pci_dev* dev)
1102 if (err) { 1102 if (err) {
1103 mutex_unlock(&(par->open_lock)); 1103 mutex_unlock(&(par->open_lock));
1104 release_console_sem(); 1104 release_console_sem();
1105 dev_err(info->dev, "error %d enabling device for resume\n", err); 1105 dev_err(info->device, "error %d enabling device for resume\n", err);
1106 return err; 1106 return err;
1107 } 1107 }
1108 pci_set_master(dev); 1108 pci_set_master(dev);
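
The s3fb changes above (and the matching vt8623fb ones further down) switch every probe, suspend and resume message from dev_err(info->dev, ...) to dev_err(info->device, ...). In struct fb_info, device points at the parent device (here the PCI device, filled in by framebuffer_alloc()), while dev is the framebuffer class device that only exists once register_framebuffer() has succeeded, so using it for errors during probe dereferences a NULL pointer. A skeletal probe showing which member is safe at each point; example_probe is a hypothetical name and most setup is omitted, this is not the s3fb code:

#include <linux/fb.h>
#include <linux/pci.h>

static int example_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	struct fb_info *info;
	int rc;

	info = framebuffer_alloc(0, &pdev->dev);	/* sets info->device */
	if (!info)
		return -ENOMEM;

	rc = pci_enable_device(pdev);
	if (rc < 0) {
		/* info->dev is still NULL here; info->device is valid */
		dev_err(info->device, "cannot enable PCI device\n");
		goto err_release;
	}

	/* ... iomap, mode setup and cmap allocation elided ... */

	rc = register_framebuffer(info);		/* creates info->dev */
	if (rc < 0) {
		dev_err(info->device, "cannot register framebuffer\n");
		goto err_disable;
	}
	return 0;

err_disable:
	pci_disable_device(pdev);
err_release:
	framebuffer_release(info);
	return rc;
}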
diff --git a/drivers/video/sa1100fb.c b/drivers/video/sa1100fb.c
index 78bcdbc3f484..c052bd4c0b06 100644
--- a/drivers/video/sa1100fb.c
+++ b/drivers/video/sa1100fb.c
@@ -177,11 +177,11 @@
177#include <linux/dma-mapping.h> 177#include <linux/dma-mapping.h>
178#include <linux/mutex.h> 178#include <linux/mutex.h>
179 179
180#include <asm/hardware.h> 180#include <mach/hardware.h>
181#include <asm/io.h> 181#include <asm/io.h>
182#include <asm/mach-types.h> 182#include <asm/mach-types.h>
183#include <asm/arch/assabet.h> 183#include <mach/assabet.h>
184#include <asm/arch/shannon.h> 184#include <mach/shannon.h>
185 185
186/* 186/*
187 * debugging? 187 * debugging?
diff --git a/drivers/video/vt8623fb.c b/drivers/video/vt8623fb.c
index 536ab11623f0..34aae7a2a62b 100644
--- a/drivers/video/vt8623fb.c
+++ b/drivers/video/vt8623fb.c
@@ -677,13 +677,13 @@ static int __devinit vt8623_pci_probe(struct pci_dev *dev, const struct pci_devi
677 677
678 rc = pci_enable_device(dev); 678 rc = pci_enable_device(dev);
679 if (rc < 0) { 679 if (rc < 0) {
680 dev_err(info->dev, "cannot enable PCI device\n"); 680 dev_err(info->device, "cannot enable PCI device\n");
681 goto err_enable_device; 681 goto err_enable_device;
682 } 682 }
683 683
684 rc = pci_request_regions(dev, "vt8623fb"); 684 rc = pci_request_regions(dev, "vt8623fb");
685 if (rc < 0) { 685 if (rc < 0) {
686 dev_err(info->dev, "cannot reserve framebuffer region\n"); 686 dev_err(info->device, "cannot reserve framebuffer region\n");
687 goto err_request_regions; 687 goto err_request_regions;
688 } 688 }
689 689
@@ -696,14 +696,14 @@ static int __devinit vt8623_pci_probe(struct pci_dev *dev, const struct pci_devi
696 info->screen_base = pci_iomap(dev, 0, 0); 696 info->screen_base = pci_iomap(dev, 0, 0);
697 if (! info->screen_base) { 697 if (! info->screen_base) {
698 rc = -ENOMEM; 698 rc = -ENOMEM;
699 dev_err(info->dev, "iomap for framebuffer failed\n"); 699 dev_err(info->device, "iomap for framebuffer failed\n");
700 goto err_iomap_1; 700 goto err_iomap_1;
701 } 701 }
702 702
703 par->mmio_base = pci_iomap(dev, 1, 0); 703 par->mmio_base = pci_iomap(dev, 1, 0);
704 if (! par->mmio_base) { 704 if (! par->mmio_base) {
705 rc = -ENOMEM; 705 rc = -ENOMEM;
706 dev_err(info->dev, "iomap for MMIO failed\n"); 706 dev_err(info->device, "iomap for MMIO failed\n");
707 goto err_iomap_2; 707 goto err_iomap_2;
708 } 708 }
709 709
@@ -714,7 +714,7 @@ static int __devinit vt8623_pci_probe(struct pci_dev *dev, const struct pci_devi
714 if ((16 <= memsize1) && (memsize1 <= 64) && (memsize1 == memsize2)) 714 if ((16 <= memsize1) && (memsize1 <= 64) && (memsize1 == memsize2))
715 info->screen_size = memsize1 << 20; 715 info->screen_size = memsize1 << 20;
716 else { 716 else {
717 dev_err(info->dev, "memory size detection failed (%x %x), suppose 16 MB\n", memsize1, memsize2); 717 dev_err(info->device, "memory size detection failed (%x %x), suppose 16 MB\n", memsize1, memsize2);
718 info->screen_size = 16 << 20; 718 info->screen_size = 16 << 20;
719 } 719 }
720 720
@@ -731,19 +731,19 @@ static int __devinit vt8623_pci_probe(struct pci_dev *dev, const struct pci_devi
731 rc = fb_find_mode(&(info->var), info, mode_option, NULL, 0, NULL, 8); 731 rc = fb_find_mode(&(info->var), info, mode_option, NULL, 0, NULL, 8);
732 if (! ((rc == 1) || (rc == 2))) { 732 if (! ((rc == 1) || (rc == 2))) {
733 rc = -EINVAL; 733 rc = -EINVAL;
734 dev_err(info->dev, "mode %s not found\n", mode_option); 734 dev_err(info->device, "mode %s not found\n", mode_option);
735 goto err_find_mode; 735 goto err_find_mode;
736 } 736 }
737 737
738 rc = fb_alloc_cmap(&info->cmap, 256, 0); 738 rc = fb_alloc_cmap(&info->cmap, 256, 0);
739 if (rc < 0) { 739 if (rc < 0) {
740 dev_err(info->dev, "cannot allocate colormap\n"); 740 dev_err(info->device, "cannot allocate colormap\n");
741 goto err_alloc_cmap; 741 goto err_alloc_cmap;
742 } 742 }
743 743
744 rc = register_framebuffer(info); 744 rc = register_framebuffer(info);
745 if (rc < 0) { 745 if (rc < 0) {
746 dev_err(info->dev, "cannot register framebugger\n"); 746 dev_err(info->device, "cannot register framebugger\n");
747 goto err_reg_fb; 747 goto err_reg_fb;
748 } 748 }
749 749
@@ -817,7 +817,7 @@ static int vt8623_pci_suspend(struct pci_dev* dev, pm_message_t state)
817 struct fb_info *info = pci_get_drvdata(dev); 817 struct fb_info *info = pci_get_drvdata(dev);
818 struct vt8623fb_info *par = info->par; 818 struct vt8623fb_info *par = info->par;
819 819
820 dev_info(info->dev, "suspend\n"); 820 dev_info(info->device, "suspend\n");
821 821
822 acquire_console_sem(); 822 acquire_console_sem();
823 mutex_lock(&(par->open_lock)); 823 mutex_lock(&(par->open_lock));
@@ -848,16 +848,13 @@ static int vt8623_pci_resume(struct pci_dev* dev)
848 struct fb_info *info = pci_get_drvdata(dev); 848 struct fb_info *info = pci_get_drvdata(dev);
849 struct vt8623fb_info *par = info->par; 849 struct vt8623fb_info *par = info->par;
850 850
851 dev_info(info->dev, "resume\n"); 851 dev_info(info->device, "resume\n");
852 852
853 acquire_console_sem(); 853 acquire_console_sem();
854 mutex_lock(&(par->open_lock)); 854 mutex_lock(&(par->open_lock));
855 855
856 if (par->ref_count == 0) { 856 if (par->ref_count == 0)
857 mutex_unlock(&(par->open_lock)); 857 goto fail;
858 release_console_sem();
859 return 0;
860 }
861 858
862 pci_set_power_state(dev, PCI_D0); 859 pci_set_power_state(dev, PCI_D0);
863 pci_restore_state(dev); 860 pci_restore_state(dev);
@@ -870,8 +867,8 @@ static int vt8623_pci_resume(struct pci_dev* dev)
870 vt8623fb_set_par(info); 867 vt8623fb_set_par(info);
871 fb_set_suspend(info, 0); 868 fb_set_suspend(info, 0);
872 869
873 mutex_unlock(&(par->open_lock));
874fail: 870fail:
871 mutex_unlock(&(par->open_lock));
875 release_console_sem(); 872 release_console_sem();
876 873
877 return 0; 874 return 0;
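
The vt8623fb resume rework above funnels both the "no opener, nothing to restore" case and the normal path through a single fail: label, and moves the mutex_unlock() after that label so every exit releases par->open_lock and the console semaphore exactly once. A runnable user-space analogue of that single-unlock-site shape, with a pthread mutex standing in for both locks and hypothetical names throughout:

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t open_lock = PTHREAD_MUTEX_INITIALIZER;
static int ref_count;

static int device_resume(void)
{
	pthread_mutex_lock(&open_lock);

	if (ref_count == 0)
		goto fail;		/* nothing to restore, still unlock below */

	printf("restoring hardware state\n");

fail:
	pthread_mutex_unlock(&open_lock);	/* single unlock for both paths */
	return 0;
}

int main(void)
{
	device_resume();
	ref_count = 1;
	device_resume();
	return 0;
}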
diff --git a/drivers/watchdog/Kconfig b/drivers/watchdog/Kconfig
index 48399e134c0d..32b9fe153641 100644
--- a/drivers/watchdog/Kconfig
+++ b/drivers/watchdog/Kconfig
@@ -463,7 +463,7 @@ config PC87413_WDT
463 module will be called pc87413_wdt. 463 module will be called pc87413_wdt.
464 464
465 Most people will say N. 465 Most people will say N.
466 466
467config 60XX_WDT 467config 60XX_WDT
468 tristate "SBC-60XX Watchdog Timer" 468 tristate "SBC-60XX Watchdog Timer"
469 depends on X86 469 depends on X86
@@ -695,9 +695,17 @@ config 8xx_WDT
695 tristate "MPC8xx Watchdog Timer" 695 tristate "MPC8xx Watchdog Timer"
696 depends on 8xx 696 depends on 8xx
697 697
698config 83xx_WDT 698config 8xxx_WDT
699 tristate "MPC83xx Watchdog Timer" 699 tristate "MPC8xxx Platform Watchdog Timer"
700 depends on PPC_83xx 700 depends on PPC_8xx || PPC_83xx || PPC_86xx
701 help
702 This driver is for a SoC level watchdog that exists on some
703 Freescale PowerPC processors. So far this driver supports:
704 - MPC8xx watchdogs
705 - MPC83xx watchdogs
706 - MPC86xx watchdogs
707
708 For BookE processors (MPC85xx) use the BOOKE_WDT driver instead.
701 709
702config MV64X60_WDT 710config MV64X60_WDT
703 tristate "MV64X60 (Marvell Discovery) Watchdog Timer" 711 tristate "MV64X60 (Marvell Discovery) Watchdog Timer"
diff --git a/drivers/watchdog/Makefile b/drivers/watchdog/Makefile
index edd305a64e63..049c91895699 100644
--- a/drivers/watchdog/Makefile
+++ b/drivers/watchdog/Makefile
@@ -92,7 +92,7 @@ obj-$(CONFIG_SBC_EPX_C3_WATCHDOG) += sbc_epx_c3.o
92 92
93# MIPS Architecture 93# MIPS Architecture
94obj-$(CONFIG_INDYDOG) += indydog.o 94obj-$(CONFIG_INDYDOG) += indydog.o
95obj-$(CONFIG_WDT_MTX1) += mtx-1_wdt.o 95obj-$(CONFIG_WDT_MTX1) += mtx-1_wdt.o
96obj-$(CONFIG_WDT_RM9K_GPI) += rm9k_wdt.o 96obj-$(CONFIG_WDT_RM9K_GPI) += rm9k_wdt.o
97obj-$(CONFIG_SIBYTE_WDOG) += sb_wdog.o 97obj-$(CONFIG_SIBYTE_WDOG) += sb_wdog.o
98obj-$(CONFIG_AR7_WDT) += ar7_wdt.o 98obj-$(CONFIG_AR7_WDT) += ar7_wdt.o
@@ -103,7 +103,7 @@ obj-$(CONFIG_TXX9_WDT) += txx9wdt.o
103# POWERPC Architecture 103# POWERPC Architecture
104obj-$(CONFIG_8xx_WDT) += mpc8xx_wdt.o 104obj-$(CONFIG_8xx_WDT) += mpc8xx_wdt.o
105obj-$(CONFIG_MPC5200_WDT) += mpc5200_wdt.o 105obj-$(CONFIG_MPC5200_WDT) += mpc5200_wdt.o
106obj-$(CONFIG_83xx_WDT) += mpc83xx_wdt.o 106obj-$(CONFIG_8xxx_WDT) += mpc8xxx_wdt.o
107obj-$(CONFIG_MV64X60_WDT) += mv64x60_wdt.o 107obj-$(CONFIG_MV64X60_WDT) += mv64x60_wdt.o
108obj-$(CONFIG_BOOKE_WDT) += booke_wdt.o 108obj-$(CONFIG_BOOKE_WDT) += booke_wdt.o
109 109
diff --git a/drivers/watchdog/acquirewdt.c b/drivers/watchdog/acquirewdt.c
index 85269c365a10..6e46a551395c 100644
--- a/drivers/watchdog/acquirewdt.c
+++ b/drivers/watchdog/acquirewdt.c
@@ -58,39 +58,45 @@
58#include <linux/types.h> /* For standard types (like size_t) */ 58#include <linux/types.h> /* For standard types (like size_t) */
59#include <linux/errno.h> /* For the -ENODEV/... values */ 59#include <linux/errno.h> /* For the -ENODEV/... values */
60#include <linux/kernel.h> /* For printk/panic/... */ 60#include <linux/kernel.h> /* For printk/panic/... */
61#include <linux/miscdevice.h> /* For MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR) */ 61#include <linux/miscdevice.h> /* For MODULE_ALIAS_MISCDEV
62 (WATCHDOG_MINOR) */
62#include <linux/watchdog.h> /* For the watchdog specific items */ 63#include <linux/watchdog.h> /* For the watchdog specific items */
63#include <linux/fs.h> /* For file operations */ 64#include <linux/fs.h> /* For file operations */
64#include <linux/ioport.h> /* For io-port access */ 65#include <linux/ioport.h> /* For io-port access */
65#include <linux/platform_device.h> /* For platform_driver framework */ 66#include <linux/platform_device.h> /* For platform_driver framework */
66#include <linux/init.h> /* For __init/__exit/... */ 67#include <linux/init.h> /* For __init/__exit/... */
67 68#include <linux/uaccess.h> /* For copy_to_user/put_user/... */
68#include <asm/uaccess.h> /* For copy_to_user/put_user/... */ 69#include <linux/io.h> /* For inb/outb/... */
69#include <asm/io.h> /* For inb/outb/... */
70 70
71/* Module information */ 71/* Module information */
72#define DRV_NAME "acquirewdt" 72#define DRV_NAME "acquirewdt"
73#define PFX DRV_NAME ": " 73#define PFX DRV_NAME ": "
74#define WATCHDOG_NAME "Acquire WDT" 74#define WATCHDOG_NAME "Acquire WDT"
75#define WATCHDOG_HEARTBEAT 0 /* There is no way to see what the correct time-out period is */ 75/* There is no way to see what the correct time-out period is */
76#define WATCHDOG_HEARTBEAT 0
76 77
77/* internal variables */ 78/* internal variables */
78static struct platform_device *acq_platform_device; /* the watchdog platform device */ 79/* the watchdog platform device */
80static struct platform_device *acq_platform_device;
79static unsigned long acq_is_open; 81static unsigned long acq_is_open;
80static char expect_close; 82static char expect_close;
81 83
82/* module parameters */ 84/* module parameters */
83static int wdt_stop = 0x43; /* You must set this - there is no sane way to probe for this board. */ 85/* You must set this - there is no sane way to probe for this board. */
86static int wdt_stop = 0x43;
84module_param(wdt_stop, int, 0); 87module_param(wdt_stop, int, 0);
85MODULE_PARM_DESC(wdt_stop, "Acquire WDT 'stop' io port (default 0x43)"); 88MODULE_PARM_DESC(wdt_stop, "Acquire WDT 'stop' io port (default 0x43)");
86 89
87static int wdt_start = 0x443; /* You must set this - there is no sane way to probe for this board. */ 90/* You must set this - there is no sane way to probe for this board. */
91static int wdt_start = 0x443;
88module_param(wdt_start, int, 0); 92module_param(wdt_start, int, 0);
89MODULE_PARM_DESC(wdt_start, "Acquire WDT 'start' io port (default 0x443)"); 93MODULE_PARM_DESC(wdt_start, "Acquire WDT 'start' io port (default 0x443)");
90 94
91static int nowayout = WATCHDOG_NOWAYOUT; 95static int nowayout = WATCHDOG_NOWAYOUT;
92module_param(nowayout, int, 0); 96module_param(nowayout, int, 0);
93MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped once started (default=" __MODULE_STRING(WATCHDOG_NOWAYOUT) ")"); 97MODULE_PARM_DESC(nowayout,
98 "Watchdog cannot be stopped once started (default="
99 __MODULE_STRING(WATCHDOG_NOWAYOUT) ")");
94 100
95/* 101/*
96 * Watchdog Operations 102 * Watchdog Operations
@@ -112,18 +118,18 @@ static void acq_stop(void)
112 * /dev/watchdog handling 118 * /dev/watchdog handling
113 */ 119 */
114 120
115static ssize_t acq_write(struct file *file, const char __user *buf, size_t count, loff_t *ppos) 121static ssize_t acq_write(struct file *file, const char __user *buf,
122 size_t count, loff_t *ppos)
116{ 123{
117 /* See if we got the magic character 'V' and reload the timer */ 124 /* See if we got the magic character 'V' and reload the timer */
118 if(count) { 125 if (count) {
119 if (!nowayout) { 126 if (!nowayout) {
120 size_t i; 127 size_t i;
121
122 /* note: just in case someone wrote the magic character 128 /* note: just in case someone wrote the magic character
123 * five months ago... */ 129 five months ago... */
124 expect_close = 0; 130 expect_close = 0;
125 131 /* scan to see whether or not we got the
126 /* scan to see whether or not we got the magic character */ 132 magic character */
127 for (i = 0; i != count; i++) { 133 for (i = 0; i != count; i++) {
128 char c; 134 char c;
129 if (get_user(c, buf + i)) 135 if (get_user(c, buf + i))
@@ -132,64 +138,55 @@ static ssize_t acq_write(struct file *file, const char __user *buf, size_t count
132 expect_close = 42; 138 expect_close = 42;
133 } 139 }
134 } 140 }
135 141 /* Well, anyhow someone wrote to us, we should
136 /* Well, anyhow someone wrote to us, we should return that favour */ 142 return that favour */
137 acq_keepalive(); 143 acq_keepalive();
138 } 144 }
139 return count; 145 return count;
140} 146}
141 147
142static int acq_ioctl(struct inode *inode, struct file *file, unsigned int cmd, 148static long acq_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
143 unsigned long arg)
144{ 149{
145 int options, retval = -EINVAL; 150 int options, retval = -EINVAL;
146 void __user *argp = (void __user *)arg; 151 void __user *argp = (void __user *)arg;
147 int __user *p = argp; 152 int __user *p = argp;
148 static struct watchdog_info ident = 153 static struct watchdog_info ident = {
149 {
150 .options = WDIOF_KEEPALIVEPING | WDIOF_MAGICCLOSE, 154 .options = WDIOF_KEEPALIVEPING | WDIOF_MAGICCLOSE,
151 .firmware_version = 1, 155 .firmware_version = 1,
152 .identity = WATCHDOG_NAME, 156 .identity = WATCHDOG_NAME,
153 }; 157 };
154 158
155 switch(cmd) 159 switch (cmd) {
156 {
157 case WDIOC_GETSUPPORT: 160 case WDIOC_GETSUPPORT:
158 return copy_to_user(argp, &ident, sizeof(ident)) ? -EFAULT : 0; 161 return copy_to_user(argp, &ident, sizeof(ident)) ? -EFAULT : 0;
159 162
160 case WDIOC_GETSTATUS: 163 case WDIOC_GETSTATUS:
161 case WDIOC_GETBOOTSTATUS: 164 case WDIOC_GETBOOTSTATUS:
162 return put_user(0, p); 165 return put_user(0, p);
163
164 case WDIOC_KEEPALIVE:
165 acq_keepalive();
166 return 0;
167
168 case WDIOC_GETTIMEOUT:
169 return put_user(WATCHDOG_HEARTBEAT, p);
170 166
171 case WDIOC_SETOPTIONS: 167 case WDIOC_SETOPTIONS:
172 { 168 {
173 if (get_user(options, p)) 169 if (get_user(options, p))
174 return -EFAULT; 170 return -EFAULT;
175 171 if (options & WDIOS_DISABLECARD) {
176 if (options & WDIOS_DISABLECARD) 172 acq_stop();
177 { 173 retval = 0;
178 acq_stop(); 174 }
179 retval = 0; 175 if (options & WDIOS_ENABLECARD) {
180 } 176 acq_keepalive();
181 177 retval = 0;
182 if (options & WDIOS_ENABLECARD) 178 }
183 { 179 return retval;
184 acq_keepalive();
185 retval = 0;
186 }
187
188 return retval;
189 } 180 }
181 case WDIOC_KEEPALIVE:
182 acq_keepalive();
183 return 0;
184
185 case WDIOC_GETTIMEOUT:
186 return put_user(WATCHDOG_HEARTBEAT, p);
190 187
191 default: 188 default:
192 return -ENOTTY; 189 return -ENOTTY;
193 } 190 }
194} 191}
195 192
@@ -211,7 +208,8 @@ static int acq_close(struct inode *inode, struct file *file)
211 if (expect_close == 42) { 208 if (expect_close == 42) {
212 acq_stop(); 209 acq_stop();
213 } else { 210 } else {
214 printk(KERN_CRIT PFX "Unexpected close, not stopping watchdog!\n"); 211 printk(KERN_CRIT PFX
212 "Unexpected close, not stopping watchdog!\n");
215 acq_keepalive(); 213 acq_keepalive();
216 } 214 }
217 clear_bit(0, &acq_is_open); 215 clear_bit(0, &acq_is_open);
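
The acq_write()/acq_close() pair above implements the standard watchdog "magic close" handshake that the cleanup preserves: a write containing 'V' arms expect_close, and a release without it keeps the timer ticking and warns. A user-space analogue of the same logic, with keepalive() and wdt_stop() as hypothetical stand-ins for the hardware accesses:

#include <stdio.h>

static int nowayout;
static char expect_close;

static void keepalive(void) { printf("ping\n"); }
static void wdt_stop(void)  { printf("watchdog stopped\n"); }

static size_t wdt_write(const char *buf, size_t count)
{
	if (count && !nowayout) {
		expect_close = 0;	/* forget any stale magic character */
		for (size_t i = 0; i != count; i++)
			if (buf[i] == 'V')
				expect_close = 42;
	}
	if (count)
		keepalive();		/* any write refreshes the timer */
	return count;
}

static void wdt_release(void)
{
	if (expect_close == 42)
		wdt_stop();
	else {
		fprintf(stderr, "Unexpected close, not stopping watchdog!\n");
		keepalive();
	}
	expect_close = 0;
}

int main(void)
{
	wdt_write("V", 1);
	wdt_release();
	return 0;
}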
@@ -227,7 +225,7 @@ static const struct file_operations acq_fops = {
227 .owner = THIS_MODULE, 225 .owner = THIS_MODULE,
228 .llseek = no_llseek, 226 .llseek = no_llseek,
229 .write = acq_write, 227 .write = acq_write,
230 .ioctl = acq_ioctl, 228 .unlocked_ioctl = acq_ioctl,
231 .open = acq_open, 229 .open = acq_open,
232 .release = acq_close, 230 .release = acq_close,
233}; 231};
@@ -248,32 +246,29 @@ static int __devinit acq_probe(struct platform_device *dev)
248 246
249 if (wdt_stop != wdt_start) { 247 if (wdt_stop != wdt_start) {
250 if (!request_region(wdt_stop, 1, WATCHDOG_NAME)) { 248 if (!request_region(wdt_stop, 1, WATCHDOG_NAME)) {
251 printk (KERN_ERR PFX "I/O address 0x%04x already in use\n", 249 printk(KERN_ERR PFX
252 wdt_stop); 250 "I/O address 0x%04x already in use\n", wdt_stop);
253 ret = -EIO; 251 ret = -EIO;
254 goto out; 252 goto out;
255 } 253 }
256 } 254 }
257 255
258 if (!request_region(wdt_start, 1, WATCHDOG_NAME)) { 256 if (!request_region(wdt_start, 1, WATCHDOG_NAME)) {
259 printk (KERN_ERR PFX "I/O address 0x%04x already in use\n", 257 printk(KERN_ERR PFX "I/O address 0x%04x already in use\n",
260 wdt_start); 258 wdt_start);
261 ret = -EIO; 259 ret = -EIO;
262 goto unreg_stop; 260 goto unreg_stop;
263 } 261 }
264
265 ret = misc_register(&acq_miscdev); 262 ret = misc_register(&acq_miscdev);
266 if (ret != 0) { 263 if (ret != 0) {
267 printk (KERN_ERR PFX "cannot register miscdev on minor=%d (err=%d)\n", 264 printk(KERN_ERR PFX
268 WATCHDOG_MINOR, ret); 265 "cannot register miscdev on minor=%d (err=%d)\n",
266 WATCHDOG_MINOR, ret);
269 goto unreg_regions; 267 goto unreg_regions;
270 } 268 }
271 269 printk(KERN_INFO PFX "initialized. (nowayout=%d)\n", nowayout);
272 printk (KERN_INFO PFX "initialized. (nowayout=%d)\n",
273 nowayout);
274 270
275 return 0; 271 return 0;
276
277unreg_regions: 272unreg_regions:
278 release_region(wdt_start, 1); 273 release_region(wdt_start, 1);
279unreg_stop: 274unreg_stop:
@@ -286,9 +281,9 @@ out:
286static int __devexit acq_remove(struct platform_device *dev) 281static int __devexit acq_remove(struct platform_device *dev)
287{ 282{
288 misc_deregister(&acq_miscdev); 283 misc_deregister(&acq_miscdev);
289 release_region(wdt_start,1); 284 release_region(wdt_start, 1);
290 if(wdt_stop != wdt_start) 285 if (wdt_stop != wdt_start)
291 release_region(wdt_stop,1); 286 release_region(wdt_stop, 1);
292 287
293 return 0; 288 return 0;
294} 289}
@@ -313,18 +308,19 @@ static int __init acq_init(void)
313{ 308{
314 int err; 309 int err;
315 310
316 printk(KERN_INFO "WDT driver for Acquire single board computer initialising.\n"); 311 printk(KERN_INFO
312 "WDT driver for Acquire single board computer initialising.\n");
317 313
318 err = platform_driver_register(&acquirewdt_driver); 314 err = platform_driver_register(&acquirewdt_driver);
319 if (err) 315 if (err)
320 return err; 316 return err;
321 317
322 acq_platform_device = platform_device_register_simple(DRV_NAME, -1, NULL, 0); 318 acq_platform_device = platform_device_register_simple(DRV_NAME,
319 -1, NULL, 0);
323 if (IS_ERR(acq_platform_device)) { 320 if (IS_ERR(acq_platform_device)) {
324 err = PTR_ERR(acq_platform_device); 321 err = PTR_ERR(acq_platform_device);
325 goto unreg_platform_driver; 322 goto unreg_platform_driver;
326 } 323 }
327
328 return 0; 324 return 0;
329 325
330unreg_platform_driver: 326unreg_platform_driver:
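
Besides the checkpatch-style reflow, the acquirewdt changes above convert the driver from .ioctl to .unlocked_ioctl: the handler loses the inode argument, returns long, and is no longer called with the Big Kernel Lock held, so any serialisation it needs must come from the driver itself. A minimal sketch of the converted shape with hypothetical names (not the acquirewdt code), kept to the two status queries for brevity:

#include <linux/fs.h>
#include <linux/module.h>
#include <linux/uaccess.h>
#include <linux/watchdog.h>

static long example_wdt_ioctl(struct file *file, unsigned int cmd,
			      unsigned long arg)
{
	int __user *p = (int __user *)arg;

	switch (cmd) {
	case WDIOC_GETSTATUS:
	case WDIOC_GETBOOTSTATUS:
		return put_user(0, p);
	default:
		return -ENOTTY;
	}
}

static const struct file_operations example_wdt_fops = {
	.owner		= THIS_MODULE,
	.unlocked_ioctl	= example_wdt_ioctl,	/* was .ioctl before the conversion */
};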
diff --git a/drivers/watchdog/advantechwdt.c b/drivers/watchdog/advantechwdt.c
index 8121cc247343..a5110f93a755 100644
--- a/drivers/watchdog/advantechwdt.c
+++ b/drivers/watchdog/advantechwdt.c
@@ -37,9 +37,9 @@
37#include <linux/ioport.h> 37#include <linux/ioport.h>
38#include <linux/platform_device.h> 38#include <linux/platform_device.h>
39#include <linux/init.h> 39#include <linux/init.h>
40#include <linux/io.h>
41#include <linux/uaccess.h>
40 42
41#include <asm/io.h>
42#include <asm/uaccess.h>
43#include <asm/system.h> 43#include <asm/system.h>
44 44
45#define DRV_NAME "advantechwdt" 45#define DRV_NAME "advantechwdt"
@@ -47,7 +47,8 @@
47#define WATCHDOG_NAME "Advantech WDT" 47#define WATCHDOG_NAME "Advantech WDT"
48#define WATCHDOG_TIMEOUT 60 /* 60 sec default timeout */ 48#define WATCHDOG_TIMEOUT 60 /* 60 sec default timeout */
49 49
50static struct platform_device *advwdt_platform_device; /* the watchdog platform device */ 50/* the watchdog platform device */
51static struct platform_device *advwdt_platform_device;
51static unsigned long advwdt_is_open; 52static unsigned long advwdt_is_open;
52static char adv_expect_close; 53static char adv_expect_close;
53 54
@@ -72,35 +73,35 @@ MODULE_PARM_DESC(wdt_start, "Advantech WDT 'start' io port (default 0x443)");
72 73
73static int timeout = WATCHDOG_TIMEOUT; /* in seconds */ 74static int timeout = WATCHDOG_TIMEOUT; /* in seconds */
74module_param(timeout, int, 0); 75module_param(timeout, int, 0);
75MODULE_PARM_DESC(timeout, "Watchdog timeout in seconds. 1<= timeout <=63, default=" __MODULE_STRING(WATCHDOG_TIMEOUT) "."); 76MODULE_PARM_DESC(timeout,
77 "Watchdog timeout in seconds. 1<= timeout <=63, default="
78 __MODULE_STRING(WATCHDOG_TIMEOUT) ".");
76 79
77static int nowayout = WATCHDOG_NOWAYOUT; 80static int nowayout = WATCHDOG_NOWAYOUT;
78module_param(nowayout, int, 0); 81module_param(nowayout, int, 0);
79MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped once started (default=" __MODULE_STRING(WATCHDOG_NOWAYOUT) ")"); 82MODULE_PARM_DESC(nowayout,
83 "Watchdog cannot be stopped once started (default="
84 __MODULE_STRING(WATCHDOG_NOWAYOUT) ")");
80 85
81/* 86/*
82 * Watchdog Operations 87 * Watchdog Operations
83 */ 88 */
84 89
85static void 90static void advwdt_ping(void)
86advwdt_ping(void)
87{ 91{
88 /* Write a watchdog value */ 92 /* Write a watchdog value */
89 outb_p(timeout, wdt_start); 93 outb_p(timeout, wdt_start);
90} 94}
91 95
92static void 96static void advwdt_disable(void)
93advwdt_disable(void)
94{ 97{
95 inb_p(wdt_stop); 98 inb_p(wdt_stop);
96} 99}
97 100
98static int 101static int advwdt_set_heartbeat(int t)
99advwdt_set_heartbeat(int t)
100{ 102{
101 if ((t < 1) || (t > 63)) 103 if (t < 1 || t > 63)
102 return -EINVAL; 104 return -EINVAL;
103
104 timeout = t; 105 timeout = t;
105 return 0; 106 return 0;
106} 107}
@@ -109,8 +110,8 @@ advwdt_set_heartbeat(int t)
109 * /dev/watchdog handling 110 * /dev/watchdog handling
110 */ 111 */
111 112
112static ssize_t 113static ssize_t advwdt_write(struct file *file, const char __user *buf,
113advwdt_write(struct file *file, const char __user *buf, size_t count, loff_t *ppos) 114 size_t count, loff_t *ppos)
114{ 115{
115 if (count) { 116 if (count) {
116 if (!nowayout) { 117 if (!nowayout) {
@@ -120,7 +121,7 @@ advwdt_write(struct file *file, const char __user *buf, size_t count, loff_t *pp
120 121
121 for (i = 0; i != count; i++) { 122 for (i = 0; i != count; i++) {
122 char c; 123 char c;
123 if (get_user(c, buf+i)) 124 if (get_user(c, buf + i))
124 return -EFAULT; 125 return -EFAULT;
125 if (c == 'V') 126 if (c == 'V')
126 adv_expect_close = 42; 127 adv_expect_close = 42;
@@ -131,9 +132,7 @@ advwdt_write(struct file *file, const char __user *buf, size_t count, loff_t *pp
131 return count; 132 return count;
132} 133}
133 134
134static int 135static long advwdt_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
135advwdt_ioctl(struct inode *inode, struct file *file, unsigned int cmd,
136 unsigned long arg)
137{ 136{
138 int new_timeout; 137 int new_timeout;
139 void __user *argp = (void __user *)arg; 138 void __user *argp = (void __user *)arg;
@@ -146,57 +145,50 @@ advwdt_ioctl(struct inode *inode, struct file *file, unsigned int cmd,
146 145
147 switch (cmd) { 146 switch (cmd) {
148 case WDIOC_GETSUPPORT: 147 case WDIOC_GETSUPPORT:
149 if (copy_to_user(argp, &ident, sizeof(ident))) 148 if (copy_to_user(argp, &ident, sizeof(ident)))
150 return -EFAULT; 149 return -EFAULT;
151 break; 150 break;
152 151
153 case WDIOC_GETSTATUS: 152 case WDIOC_GETSTATUS:
154 case WDIOC_GETBOOTSTATUS: 153 case WDIOC_GETBOOTSTATUS:
155 return put_user(0, p); 154 return put_user(0, p);
156
157 case WDIOC_KEEPALIVE:
158 advwdt_ping();
159 break;
160
161 case WDIOC_SETTIMEOUT:
162 if (get_user(new_timeout, p))
163 return -EFAULT;
164 if (advwdt_set_heartbeat(new_timeout))
165 return -EINVAL;
166 advwdt_ping();
167 /* Fall */
168
169 case WDIOC_GETTIMEOUT:
170 return put_user(timeout, p);
171 155
172 case WDIOC_SETOPTIONS: 156 case WDIOC_SETOPTIONS:
173 { 157 {
174 int options, retval = -EINVAL; 158 int options, retval = -EINVAL;
175
176 if (get_user(options, p))
177 return -EFAULT;
178
179 if (options & WDIOS_DISABLECARD) {
180 advwdt_disable();
181 retval = 0;
182 }
183 159
184 if (options & WDIOS_ENABLECARD) { 160 if (get_user(options, p))
185 advwdt_ping(); 161 return -EFAULT;
186 retval = 0; 162 if (options & WDIOS_DISABLECARD) {
187 } 163 advwdt_disable();
188 164 retval = 0;
189 return retval; 165 }
166 if (options & WDIOS_ENABLECARD) {
167 advwdt_ping();
168 retval = 0;
169 }
170 return retval;
190 } 171 }
172 case WDIOC_KEEPALIVE:
173 advwdt_ping();
174 break;
191 175
176 case WDIOC_SETTIMEOUT:
177 if (get_user(new_timeout, p))
178 return -EFAULT;
179 if (advwdt_set_heartbeat(new_timeout))
180 return -EINVAL;
181 advwdt_ping();
182 /* Fall */
183 case WDIOC_GETTIMEOUT:
184 return put_user(timeout, p);
192 default: 185 default:
193 return -ENOTTY; 186 return -ENOTTY;
194 } 187 }
195 return 0; 188 return 0;
196} 189}
197 190
198static int 191static int advwdt_open(struct inode *inode, struct file *file)
199advwdt_open(struct inode *inode, struct file *file)
200{ 192{
201 if (test_and_set_bit(0, &advwdt_is_open)) 193 if (test_and_set_bit(0, &advwdt_is_open))
202 return -EBUSY; 194 return -EBUSY;
@@ -208,13 +200,13 @@ advwdt_open(struct inode *inode, struct file *file)
208 return nonseekable_open(inode, file); 200 return nonseekable_open(inode, file);
209} 201}
210 202
211static int 203static int advwdt_close(struct inode *inode, struct file *file)
212advwdt_close(struct inode *inode, struct file *file)
213{ 204{
214 if (adv_expect_close == 42) { 205 if (adv_expect_close == 42) {
215 advwdt_disable(); 206 advwdt_disable();
216 } else { 207 } else {
217 printk(KERN_CRIT PFX "Unexpected close, not stopping watchdog!\n"); 208 printk(KERN_CRIT PFX
209 "Unexpected close, not stopping watchdog!\n");
218 advwdt_ping(); 210 advwdt_ping();
219 } 211 }
220 clear_bit(0, &advwdt_is_open); 212 clear_bit(0, &advwdt_is_open);
@@ -230,7 +222,7 @@ static const struct file_operations advwdt_fops = {
230 .owner = THIS_MODULE, 222 .owner = THIS_MODULE,
231 .llseek = no_llseek, 223 .llseek = no_llseek,
232 .write = advwdt_write, 224 .write = advwdt_write,
233 .ioctl = advwdt_ioctl, 225 .unlocked_ioctl = advwdt_ioctl,
234 .open = advwdt_open, 226 .open = advwdt_open,
235 .release = advwdt_close, 227 .release = advwdt_close,
236}; 228};
@@ -245,23 +237,24 @@ static struct miscdevice advwdt_miscdev = {
245 * Init & exit routines 237 * Init & exit routines
246 */ 238 */
247 239
248static int __devinit 240static int __devinit advwdt_probe(struct platform_device *dev)
249advwdt_probe(struct platform_device *dev)
250{ 241{
251 int ret; 242 int ret;
252 243
253 if (wdt_stop != wdt_start) { 244 if (wdt_stop != wdt_start) {
254 if (!request_region(wdt_stop, 1, WATCHDOG_NAME)) { 245 if (!request_region(wdt_stop, 1, WATCHDOG_NAME)) {
255 printk (KERN_ERR PFX "I/O address 0x%04x already in use\n", 246 printk(KERN_ERR PFX
256 wdt_stop); 247 "I/O address 0x%04x already in use\n",
248 wdt_stop);
257 ret = -EIO; 249 ret = -EIO;
258 goto out; 250 goto out;
259 } 251 }
260 } 252 }
261 253
262 if (!request_region(wdt_start, 1, WATCHDOG_NAME)) { 254 if (!request_region(wdt_start, 1, WATCHDOG_NAME)) {
263 printk (KERN_ERR PFX "I/O address 0x%04x already in use\n", 255 printk(KERN_ERR PFX
264 wdt_start); 256 "I/O address 0x%04x already in use\n",
257 wdt_start);
265 ret = -EIO; 258 ret = -EIO;
266 goto unreg_stop; 259 goto unreg_stop;
267 } 260 }
@@ -269,20 +262,19 @@ advwdt_probe(struct platform_device *dev)
269 /* Check that the heartbeat value is within it's range ; if not reset to the default */ 262 /* Check that the heartbeat value is within it's range ; if not reset to the default */
270 if (advwdt_set_heartbeat(timeout)) { 263 if (advwdt_set_heartbeat(timeout)) {
271 advwdt_set_heartbeat(WATCHDOG_TIMEOUT); 264 advwdt_set_heartbeat(WATCHDOG_TIMEOUT);
272 printk (KERN_INFO PFX "timeout value must be 1<=x<=63, using %d\n", 265 printk(KERN_INFO PFX
273 timeout); 266 "timeout value must be 1<=x<=63, using %d\n", timeout);
274 } 267 }
275 268
276 ret = misc_register(&advwdt_miscdev); 269 ret = misc_register(&advwdt_miscdev);
277 if (ret != 0) { 270 if (ret != 0) {
278 printk (KERN_ERR PFX "cannot register miscdev on minor=%d (err=%d)\n", 271 printk(KERN_ERR PFX
279 WATCHDOG_MINOR, ret); 272 "cannot register miscdev on minor=%d (err=%d)\n",
273 WATCHDOG_MINOR, ret);
280 goto unreg_regions; 274 goto unreg_regions;
281 } 275 }
282 276 printk(KERN_INFO PFX "initialized. timeout=%d sec (nowayout=%d)\n",
283 printk (KERN_INFO PFX "initialized. timeout=%d sec (nowayout=%d)\n",
284 timeout, nowayout); 277 timeout, nowayout);
285
286out: 278out:
287 return ret; 279 return ret;
288unreg_regions: 280unreg_regions:
@@ -293,19 +285,17 @@ unreg_stop:
293 goto out; 285 goto out;
294} 286}
295 287
296static int __devexit 288static int __devexit advwdt_remove(struct platform_device *dev)
297advwdt_remove(struct platform_device *dev)
298{ 289{
299 misc_deregister(&advwdt_miscdev); 290 misc_deregister(&advwdt_miscdev);
300 release_region(wdt_start,1); 291 release_region(wdt_start, 1);
301 if(wdt_stop != wdt_start) 292 if (wdt_stop != wdt_start)
302 release_region(wdt_stop,1); 293 release_region(wdt_stop, 1);
303 294
304 return 0; 295 return 0;
305} 296}
306 297
307static void 298static void advwdt_shutdown(struct platform_device *dev)
308advwdt_shutdown(struct platform_device *dev)
309{ 299{
310 /* Turn the WDT off if we have a soft shutdown */ 300 /* Turn the WDT off if we have a soft shutdown */
311 advwdt_disable(); 301 advwdt_disable();
@@ -321,18 +311,19 @@ static struct platform_driver advwdt_driver = {
321 }, 311 },
322}; 312};
323 313
324static int __init 314static int __init advwdt_init(void)
325advwdt_init(void)
326{ 315{
327 int err; 316 int err;
328 317
329 printk(KERN_INFO "WDT driver for Advantech single board computer initialising.\n"); 318 printk(KERN_INFO
319 "WDT driver for Advantech single board computer initialising.\n");
330 320
331 err = platform_driver_register(&advwdt_driver); 321 err = platform_driver_register(&advwdt_driver);
332 if (err) 322 if (err)
333 return err; 323 return err;
334 324
335 advwdt_platform_device = platform_device_register_simple(DRV_NAME, -1, NULL, 0); 325 advwdt_platform_device = platform_device_register_simple(DRV_NAME,
326 -1, NULL, 0);
336 if (IS_ERR(advwdt_platform_device)) { 327 if (IS_ERR(advwdt_platform_device)) {
337 err = PTR_ERR(advwdt_platform_device); 328 err = PTR_ERR(advwdt_platform_device);
338 goto unreg_platform_driver; 329 goto unreg_platform_driver;
@@ -345,8 +336,7 @@ unreg_platform_driver:
345 return err; 336 return err;
346} 337}
347 338
348static void __exit 339static void __exit advwdt_exit(void)
349advwdt_exit(void)
350{ 340{
351 platform_device_unregister(advwdt_platform_device); 341 platform_device_unregister(advwdt_platform_device);
352 platform_driver_unregister(&advwdt_driver); 342 platform_driver_unregister(&advwdt_driver);
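
The reordered advwdt_ioctl() above keeps the deliberate fall-through from WDIOC_SETTIMEOUT into WDIOC_GETTIMEOUT (the "/* Fall */" comment), so a successful set reports back the timeout value actually programmed. A runnable user-space sketch of that switch shape, with CMD_* constants and set_heartbeat() as hypothetical stand-ins for the WDIOC ioctls and the 1..63 clamp:

#include <stdio.h>

static int timeout = 60;

static int set_heartbeat(int t)
{
	if (t < 1 || t > 63)
		return -1;
	timeout = t;
	return 0;
}

enum { CMD_SETTIMEOUT, CMD_GETTIMEOUT };

static int wdt_ioctl(int cmd, int *arg)
{
	switch (cmd) {
	case CMD_SETTIMEOUT:
		if (set_heartbeat(*arg))
			return -1;
		/* fall through: report the value now in effect */
	case CMD_GETTIMEOUT:
		*arg = timeout;
		return 0;
	default:
		return -1;
	}
}

int main(void)
{
	int v = 45;

	wdt_ioctl(CMD_SETTIMEOUT, &v);
	printf("timeout now %d\n", v);
	return 0;
}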
diff --git a/drivers/watchdog/alim1535_wdt.c b/drivers/watchdog/alim1535_wdt.c
index 2b1fbdb2fcf7..2a7690ecf97d 100644
--- a/drivers/watchdog/alim1535_wdt.c
+++ b/drivers/watchdog/alim1535_wdt.c
@@ -18,9 +18,8 @@
18#include <linux/init.h> 18#include <linux/init.h>
19#include <linux/fs.h> 19#include <linux/fs.h>
20#include <linux/pci.h> 20#include <linux/pci.h>
21 21#include <linux/uaccess.h>
22#include <asm/uaccess.h> 22#include <linux/io.h>
23#include <asm/io.h>
24 23
25#define WATCHDOG_NAME "ALi_M1535" 24#define WATCHDOG_NAME "ALi_M1535"
26#define PFX WATCHDOG_NAME ": " 25#define PFX WATCHDOG_NAME ": "
@@ -30,17 +29,21 @@
30static unsigned long ali_is_open; 29static unsigned long ali_is_open;
31static char ali_expect_release; 30static char ali_expect_release;
32static struct pci_dev *ali_pci; 31static struct pci_dev *ali_pci;
33static u32 ali_timeout_bits; /* stores the computed timeout */ 32static u32 ali_timeout_bits; /* stores the computed timeout */
34static DEFINE_SPINLOCK(ali_lock); /* Guards the hardware */ 33static DEFINE_SPINLOCK(ali_lock); /* Guards the hardware */
35 34
36/* module parameters */ 35/* module parameters */
37static int timeout = WATCHDOG_TIMEOUT; 36static int timeout = WATCHDOG_TIMEOUT;
38module_param(timeout, int, 0); 37module_param(timeout, int, 0);
39MODULE_PARM_DESC(timeout, "Watchdog timeout in seconds. (0<timeout<18000, default=" __MODULE_STRING(WATCHDOG_TIMEOUT) ")"); 38MODULE_PARM_DESC(timeout,
39 "Watchdog timeout in seconds. (0 < timeout < 18000, default="
40 __MODULE_STRING(WATCHDOG_TIMEOUT) ")");
40 41
41static int nowayout = WATCHDOG_NOWAYOUT; 42static int nowayout = WATCHDOG_NOWAYOUT;
42module_param(nowayout, int, 0); 43module_param(nowayout, int, 0);
43MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped once started (default=" __MODULE_STRING(WATCHDOG_NOWAYOUT) ")"); 44MODULE_PARM_DESC(nowayout,
45 "Watchdog cannot be stopped once started (default="
46 __MODULE_STRING(WATCHDOG_NOWAYOUT) ")");
44 47
45/* 48/*
46 * ali_start - start watchdog countdown 49 * ali_start - start watchdog countdown
@@ -103,15 +106,16 @@ static void ali_keepalive(void)
103 106
104static int ali_settimer(int t) 107static int ali_settimer(int t)
105{ 108{
106 if(t < 0) 109 if (t < 0)
107 return -EINVAL; 110 return -EINVAL;
108 else if(t < 60) 111 else if (t < 60)
109 ali_timeout_bits = t|(1<<6); 112 ali_timeout_bits = t|(1<<6);
110 else if(t < 3600) 113 else if (t < 3600)
111 ali_timeout_bits = (t/60)|(1<<7); 114 ali_timeout_bits = (t/60)|(1<<7);
112 else if(t < 18000) 115 else if (t < 18000)
113 ali_timeout_bits = (t/300)|(1<<6)|(1<<7); 116 ali_timeout_bits = (t/300)|(1<<6)|(1<<7);
114 else return -EINVAL; 117 else
118 return -EINVAL;
115 119
116 timeout = t; 120 timeout = t;
117 return 0; 121 return 0;
@@ -134,21 +138,22 @@ static int ali_settimer(int t)
134 */ 138 */
135 139
136static ssize_t ali_write(struct file *file, const char __user *data, 140static ssize_t ali_write(struct file *file, const char __user *data,
137 size_t len, loff_t * ppos) 141 size_t len, loff_t *ppos)
138{ 142{
139 /* See if we got the magic character 'V' and reload the timer */ 143 /* See if we got the magic character 'V' and reload the timer */
140 if (len) { 144 if (len) {
141 if (!nowayout) { 145 if (!nowayout) {
142 size_t i; 146 size_t i;
143 147
144 /* note: just in case someone wrote the magic character 148 /* note: just in case someone wrote the
145 * five months ago... */ 149 magic character five months ago... */
146 ali_expect_release = 0; 150 ali_expect_release = 0;
147 151
148 /* scan to see whether or not we got the magic character */ 152 /* scan to see whether or not we got
153 the magic character */
149 for (i = 0; i != len; i++) { 154 for (i = 0; i != len; i++) {
150 char c; 155 char c;
151 if(get_user(c, data+i)) 156 if (get_user(c, data + i))
152 return -EFAULT; 157 return -EFAULT;
153 if (c == 'V') 158 if (c == 'V')
154 ali_expect_release = 42; 159 ali_expect_release = 42;
@@ -163,7 +168,6 @@ static ssize_t ali_write(struct file *file, const char __user *data,
163 168
164/* 169/*
165 * ali_ioctl - handle watchdog ioctls 170 * ali_ioctl - handle watchdog ioctls
166 * @inode: VFS inode
167 * @file: VFS file pointer 171 * @file: VFS file pointer
168 * @cmd: ioctl number 172 * @cmd: ioctl number
169 * @arg: arguments to the ioctl 173 * @arg: arguments to the ioctl
@@ -172,8 +176,7 @@ static ssize_t ali_write(struct file *file, const char __user *data,
172 * we want an extension to enable irq ack monitoring and the like 176 * we want an extension to enable irq ack monitoring and the like
173 */ 177 */
174 178
175static int ali_ioctl(struct inode *inode, struct file *file, 179static long ali_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
176 unsigned int cmd, unsigned long arg)
177{ 180{
178 void __user *argp = (void __user *)arg; 181 void __user *argp = (void __user *)arg;
179 int __user *p = argp; 182 int __user *p = argp;
@@ -186,57 +189,45 @@ static int ali_ioctl(struct inode *inode, struct file *file,
186 }; 189 };
187 190
188 switch (cmd) { 191 switch (cmd) {
189 case WDIOC_GETSUPPORT: 192 case WDIOC_GETSUPPORT:
190 return copy_to_user(argp, &ident, 193 return copy_to_user(argp, &ident, sizeof(ident)) ? -EFAULT : 0;
191 sizeof (ident)) ? -EFAULT : 0; 194
192 195 case WDIOC_GETSTATUS:
193 case WDIOC_GETSTATUS: 196 case WDIOC_GETBOOTSTATUS:
194 case WDIOC_GETBOOTSTATUS: 197 return put_user(0, p);
195 return put_user(0, p); 198 case WDIOC_SETOPTIONS:
196 199 {
197 case WDIOC_KEEPALIVE: 200 int new_options, retval = -EINVAL;
198 ali_keepalive(); 201
199 return 0; 202 if (get_user(new_options, p))
200 203 return -EFAULT;
201 case WDIOC_SETOPTIONS: 204 if (new_options & WDIOS_DISABLECARD) {
202 { 205 ali_stop();
203 int new_options, retval = -EINVAL; 206 retval = 0;
204
205 if (get_user (new_options, p))
206 return -EFAULT;
207
208 if (new_options & WDIOS_DISABLECARD) {
209 ali_stop();
210 retval = 0;
211 }
212
213 if (new_options & WDIOS_ENABLECARD) {
214 ali_start();
215 retval = 0;
216 }
217
218 return retval;
219 } 207 }
220 208 if (new_options & WDIOS_ENABLECARD) {
221 case WDIOC_SETTIMEOUT: 209 ali_start();
222 { 210 retval = 0;
223 int new_timeout;
224
225 if (get_user(new_timeout, p))
226 return -EFAULT;
227
228 if (ali_settimer(new_timeout))
229 return -EINVAL;
230
231 ali_keepalive();
232 /* Fall */
233 } 211 }
234 212 return retval;
235 case WDIOC_GETTIMEOUT: 213 }
236 return put_user(timeout, p); 214 case WDIOC_KEEPALIVE:
237 215 ali_keepalive();
238 default: 216 return 0;
239 return -ENOTTY; 217 case WDIOC_SETTIMEOUT:
218 {
219 int new_timeout;
220 if (get_user(new_timeout, p))
221 return -EFAULT;
222 if (ali_settimer(new_timeout))
223 return -EINVAL;
224 ali_keepalive();
225 /* Fall */
226 }
227 case WDIOC_GETTIMEOUT:
228 return put_user(timeout, p);
229 default:
230 return -ENOTTY;
240 } 231 }
241} 232}
242 233
@@ -274,10 +265,11 @@ static int ali_release(struct inode *inode, struct file *file)
274 /* 265 /*
275 * Shut off the timer. 266 * Shut off the timer.
276 */ 267 */
277 if (ali_expect_release == 42) { 268 if (ali_expect_release == 42)
278 ali_stop(); 269 ali_stop();
279 } else { 270 else {
280 printk(KERN_CRIT PFX "Unexpected close, not stopping watchdog!\n"); 271 printk(KERN_CRIT PFX
272 "Unexpected close, not stopping watchdog!\n");
281 ali_keepalive(); 273 ali_keepalive();
282 } 274 }
283 clear_bit(0, &ali_is_open); 275 clear_bit(0, &ali_is_open);
@@ -292,13 +284,11 @@ static int ali_release(struct inode *inode, struct file *file)
292 */ 284 */
293 285
294 286
295static int ali_notify_sys(struct notifier_block *this, unsigned long code, void *unused) 287static int ali_notify_sys(struct notifier_block *this,
288 unsigned long code, void *unused)
296{ 289{
297 if (code==SYS_DOWN || code==SYS_HALT) { 290 if (code == SYS_DOWN || code == SYS_HALT)
298 /* Turn the WDT off */ 291 ali_stop(); /* Turn the WDT off */
299 ali_stop();
300 }
301
302 return NOTIFY_DONE; 292 return NOTIFY_DONE;
303} 293}
304 294
@@ -340,10 +330,10 @@ static int __init ali_find_watchdog(void)
340 330
341 /* Check for the 7101 PMU */ 331 /* Check for the 7101 PMU */
342 pdev = pci_get_device(PCI_VENDOR_ID_AL, 0x7101, NULL); 332 pdev = pci_get_device(PCI_VENDOR_ID_AL, 0x7101, NULL);
343 if(pdev == NULL) 333 if (pdev == NULL)
344 return -ENODEV; 334 return -ENODEV;
345 335
346 if(pci_enable_device(pdev)) { 336 if (pci_enable_device(pdev)) {
347 pci_dev_put(pdev); 337 pci_dev_put(pdev);
348 return -EIO; 338 return -EIO;
349 } 339 }
@@ -355,9 +345,12 @@ static int __init ali_find_watchdog(void)
355 */ 345 */
356 pci_read_config_dword(pdev, 0xCC, &wdog); 346 pci_read_config_dword(pdev, 0xCC, &wdog);
357 347
358 wdog &= ~0x3F; /* Timer bits */ 348 /* Timer bits */
359 wdog &= ~((1<<27)|(1<<26)|(1<<25)|(1<<24)); /* Issued events */ 349 wdog &= ~0x3F;
360 wdog &= ~((1<<16)|(1<<13)|(1<<12)|(1<<11)|(1<<10)|(1<<9)); /* No monitor bits */ 350 /* Issued events */
351 wdog &= ~((1<<27)|(1<<26)|(1<<25)|(1<<24));
352 /* No monitor bits */
353 wdog &= ~((1<<16)|(1<<13)|(1<<12)|(1<<11)|(1<<10)|(1<<9));
361 354
362 pci_write_config_dword(pdev, 0xCC, wdog); 355 pci_write_config_dword(pdev, 0xCC, wdog);
363 356
@@ -369,12 +362,12 @@ static int __init ali_find_watchdog(void)
369 */ 362 */
370 363
371static const struct file_operations ali_fops = { 364static const struct file_operations ali_fops = {
372 .owner = THIS_MODULE, 365 .owner = THIS_MODULE,
373 .llseek = no_llseek, 366 .llseek = no_llseek,
374 .write = ali_write, 367 .write = ali_write,
375 .ioctl = ali_ioctl, 368 .unlocked_ioctl = ali_ioctl,
376 .open = ali_open, 369 .open = ali_open,
377 .release = ali_release, 370 .release = ali_release,
378}; 371};
379 372
380static struct miscdevice ali_miscdev = { 373static struct miscdevice ali_miscdev = {
@@ -399,15 +392,16 @@ static int __init watchdog_init(void)
399 int ret; 392 int ret;
400 393
401 /* Check whether or not the hardware watchdog is there */ 394 /* Check whether or not the hardware watchdog is there */
402 if (ali_find_watchdog() != 0) { 395 if (ali_find_watchdog() != 0)
403 return -ENODEV; 396 return -ENODEV;
404 }
405 397
406 /* Check that the timeout value is within its range ; if not reset to the default */ 398 /* Check that the timeout value is within its range;
399 if not reset to the default */
407 if (timeout < 1 || timeout >= 18000) { 400 if (timeout < 1 || timeout >= 18000) {
408 timeout = WATCHDOG_TIMEOUT; 401 timeout = WATCHDOG_TIMEOUT;
409 printk(KERN_INFO PFX "timeout value must be 0<timeout<18000, using %d\n", 402 printk(KERN_INFO PFX
410 timeout); 403 "timeout value must be 0 < timeout < 18000, using %d\n",
404 timeout);
411 } 405 }
412 406
413 /* Calculate the watchdog's timeout */ 407 /* Calculate the watchdog's timeout */
@@ -415,15 +409,16 @@ static int __init watchdog_init(void)
415 409
416 ret = register_reboot_notifier(&ali_notifier); 410 ret = register_reboot_notifier(&ali_notifier);
417 if (ret != 0) { 411 if (ret != 0) {
418 printk(KERN_ERR PFX "cannot register reboot notifier (err=%d)\n", 412 printk(KERN_ERR PFX
419 ret); 413 "cannot register reboot notifier (err=%d)\n", ret);
420 goto out; 414 goto out;
421 } 415 }
422 416
423 ret = misc_register(&ali_miscdev); 417 ret = misc_register(&ali_miscdev);
424 if (ret != 0) { 418 if (ret != 0) {
425 printk(KERN_ERR PFX "cannot register miscdev on minor=%d (err=%d)\n", 419 printk(KERN_ERR PFX
426 WATCHDOG_MINOR, ret); 420 "cannot register miscdev on minor=%d (err=%d)\n",
421 WATCHDOG_MINOR, ret);
427 goto unreg_reboot; 422 goto unreg_reboot;
428 } 423 }
429 424
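
The ali_settimer() hunk above encodes the requested timeout into a 6-bit count plus two unit-select bits before it is written to the chip. Below is a standalone sketch of that encoding, written as ordinary user-space C so it can be compiled and checked directly; the unit names in the comments are an assumption read off the arithmetic, not taken from an ALi datasheet.

/*
 * Sketch of the ALi M1535 timeout encoding seen in ali_settimer():
 * bits 6/7 select the count unit and the low 6 bits hold the count.
 */
#include <stdio.h>

static int encode_ali_timeout(int t, unsigned int *bits)
{
	if (t < 0)
		return -1;
	else if (t < 60)
		*bits = t | (1 << 6);			/* count in seconds (assumed) */
	else if (t < 3600)
		*bits = (t / 60) | (1 << 7);		/* count in minutes (assumed) */
	else if (t < 18000)
		*bits = (t / 300) | (1 << 6) | (1 << 7); /* 5-minute units (assumed) */
	else
		return -1;
	return 0;
}

int main(void)
{
	int samples[] = { 30, 300, 7200 };
	unsigned int bits;

	for (int i = 0; i < 3; i++)
		if (!encode_ali_timeout(samples[i], &bits))
			printf("%5d s -> 0x%02x\n", samples[i], bits);
	return 0;
}

Built with cc -std=c99, the program prints the raw register value for a few sample timeouts, which makes the three ranges in the driver easy to verify.
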
diff --git a/drivers/watchdog/alim7101_wdt.c b/drivers/watchdog/alim7101_wdt.c
index 238273c98656..a045ef869439 100644
--- a/drivers/watchdog/alim7101_wdt.c
+++ b/drivers/watchdog/alim7101_wdt.c
@@ -31,9 +31,9 @@
31#include <linux/init.h> 31#include <linux/init.h>
32#include <linux/fs.h> 32#include <linux/fs.h>
33#include <linux/pci.h> 33#include <linux/pci.h>
34#include <linux/io.h>
35#include <linux/uaccess.h>
34 36
35#include <asm/io.h>
36#include <asm/uaccess.h>
37#include <asm/system.h> 37#include <asm/system.h>
38 38
39#define OUR_NAME "alim7101_wdt" 39#define OUR_NAME "alim7101_wdt"
@@ -60,13 +60,17 @@
60 */ 60 */
61 61
62#define WATCHDOG_TIMEOUT 30 /* 30 sec default timeout */ 62#define WATCHDOG_TIMEOUT 30 /* 30 sec default timeout */
63static int timeout = WATCHDOG_TIMEOUT; /* in seconds, will be multiplied by HZ to get seconds to wait for a ping */ 63/* in seconds, will be multiplied by HZ to get seconds to wait for a ping */
64static int timeout = WATCHDOG_TIMEOUT;
64module_param(timeout, int, 0); 65module_param(timeout, int, 0);
65MODULE_PARM_DESC(timeout, "Watchdog timeout in seconds. (1<=timeout<=3600, default=" __MODULE_STRING(WATCHDOG_TIMEOUT) ")"); 66MODULE_PARM_DESC(timeout,
67 "Watchdog timeout in seconds. (1<=timeout<=3600, default="
68 __MODULE_STRING(WATCHDOG_TIMEOUT) ")");
66 69
67static int use_gpio = 0; /* Use the pic (for a1d revision alim7101) */ 70static int use_gpio; /* Use the pic (for a1d revision alim7101) */
68module_param(use_gpio, int, 0); 71module_param(use_gpio, int, 0);
69MODULE_PARM_DESC(use_gpio, "Use the gpio watchdog. (required by old cobalt boards)"); 72MODULE_PARM_DESC(use_gpio,
73 "Use the gpio watchdog (required by old cobalt boards).");
70 74
71static void wdt_timer_ping(unsigned long); 75static void wdt_timer_ping(unsigned long);
72static DEFINE_TIMER(timer, wdt_timer_ping, 0, 1); 76static DEFINE_TIMER(timer, wdt_timer_ping, 0, 1);
@@ -77,8 +81,9 @@ static struct pci_dev *alim7101_pmu;
77 81
78static int nowayout = WATCHDOG_NOWAYOUT; 82static int nowayout = WATCHDOG_NOWAYOUT;
79module_param(nowayout, int, 0); 83module_param(nowayout, int, 0);
80MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped once started (default=" 84MODULE_PARM_DESC(nowayout,
81 __MODULE_STRING(WATCHDOG_NOWAYOUT) ")"); 85 "Watchdog cannot be stopped once started (default="
86 __MODULE_STRING(WATCHDOG_NOWAYOUT) ")");
82 87
83/* 88/*
84 * Whack the dog 89 * Whack the dog
@@ -89,23 +94,26 @@ static void wdt_timer_ping(unsigned long data)
89 /* If we got a heartbeat pulse within the WDT_US_INTERVAL 94 /* If we got a heartbeat pulse within the WDT_US_INTERVAL
90 * we agree to ping the WDT 95 * we agree to ping the WDT
91 */ 96 */
92 char tmp; 97 char tmp;
93 98
94 if(time_before(jiffies, next_heartbeat)) 99 if (time_before(jiffies, next_heartbeat)) {
95 {
96 /* Ping the WDT (this is actually a disarm/arm sequence) */ 100 /* Ping the WDT (this is actually a disarm/arm sequence) */
97 pci_read_config_byte(alim7101_pmu, 0x92, &tmp); 101 pci_read_config_byte(alim7101_pmu, 0x92, &tmp);
98 pci_write_config_byte(alim7101_pmu, ALI_7101_WDT, (tmp & ~ALI_WDT_ARM)); 102 pci_write_config_byte(alim7101_pmu,
99 pci_write_config_byte(alim7101_pmu, ALI_7101_WDT, (tmp | ALI_WDT_ARM)); 103 ALI_7101_WDT, (tmp & ~ALI_WDT_ARM));
104 pci_write_config_byte(alim7101_pmu,
105 ALI_7101_WDT, (tmp | ALI_WDT_ARM));
100 if (use_gpio) { 106 if (use_gpio) {
101 pci_read_config_byte(alim7101_pmu, ALI_7101_GPIO_O, &tmp); 107 pci_read_config_byte(alim7101_pmu,
102 pci_write_config_byte(alim7101_pmu, ALI_7101_GPIO_O, tmp 108 ALI_7101_GPIO_O, &tmp);
103 | 0x20); 109 pci_write_config_byte(alim7101_pmu,
104 pci_write_config_byte(alim7101_pmu, ALI_7101_GPIO_O, tmp 110 ALI_7101_GPIO_O, tmp | 0x20);
105 & ~0x20); 111 pci_write_config_byte(alim7101_pmu,
112 ALI_7101_GPIO_O, tmp & ~0x20);
106 } 113 }
107 } else { 114 } else {
108 printk(KERN_WARNING PFX "Heartbeat lost! Will not ping the watchdog\n"); 115 printk(KERN_WARNING PFX
116 "Heartbeat lost! Will not ping the watchdog\n");
109 } 117 }
110 /* Re-set the timer interval */ 118 /* Re-set the timer interval */
111 mod_timer(&timer, jiffies + WDT_INTERVAL); 119 mod_timer(&timer, jiffies + WDT_INTERVAL);
@@ -117,21 +125,27 @@ static void wdt_timer_ping(unsigned long data)
117 125
118static void wdt_change(int writeval) 126static void wdt_change(int writeval)
119{ 127{
120 char tmp; 128 char tmp;
121 129
122 pci_read_config_byte(alim7101_pmu, ALI_7101_WDT, &tmp); 130 pci_read_config_byte(alim7101_pmu, ALI_7101_WDT, &tmp);
123 if (writeval == WDT_ENABLE) { 131 if (writeval == WDT_ENABLE) {
124 pci_write_config_byte(alim7101_pmu, ALI_7101_WDT, (tmp | ALI_WDT_ARM)); 132 pci_write_config_byte(alim7101_pmu,
133 ALI_7101_WDT, (tmp | ALI_WDT_ARM));
125 if (use_gpio) { 134 if (use_gpio) {
126 pci_read_config_byte(alim7101_pmu, ALI_7101_GPIO_O, &tmp); 135 pci_read_config_byte(alim7101_pmu,
127 pci_write_config_byte(alim7101_pmu, ALI_7101_GPIO_O, tmp & ~0x20); 136 ALI_7101_GPIO_O, &tmp);
137 pci_write_config_byte(alim7101_pmu,
138 ALI_7101_GPIO_O, tmp & ~0x20);
128 } 139 }
129 140
130 } else { 141 } else {
131 pci_write_config_byte(alim7101_pmu, ALI_7101_WDT, (tmp & ~ALI_WDT_ARM)); 142 pci_write_config_byte(alim7101_pmu,
143 ALI_7101_WDT, (tmp & ~ALI_WDT_ARM));
132 if (use_gpio) { 144 if (use_gpio) {
133 pci_read_config_byte(alim7101_pmu, ALI_7101_GPIO_O, &tmp); 145 pci_read_config_byte(alim7101_pmu,
134 pci_write_config_byte(alim7101_pmu, ALI_7101_GPIO_O, tmp | 0x20); 146 ALI_7101_GPIO_O, &tmp);
147 pci_write_config_byte(alim7101_pmu,
148 ALI_7101_GPIO_O, tmp | 0x20);
135 } 149 }
136 } 150 }
137} 151}
@@ -169,10 +183,11 @@ static void wdt_keepalive(void)
169 * /dev/watchdog handling 183 * /dev/watchdog handling
170 */ 184 */
171 185
172static ssize_t fop_write(struct file * file, const char __user * buf, size_t count, loff_t * ppos) 186static ssize_t fop_write(struct file *file, const char __user *buf,
187 size_t count, loff_t *ppos)
173{ 188{
174 /* See if we got the magic character 'V' and reload the timer */ 189 /* See if we got the magic character 'V' and reload the timer */
175 if(count) { 190 if (count) {
176 if (!nowayout) { 191 if (!nowayout) {
177 size_t ofs; 192 size_t ofs;
178 193
@@ -183,7 +198,7 @@ static ssize_t fop_write(struct file * file, const char __user * buf, size_t cou
183 /* now scan */ 198 /* now scan */
184 for (ofs = 0; ofs != count; ofs++) { 199 for (ofs = 0; ofs != count; ofs++) {
185 char c; 200 char c;
186 if (get_user(c, buf+ofs)) 201 if (get_user(c, buf + ofs))
187 return -EFAULT; 202 return -EFAULT;
188 if (c == 'V') 203 if (c == 'V')
189 wdt_expect_close = 42; 204 wdt_expect_close = 42;
@@ -195,119 +210,116 @@ static ssize_t fop_write(struct file * file, const char __user * buf, size_t cou
195 return count; 210 return count;
196} 211}
197 212
198static int fop_open(struct inode * inode, struct file * file) 213static int fop_open(struct inode *inode, struct file *file)
199{ 214{
200 /* Just in case we're already talking to someone... */ 215 /* Just in case we're already talking to someone... */
201 if(test_and_set_bit(0, &wdt_is_open)) 216 if (test_and_set_bit(0, &wdt_is_open))
202 return -EBUSY; 217 return -EBUSY;
203 /* Good, fire up the show */ 218 /* Good, fire up the show */
204 wdt_startup(); 219 wdt_startup();
205 return nonseekable_open(inode, file); 220 return nonseekable_open(inode, file);
206} 221}
207 222
208static int fop_close(struct inode * inode, struct file * file) 223static int fop_close(struct inode *inode, struct file *file)
209{ 224{
210 if(wdt_expect_close == 42) 225 if (wdt_expect_close == 42)
211 wdt_turnoff(); 226 wdt_turnoff();
212 else { 227 else {
213 /* wim: shouldn't there be a: del_timer(&timer); */ 228 /* wim: shouldn't there be a: del_timer(&timer); */
214 printk(KERN_CRIT PFX "device file closed unexpectedly. Will not stop the WDT!\n"); 229 printk(KERN_CRIT PFX
230 "device file closed unexpectedly. Will not stop the WDT!\n");
215 } 231 }
216 clear_bit(0, &wdt_is_open); 232 clear_bit(0, &wdt_is_open);
217 wdt_expect_close = 0; 233 wdt_expect_close = 0;
218 return 0; 234 return 0;
219} 235}
220 236
221static int fop_ioctl(struct inode *inode, struct file *file, unsigned int cmd, unsigned long arg) 237static long fop_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
222{ 238{
223 void __user *argp = (void __user *)arg; 239 void __user *argp = (void __user *)arg;
224 int __user *p = argp; 240 int __user *p = argp;
225 static struct watchdog_info ident = 241 static struct watchdog_info ident = {
226 { 242 .options = WDIOF_KEEPALIVEPING | WDIOF_SETTIMEOUT
227 .options = WDIOF_KEEPALIVEPING | WDIOF_SETTIMEOUT | WDIOF_MAGICCLOSE, 243 | WDIOF_MAGICCLOSE,
228 .firmware_version = 1, 244 .firmware_version = 1,
229 .identity = "ALiM7101", 245 .identity = "ALiM7101",
230 }; 246 };
231 247
232 switch(cmd) 248 switch (cmd) {
249 case WDIOC_GETSUPPORT:
250 return copy_to_user(argp, &ident, sizeof(ident)) ? -EFAULT : 0;
251 case WDIOC_GETSTATUS:
252 case WDIOC_GETBOOTSTATUS:
253 return put_user(0, p);
254 case WDIOC_SETOPTIONS:
233 { 255 {
234 case WDIOC_GETSUPPORT: 256 int new_options, retval = -EINVAL;
235 return copy_to_user(argp, &ident, sizeof(ident))?-EFAULT:0;
236 case WDIOC_GETSTATUS:
237 case WDIOC_GETBOOTSTATUS:
238 return put_user(0, p);
239 case WDIOC_KEEPALIVE:
240 wdt_keepalive();
241 return 0;
242 case WDIOC_SETOPTIONS:
243 {
244 int new_options, retval = -EINVAL;
245
246 if(get_user(new_options, p))
247 return -EFAULT;
248
249 if(new_options & WDIOS_DISABLECARD) {
250 wdt_turnoff();
251 retval = 0;
252 }
253 257
254 if(new_options & WDIOS_ENABLECARD) { 258 if (get_user(new_options, p))
255 wdt_startup(); 259 return -EFAULT;
256 retval = 0; 260 if (new_options & WDIOS_DISABLECARD) {
257 } 261 wdt_turnoff();
258 262 retval = 0;
259 return retval;
260 } 263 }
261 case WDIOC_SETTIMEOUT: 264 if (new_options & WDIOS_ENABLECARD) {
262 { 265 wdt_startup();
263 int new_timeout; 266 retval = 0;
264
265 if(get_user(new_timeout, p))
266 return -EFAULT;
267
268 if(new_timeout < 1 || new_timeout > 3600) /* arbitrary upper limit */
269 return -EINVAL;
270
271 timeout = new_timeout;
272 wdt_keepalive();
273 /* Fall through */
274 } 267 }
275 case WDIOC_GETTIMEOUT: 268 return retval;
276 return put_user(timeout, p); 269 }
277 default: 270 case WDIOC_KEEPALIVE:
278 return -ENOTTY; 271 wdt_keepalive();
272 return 0;
273 case WDIOC_SETTIMEOUT:
274 {
275 int new_timeout;
276
277 if (get_user(new_timeout, p))
278 return -EFAULT;
279 /* arbitrary upper limit */
280 if (new_timeout < 1 || new_timeout > 3600)
281 return -EINVAL;
282 timeout = new_timeout;
283 wdt_keepalive();
284 /* Fall through */
285 }
286 case WDIOC_GETTIMEOUT:
287 return put_user(timeout, p);
288 default:
289 return -ENOTTY;
279 } 290 }
280} 291}
281 292
282static const struct file_operations wdt_fops = { 293static const struct file_operations wdt_fops = {
283 .owner= THIS_MODULE, 294 .owner = THIS_MODULE,
284 .llseek= no_llseek, 295 .llseek = no_llseek,
285 .write= fop_write, 296 .write = fop_write,
286 .open= fop_open, 297 .open = fop_open,
287 .release= fop_close, 298 .release = fop_close,
288 .ioctl= fop_ioctl, 299 .unlocked_ioctl = fop_ioctl,
289}; 300};
290 301
291static struct miscdevice wdt_miscdev = { 302static struct miscdevice wdt_miscdev = {
292 .minor=WATCHDOG_MINOR, 303 .minor = WATCHDOG_MINOR,
293 .name="watchdog", 304 .name = "watchdog",
294 .fops=&wdt_fops, 305 .fops = &wdt_fops,
295}; 306};
296 307
297/* 308/*
298 * Notifier for system down 309 * Notifier for system down
299 */ 310 */
300 311
301static int wdt_notify_sys(struct notifier_block *this, unsigned long code, void *unused) 312static int wdt_notify_sys(struct notifier_block *this,
313 unsigned long code, void *unused)
302{ 314{
303 if (code==SYS_DOWN || code==SYS_HALT) 315 if (code == SYS_DOWN || code == SYS_HALT)
304 wdt_turnoff(); 316 wdt_turnoff();
305 317
306 if (code==SYS_RESTART) { 318 if (code == SYS_RESTART) {
307 /* 319 /*
308 * Cobalt devices have no way of rebooting themselves other than 320 * Cobalt devices have no way of rebooting themselves other
309 * getting the watchdog to pull reset, so we restart the watchdog on 321 * than getting the watchdog to pull reset, so we restart the
310 * reboot with no heartbeat 322 * watchdog on reboot with no heartbeat
311 */ 323 */
312 wdt_change(WDT_ENABLE); 324 wdt_change(WDT_ENABLE);
313 printk(KERN_INFO PFX "Watchdog timer is now enabled with no heartbeat - should reboot in ~1 second.\n"); 325 printk(KERN_INFO PFX "Watchdog timer is now enabled with no heartbeat - should reboot in ~1 second.\n");
@@ -320,8 +332,7 @@ static int wdt_notify_sys(struct notifier_block *this, unsigned long code, void
320 * turn the timebomb registers off. 332 * turn the timebomb registers off.
321 */ 333 */
322 334
323static struct notifier_block wdt_notifier= 335static struct notifier_block wdt_notifier = {
324{
325 .notifier_call = wdt_notify_sys, 336 .notifier_call = wdt_notify_sys,
326}; 337};
327 338
@@ -354,7 +365,8 @@ static int __init alim7101_wdt_init(void)
354 ali1543_south = pci_get_device(PCI_VENDOR_ID_AL, PCI_DEVICE_ID_AL_M1533, 365 ali1543_south = pci_get_device(PCI_VENDOR_ID_AL, PCI_DEVICE_ID_AL_M1533,
355 NULL); 366 NULL);
356 if (!ali1543_south) { 367 if (!ali1543_south) {
357 printk(KERN_INFO PFX "ALi 1543 South-Bridge not present - WDT not set\n"); 368 printk(KERN_INFO PFX
369 "ALi 1543 South-Bridge not present - WDT not set\n");
358 goto err_out; 370 goto err_out;
359 } 371 }
360 pci_read_config_byte(ali1543_south, 0x5e, &tmp); 372 pci_read_config_byte(ali1543_south, 0x5e, &tmp);
@@ -363,24 +375,25 @@ static int __init alim7101_wdt_init(void)
363 if (!use_gpio) { 375 if (!use_gpio) {
364 printk(KERN_INFO PFX "Detected old alim7101 revision 'a1d'. If this is a cobalt board, set the 'use_gpio' module parameter.\n"); 376 printk(KERN_INFO PFX "Detected old alim7101 revision 'a1d'. If this is a cobalt board, set the 'use_gpio' module parameter.\n");
365 goto err_out; 377 goto err_out;
366 } 378 }
367 nowayout = 1; 379 nowayout = 1;
368 } else if ((tmp & 0x1e) != 0x12 && (tmp & 0x1e) != 0x00) { 380 } else if ((tmp & 0x1e) != 0x12 && (tmp & 0x1e) != 0x00) {
369 printk(KERN_INFO PFX "ALi 1543 South-Bridge does not have the correct revision number (???1001?) - WDT not set\n"); 381 printk(KERN_INFO PFX "ALi 1543 South-Bridge does not have the correct revision number (???1001?) - WDT not set\n");
370 goto err_out; 382 goto err_out;
371 } 383 }
372 384
373 if(timeout < 1 || timeout > 3600) /* arbitrary upper limit */ 385 if (timeout < 1 || timeout > 3600) {
374 { 386 /* arbitrary upper limit */
375 timeout = WATCHDOG_TIMEOUT; 387 timeout = WATCHDOG_TIMEOUT;
376 printk(KERN_INFO PFX "timeout value must be 1<=x<=3600, using %d\n", 388 printk(KERN_INFO PFX
377 timeout); 389 "timeout value must be 1 <= x <= 3600, using %d\n",
390 timeout);
378 } 391 }
379 392
380 rc = register_reboot_notifier(&wdt_notifier); 393 rc = register_reboot_notifier(&wdt_notifier);
381 if (rc) { 394 if (rc) {
382 printk(KERN_ERR PFX "cannot register reboot notifier (err=%d)\n", 395 printk(KERN_ERR PFX
383 rc); 396 "cannot register reboot notifier (err=%d)\n", rc);
384 goto err_out; 397 goto err_out;
385 } 398 }
386 399
@@ -391,9 +404,8 @@ static int __init alim7101_wdt_init(void)
391 goto err_out_reboot; 404 goto err_out_reboot;
392 } 405 }
393 406
394 if (nowayout) { 407 if (nowayout)
395 __module_get(THIS_MODULE); 408 __module_get(THIS_MODULE);
396 }
397 409
398 printk(KERN_INFO PFX "WDT driver for ALi M7101 initialised. timeout=%d sec (nowayout=%d)\n", 410 printk(KERN_INFO PFX "WDT driver for ALi M7101 initialised. timeout=%d sec (nowayout=%d)\n",
399 timeout, nowayout); 411 timeout, nowayout);
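
Both ALi drivers keep the same magic-close convention: a write containing the character 'V' arms an expect-close flag, and release() only stops the hardware if that flag is set. A minimal sketch of the write-side scan follows, using placeholder xxx_* names rather than symbols from either driver, with the hardware pat stubbed out.

#include <linux/fs.h>
#include <linux/uaccess.h>

static int nowayout;			/* set from a module parameter in the real drivers */
static char xxx_expect_close;

static void xxx_keepalive(void)
{
	/* the real drivers pat the hardware timer here */
}

static ssize_t xxx_write(struct file *file, const char __user *buf,
			 size_t count, loff_t *ppos)
{
	if (count) {
		if (!nowayout) {
			size_t i;

			/* forget a 'V' left over from an earlier write */
			xxx_expect_close = 0;

			for (i = 0; i != count; i++) {
				char c;
				if (get_user(c, buf + i))
					return -EFAULT;
				if (c == 'V')
					xxx_expect_close = 42;
			}
		}
		/* any non-empty write counts as a keepalive */
		xxx_keepalive();
	}
	return count;
}
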
diff --git a/drivers/watchdog/ar7_wdt.c b/drivers/watchdog/ar7_wdt.c
index 2eb48c0df32c..55dcbfe2bb72 100644
--- a/drivers/watchdog/ar7_wdt.c
+++ b/drivers/watchdog/ar7_wdt.c
@@ -69,7 +69,8 @@ struct ar7_wdt {
69 u32 prescale; 69 u32 prescale;
70}; 70};
71 71
72static struct semaphore open_semaphore; 72static unsigned long wdt_is_open;
73static spinlock_t wdt_lock;
73static unsigned expect_close; 74static unsigned expect_close;
74 75
75/* XXX currently fixed, allows max margin ~68.72 secs */ 76/* XXX currently fixed, allows max margin ~68.72 secs */
@@ -154,8 +155,10 @@ static void ar7_wdt_update_margin(int new_margin)
154 u32 change; 155 u32 change;
155 156
156 change = new_margin * (ar7_vbus_freq() / prescale_value); 157 change = new_margin * (ar7_vbus_freq() / prescale_value);
157 if (change < 1) change = 1; 158 if (change < 1)
158 if (change > 0xffff) change = 0xffff; 159 change = 1;
160 if (change > 0xffff)
161 change = 0xffff;
159 ar7_wdt_change(change); 162 ar7_wdt_change(change);
160 margin = change * prescale_value / ar7_vbus_freq(); 163 margin = change * prescale_value / ar7_vbus_freq();
161 printk(KERN_INFO DRVNAME 164 printk(KERN_INFO DRVNAME
@@ -179,7 +182,7 @@ static void ar7_wdt_disable_wdt(void)
179static int ar7_wdt_open(struct inode *inode, struct file *file) 182static int ar7_wdt_open(struct inode *inode, struct file *file)
180{ 183{
181 /* only allow one at a time */ 184 /* only allow one at a time */
182 if (down_trylock(&open_semaphore)) 185 if (test_and_set_bit(0, &wdt_is_open))
183 return -EBUSY; 186 return -EBUSY;
184 ar7_wdt_enable_wdt(); 187 ar7_wdt_enable_wdt();
185 expect_close = 0; 188 expect_close = 0;
@@ -195,9 +198,7 @@ static int ar7_wdt_release(struct inode *inode, struct file *file)
195 "will not disable the watchdog timer\n"); 198 "will not disable the watchdog timer\n");
196 else if (!nowayout) 199 else if (!nowayout)
197 ar7_wdt_disable_wdt(); 200 ar7_wdt_disable_wdt();
198 201 clear_bit(0, &wdt_is_open);
199 up(&open_semaphore);
200
201 return 0; 202 return 0;
202} 203}
203 204
@@ -212,7 +213,7 @@ static int ar7_wdt_notify_sys(struct notifier_block *this,
212} 213}
213 214
214static struct notifier_block ar7_wdt_notifier = { 215static struct notifier_block ar7_wdt_notifier = {
215 .notifier_call = ar7_wdt_notify_sys 216 .notifier_call = ar7_wdt_notify_sys,
216}; 217};
217 218
218static ssize_t ar7_wdt_write(struct file *file, const char *data, 219static ssize_t ar7_wdt_write(struct file *file, const char *data,
@@ -222,12 +223,14 @@ static ssize_t ar7_wdt_write(struct file *file, const char *data,
222 if (len) { 223 if (len) {
223 size_t i; 224 size_t i;
224 225
226 spin_lock(&wdt_lock);
225 ar7_wdt_kick(1); 227 ar7_wdt_kick(1);
228 spin_unlock(&wdt_lock);
226 229
227 expect_close = 0; 230 expect_close = 0;
228 for (i = 0; i < len; ++i) { 231 for (i = 0; i < len; ++i) {
229 char c; 232 char c;
230 if (get_user(c, data+i)) 233 if (get_user(c, data + i))
231 return -EFAULT; 234 return -EFAULT;
232 if (c == 'V') 235 if (c == 'V')
233 expect_close = 1; 236 expect_close = 1;
@@ -237,8 +240,8 @@ static ssize_t ar7_wdt_write(struct file *file, const char *data,
237 return len; 240 return len;
238} 241}
239 242
240static int ar7_wdt_ioctl(struct inode *inode, struct file *file, 243static long ar7_wdt_ioctl(struct file *file,
241 unsigned int cmd, unsigned long arg) 244 unsigned int cmd, unsigned long arg)
242{ 245{
243 static struct watchdog_info ident = { 246 static struct watchdog_info ident = {
244 .identity = LONGNAME, 247 .identity = LONGNAME,
@@ -248,8 +251,6 @@ static int ar7_wdt_ioctl(struct inode *inode, struct file *file,
248 int new_margin; 251 int new_margin;
249 252
250 switch (cmd) { 253 switch (cmd) {
251 default:
252 return -ENOTTY;
253 case WDIOC_GETSUPPORT: 254 case WDIOC_GETSUPPORT:
254 if (copy_to_user((struct watchdog_info *)arg, &ident, 255 if (copy_to_user((struct watchdog_info *)arg, &ident,
255 sizeof(ident))) 256 sizeof(ident)))
@@ -269,20 +270,24 @@ static int ar7_wdt_ioctl(struct inode *inode, struct file *file,
269 if (new_margin < 1) 270 if (new_margin < 1)
270 return -EINVAL; 271 return -EINVAL;
271 272
273 spin_lock(&wdt_lock);
272 ar7_wdt_update_margin(new_margin); 274 ar7_wdt_update_margin(new_margin);
273 ar7_wdt_kick(1); 275 ar7_wdt_kick(1);
276 spin_unlock(&wdt_lock);
274 277
275 case WDIOC_GETTIMEOUT: 278 case WDIOC_GETTIMEOUT:
276 if (put_user(margin, (int *)arg)) 279 if (put_user(margin, (int *)arg))
277 return -EFAULT; 280 return -EFAULT;
278 return 0; 281 return 0;
282 default:
283 return -ENOTTY;
279 } 284 }
280} 285}
281 286
282static const struct file_operations ar7_wdt_fops = { 287static const struct file_operations ar7_wdt_fops = {
283 .owner = THIS_MODULE, 288 .owner = THIS_MODULE,
284 .write = ar7_wdt_write, 289 .write = ar7_wdt_write,
285 .ioctl = ar7_wdt_ioctl, 290 .unlocked_ioctl = ar7_wdt_ioctl,
286 .open = ar7_wdt_open, 291 .open = ar7_wdt_open,
287 .release = ar7_wdt_release, 292 .release = ar7_wdt_release,
288}; 293};
@@ -297,6 +302,8 @@ static int __init ar7_wdt_init(void)
297{ 302{
298 int rc; 303 int rc;
299 304
305 spin_lock_init(&wdt_lock);
306
300 ar7_wdt_get_regs(); 307 ar7_wdt_get_regs();
301 308
302 if (!request_mem_region(ar7_regs_wdt, sizeof(struct ar7_wdt), 309 if (!request_mem_region(ar7_regs_wdt, sizeof(struct ar7_wdt),
@@ -312,8 +319,6 @@ static int __init ar7_wdt_init(void)
312 ar7_wdt_prescale(prescale_value); 319 ar7_wdt_prescale(prescale_value);
313 ar7_wdt_update_margin(margin); 320 ar7_wdt_update_margin(margin);
314 321
315 sema_init(&open_semaphore, 1);
316
317 rc = register_reboot_notifier(&ar7_wdt_notifier); 322 rc = register_reboot_notifier(&ar7_wdt_notifier);
318 if (rc) { 323 if (rc) {
319 printk(KERN_ERR DRVNAME 324 printk(KERN_ERR DRVNAME
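
The ar7_wdt change swaps the open-exclusion semaphore for a single bit manipulated with test_and_set_bit()/clear_bit(), and wraps the register kicks in a spinlock. Here is a compact sketch of that idiom under placeholder xxx_* names; the locked region is left empty where the driver would poke its registers.

#include <linux/fs.h>
#include <linux/bitops.h>
#include <linux/spinlock.h>

static unsigned long xxx_is_open;
static DEFINE_SPINLOCK(xxx_lock);

static int xxx_open(struct inode *inode, struct file *file)
{
	/* only one process may hold the device at a time */
	if (test_and_set_bit(0, &xxx_is_open))
		return -EBUSY;
	return nonseekable_open(inode, file);
}

static int xxx_release(struct inode *inode, struct file *file)
{
	clear_bit(0, &xxx_is_open);
	return 0;
}

static void xxx_kick(void)
{
	spin_lock(&xxx_lock);
	/* hardware register pokes would go here */
	spin_unlock(&xxx_lock);
}
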
diff --git a/drivers/watchdog/at32ap700x_wdt.c b/drivers/watchdog/at32ap700x_wdt.c
index ae0fca5e8749..e8ae638e5804 100644
--- a/drivers/watchdog/at32ap700x_wdt.c
+++ b/drivers/watchdog/at32ap700x_wdt.c
@@ -212,8 +212,8 @@ static struct watchdog_info at32_wdt_info = {
212/* 212/*
213 * Handle commands from user-space. 213 * Handle commands from user-space.
214 */ 214 */
215static int at32_wdt_ioctl(struct inode *inode, struct file *file, 215static long at32_wdt_ioctl(struct file *file,
216 unsigned int cmd, unsigned long arg) 216 unsigned int cmd, unsigned long arg)
217{ 217{
218 int ret = -ENOTTY; 218 int ret = -ENOTTY;
219 int time; 219 int time;
@@ -221,27 +221,10 @@ static int at32_wdt_ioctl(struct inode *inode, struct file *file,
221 int __user *p = argp; 221 int __user *p = argp;
222 222
223 switch (cmd) { 223 switch (cmd) {
224 case WDIOC_KEEPALIVE:
225 at32_wdt_pat();
226 ret = 0;
227 break;
228 case WDIOC_GETSUPPORT: 224 case WDIOC_GETSUPPORT:
229 ret = copy_to_user(argp, &at32_wdt_info, 225 ret = copy_to_user(argp, &at32_wdt_info,
230 sizeof(at32_wdt_info)) ? -EFAULT : 0; 226 sizeof(at32_wdt_info)) ? -EFAULT : 0;
231 break; 227 break;
232 case WDIOC_SETTIMEOUT:
233 ret = get_user(time, p);
234 if (ret)
235 break;
236 ret = at32_wdt_settimeout(time);
237 if (ret)
238 break;
239 /* Enable new time value */
240 at32_wdt_start();
241 /* fall through */
242 case WDIOC_GETTIMEOUT:
243 ret = put_user(wdt->timeout, p);
244 break;
245 case WDIOC_GETSTATUS: 228 case WDIOC_GETSTATUS:
246 ret = put_user(0, p); 229 ret = put_user(0, p);
247 break; 230 break;
@@ -258,6 +241,23 @@ static int at32_wdt_ioctl(struct inode *inode, struct file *file,
258 at32_wdt_start(); 241 at32_wdt_start();
259 ret = 0; 242 ret = 0;
260 break; 243 break;
244 case WDIOC_KEEPALIVE:
245 at32_wdt_pat();
246 ret = 0;
247 break;
248 case WDIOC_SETTIMEOUT:
249 ret = get_user(time, p);
250 if (ret)
251 break;
252 ret = at32_wdt_settimeout(time);
253 if (ret)
254 break;
255 /* Enable new time value */
256 at32_wdt_start();
257 /* fall through */
258 case WDIOC_GETTIMEOUT:
259 ret = put_user(wdt->timeout, p);
260 break;
261 } 261 }
262 262
263 return ret; 263 return ret;
@@ -283,7 +283,7 @@ static ssize_t at32_wdt_write(struct file *file, const char __user *data,
283 */ 283 */
284 for (i = 0; i != len; i++) { 284 for (i = 0; i != len; i++) {
285 char c; 285 char c;
286 if (get_user(c, data+i)) 286 if (get_user(c, data + i))
287 return -EFAULT; 287 return -EFAULT;
288 if (c == 'V') 288 if (c == 'V')
289 expect_release = 42; 289 expect_release = 42;
@@ -298,7 +298,7 @@ static ssize_t at32_wdt_write(struct file *file, const char __user *data,
298static const struct file_operations at32_wdt_fops = { 298static const struct file_operations at32_wdt_fops = {
299 .owner = THIS_MODULE, 299 .owner = THIS_MODULE,
300 .llseek = no_llseek, 300 .llseek = no_llseek,
301 .ioctl = at32_wdt_ioctl, 301 .unlocked_ioctl = at32_wdt_ioctl,
302 .open = at32_wdt_open, 302 .open = at32_wdt_open,
303 .release = at32_wdt_close, 303 .release = at32_wdt_close,
304 .write = at32_wdt_write, 304 .write = at32_wdt_write,
@@ -391,7 +391,6 @@ static int __exit at32_wdt_remove(struct platform_device *pdev)
391 wdt = NULL; 391 wdt = NULL;
392 platform_set_drvdata(pdev, NULL); 392 platform_set_drvdata(pdev, NULL);
393 } 393 }
394
395 return 0; 394 return 0;
396} 395}
397 396
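
Every driver in this series is converted from the locked .ioctl hook to .unlocked_ioctl: the handler drops the inode argument, returns long, and no longer runs under the Big Kernel Lock, so any serialisation it needs must be supplied locally. A skeleton of the converted shape, reduced to the two status queries and using placeholder xxx_* names, looks like this.

#include <linux/module.h>
#include <linux/fs.h>
#include <linux/watchdog.h>
#include <linux/uaccess.h>

static long xxx_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
	int __user *p = (int __user *)arg;

	switch (cmd) {
	case WDIOC_GETSTATUS:
	case WDIOC_GETBOOTSTATUS:
		return put_user(0, p);
	default:
		return -ENOTTY;
	}
}

static const struct file_operations xxx_fops = {
	.owner		= THIS_MODULE,
	.llseek		= no_llseek,
	.unlocked_ioctl	= xxx_ioctl,	/* was .ioctl before the conversion */
};
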
diff --git a/drivers/watchdog/at91rm9200_wdt.c b/drivers/watchdog/at91rm9200_wdt.c
index 9ff9a9565320..bacd867dd22e 100644
--- a/drivers/watchdog/at91rm9200_wdt.c
+++ b/drivers/watchdog/at91rm9200_wdt.c
@@ -20,9 +20,8 @@
20#include <linux/platform_device.h> 20#include <linux/platform_device.h>
21#include <linux/types.h> 21#include <linux/types.h>
22#include <linux/watchdog.h> 22#include <linux/watchdog.h>
23#include <asm/uaccess.h> 23#include <linux/uaccess.h>
24#include <asm/arch/at91_st.h> 24#include <mach/at91_st.h>
25
26 25
27#define WDT_DEFAULT_TIME 5 /* seconds */ 26#define WDT_DEFAULT_TIME 5 /* seconds */
28#define WDT_MAX_TIME 256 /* seconds */ 27#define WDT_MAX_TIME 256 /* seconds */
@@ -31,11 +30,14 @@ static int wdt_time = WDT_DEFAULT_TIME;
31static int nowayout = WATCHDOG_NOWAYOUT; 30static int nowayout = WATCHDOG_NOWAYOUT;
32 31
33module_param(wdt_time, int, 0); 32module_param(wdt_time, int, 0);
34MODULE_PARM_DESC(wdt_time, "Watchdog time in seconds. (default="__MODULE_STRING(WDT_DEFAULT_TIME) ")"); 33MODULE_PARM_DESC(wdt_time, "Watchdog time in seconds. (default="
34 __MODULE_STRING(WDT_DEFAULT_TIME) ")");
35 35
36#ifdef CONFIG_WATCHDOG_NOWAYOUT 36#ifdef CONFIG_WATCHDOG_NOWAYOUT
37module_param(nowayout, int, 0); 37module_param(nowayout, int, 0);
38MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped once started (default=" __MODULE_STRING(WATCHDOG_NOWAYOUT) ")"); 38MODULE_PARM_DESC(nowayout,
39 "Watchdog cannot be stopped once started (default="
40 __MODULE_STRING(WATCHDOG_NOWAYOUT) ")");
39#endif 41#endif
40 42
41 43
@@ -46,7 +48,7 @@ static unsigned long at91wdt_busy;
46/* 48/*
47 * Disable the watchdog. 49 * Disable the watchdog.
48 */ 50 */
49static void inline at91_wdt_stop(void) 51static inline void at91_wdt_stop(void)
50{ 52{
51 at91_sys_write(AT91_ST_WDMR, AT91_ST_EXTEN); 53 at91_sys_write(AT91_ST_WDMR, AT91_ST_EXTEN);
52} 54}
@@ -54,16 +56,17 @@ static void inline at91_wdt_stop(void)
54/* 56/*
55 * Enable and reset the watchdog. 57 * Enable and reset the watchdog.
56 */ 58 */
57static void inline at91_wdt_start(void) 59static inline void at91_wdt_start(void)
58{ 60{
59 at91_sys_write(AT91_ST_WDMR, AT91_ST_EXTEN | AT91_ST_RSTEN | (((65536 * wdt_time) >> 8) & AT91_ST_WDV)); 61 at91_sys_write(AT91_ST_WDMR, AT91_ST_EXTEN | AT91_ST_RSTEN |
62 (((65536 * wdt_time) >> 8) & AT91_ST_WDV));
60 at91_sys_write(AT91_ST_CR, AT91_ST_WDRST); 63 at91_sys_write(AT91_ST_CR, AT91_ST_WDRST);
61} 64}
62 65
63/* 66/*
64 * Reload the watchdog timer. (ie, pat the watchdog) 67 * Reload the watchdog timer. (ie, pat the watchdog)
65 */ 68 */
66static void inline at91_wdt_reload(void) 69static inline void at91_wdt_reload(void)
67{ 70{
68 at91_sys_write(AT91_ST_CR, AT91_ST_WDRST); 71 at91_sys_write(AT91_ST_CR, AT91_ST_WDRST);
69} 72}
@@ -89,8 +92,9 @@ static int at91_wdt_open(struct inode *inode, struct file *file)
89 */ 92 */
90static int at91_wdt_close(struct inode *inode, struct file *file) 93static int at91_wdt_close(struct inode *inode, struct file *file)
91{ 94{
95 /* Disable the watchdog when file is closed */
92 if (!nowayout) 96 if (!nowayout)
93 at91_wdt_stop(); /* Disable the watchdog when file is closed */ 97 at91_wdt_stop();
94 98
95 clear_bit(0, &at91wdt_busy); 99 clear_bit(0, &at91wdt_busy);
96 return 0; 100 return 0;
@@ -110,7 +114,8 @@ static int at91_wdt_settimeout(int new_time)
110 if ((new_time <= 0) || (new_time > WDT_MAX_TIME)) 114 if ((new_time <= 0) || (new_time > WDT_MAX_TIME))
111 return -EINVAL; 115 return -EINVAL;
112 116
113 /* Set new watchdog time. It will be used when at91_wdt_start() is called. */ 117 /* Set new watchdog time. It will be used when
118 at91_wdt_start() is called. */
114 wdt_time = new_time; 119 wdt_time = new_time;
115 return 0; 120 return 0;
116} 121}
@@ -123,60 +128,52 @@ static struct watchdog_info at91_wdt_info = {
123/* 128/*
124 * Handle commands from user-space. 129 * Handle commands from user-space.
125 */ 130 */
126static int at91_wdt_ioctl(struct inode *inode, struct file *file, 131static long at91_wdt_ioctl(struct file *file,
127 unsigned int cmd, unsigned long arg) 132 unsigned int cmd, unsigned long arg)
128{ 133{
129 void __user *argp = (void __user *)arg; 134 void __user *argp = (void __user *)arg;
130 int __user *p = argp; 135 int __user *p = argp;
131 int new_value; 136 int new_value;
132 137
133 switch(cmd) { 138 switch (cmd) {
134 case WDIOC_KEEPALIVE: 139 case WDIOC_GETSUPPORT:
135 at91_wdt_reload(); /* pat the watchdog */ 140 return copy_to_user(argp, &at91_wdt_info,
136 return 0; 141 sizeof(at91_wdt_info)) ? -EFAULT : 0;
137 142 case WDIOC_GETSTATUS:
138 case WDIOC_GETSUPPORT: 143 case WDIOC_GETBOOTSTATUS:
139 return copy_to_user(argp, &at91_wdt_info, sizeof(at91_wdt_info)) ? -EFAULT : 0; 144 return put_user(0, p);
140 145 case WDIOC_SETOPTIONS:
141 case WDIOC_SETTIMEOUT: 146 if (get_user(new_value, p))
142 if (get_user(new_value, p)) 147 return -EFAULT;
143 return -EFAULT; 148 if (new_value & WDIOS_DISABLECARD)
144 149 at91_wdt_stop();
145 if (at91_wdt_settimeout(new_value)) 150 if (new_value & WDIOS_ENABLECARD)
146 return -EINVAL;
147
148 /* Enable new time value */
149 at91_wdt_start(); 151 at91_wdt_start();
150 152 return 0;
151 /* Return current value */ 153 case WDIOC_KEEPALIVE:
152 return put_user(wdt_time, p); 154 at91_wdt_reload(); /* pat the watchdog */
153 155 return 0;
154 case WDIOC_GETTIMEOUT: 156 case WDIOC_SETTIMEOUT:
155 return put_user(wdt_time, p); 157 if (get_user(new_value, p))
156 158 return -EFAULT;
157 case WDIOC_GETSTATUS: 159 if (at91_wdt_settimeout(new_value))
158 case WDIOC_GETBOOTSTATUS: 160 return -EINVAL;
159 return put_user(0, p); 161 /* Enable new time value */
160 162 at91_wdt_start();
161 case WDIOC_SETOPTIONS: 163 /* Return current value */
162 if (get_user(new_value, p)) 164 return put_user(wdt_time, p);
163 return -EFAULT; 165 case WDIOC_GETTIMEOUT:
164 166 return put_user(wdt_time, p);
165 if (new_value & WDIOS_DISABLECARD) 167 default:
166 at91_wdt_stop(); 168 return -ENOTTY;
167 if (new_value & WDIOS_ENABLECARD)
168 at91_wdt_start();
169 return 0;
170
171 default:
172 return -ENOTTY;
173 } 169 }
174} 170}
175 171
176/* 172/*
177 * Pat the watchdog whenever device is written to. 173 * Pat the watchdog whenever device is written to.
178 */ 174 */
179static ssize_t at91_wdt_write(struct file *file, const char *data, size_t len, loff_t *ppos) 175static ssize_t at91_wdt_write(struct file *file, const char *data,
176 size_t len, loff_t *ppos)
180{ 177{
181 at91_wdt_reload(); /* pat the watchdog */ 178 at91_wdt_reload(); /* pat the watchdog */
182 return len; 179 return len;
@@ -187,7 +184,7 @@ static ssize_t at91_wdt_write(struct file *file, const char *data, size_t len, l
187static const struct file_operations at91wdt_fops = { 184static const struct file_operations at91wdt_fops = {
188 .owner = THIS_MODULE, 185 .owner = THIS_MODULE,
189 .llseek = no_llseek, 186 .llseek = no_llseek,
190 .ioctl = at91_wdt_ioctl, 187 .unlocked_ioctl = at91_wdt_ioctl,
191 .open = at91_wdt_open, 188 .open = at91_wdt_open,
192 .release = at91_wdt_close, 189 .release = at91_wdt_close,
193 .write = at91_wdt_write, 190 .write = at91_wdt_write,
@@ -211,7 +208,8 @@ static int __init at91wdt_probe(struct platform_device *pdev)
211 if (res) 208 if (res)
212 return res; 209 return res;
213 210
214 printk("AT91 Watchdog Timer enabled (%d seconds%s)\n", wdt_time, nowayout ? ", nowayout" : ""); 211 printk(KERN_INFO "AT91 Watchdog Timer enabled (%d seconds%s)\n",
212 wdt_time, nowayout ? ", nowayout" : "");
215 return 0; 213 return 0;
216} 214}
217 215
@@ -265,7 +263,8 @@ static struct platform_driver at91wdt_driver = {
265 263
266static int __init at91_wdt_init(void) 264static int __init at91_wdt_init(void)
267{ 265{
268 /* Check that the heartbeat value is within range; if not reset to the default */ 266 /* Check that the heartbeat value is within range;
267 if not reset to the default */
269 if (at91_wdt_settimeout(wdt_time)) { 268 if (at91_wdt_settimeout(wdt_time)) {
270 at91_wdt_settimeout(WDT_DEFAULT_TIME); 269 at91_wdt_settimeout(WDT_DEFAULT_TIME);
271 pr_info("at91_wdt: wdt_time value must be 1 <= wdt_time <= 256, using %d\n", wdt_time); 270 pr_info("at91_wdt: wdt_time value must be 1 <= wdt_time <= 256, using %d\n", wdt_time);
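
at91_wdt_start() above loads the watchdog mode register with (65536 * wdt_time) >> 8 masked to the counter field. The small user-space program below tabulates that expression for a few timeouts; reading the factor as a 256 Hz watchdog tick (32.768 kHz slow clock prescaled by 128) is an assumption drawn from the arithmetic, only the expression itself comes from the hunk.

#include <stdio.h>

/* 16-bit counter mask mirroring the masking in the hunk (assumed header value) */
#define AT91_ST_WDV 0xffff

int main(void)
{
	int samples[] = { 1, 5, 60, 255 };

	for (int i = 0; i < 4; i++) {
		int wdt_time = samples[i];
		unsigned int wdv = ((65536 * wdt_time) >> 8) & AT91_ST_WDV;

		printf("wdt_time=%3d s -> WDV=%5u (0x%04x)\n",
		       wdt_time, wdv, wdv);
	}
	return 0;
}
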
diff --git a/drivers/watchdog/bfin_wdt.c b/drivers/watchdog/bfin_wdt.c
index 03b3e3d91e7c..31b42253054e 100644
--- a/drivers/watchdog/bfin_wdt.c
+++ b/drivers/watchdog/bfin_wdt.c
@@ -24,8 +24,8 @@
24#include <linux/reboot.h> 24#include <linux/reboot.h>
25#include <linux/init.h> 25#include <linux/init.h>
26#include <linux/interrupt.h> 26#include <linux/interrupt.h>
27#include <linux/uaccess.h>
27#include <asm/blackfin.h> 28#include <asm/blackfin.h>
28#include <asm/uaccess.h>
29 29
30#define stamp(fmt, args...) pr_debug("%s:%i: " fmt "\n", __func__, __LINE__, ## args) 30#define stamp(fmt, args...) pr_debug("%s:%i: " fmt "\n", __func__, __LINE__, ## args)
31#define stampit() stamp("here i am") 31#define stampit() stamp("here i am")
@@ -148,7 +148,8 @@ static int bfin_wdt_set_timeout(unsigned long t)
148 int run = bfin_wdt_running(); 148 int run = bfin_wdt_running();
149 bfin_wdt_stop(); 149 bfin_wdt_stop();
150 bfin_write_WDOG_CNT(cnt); 150 bfin_write_WDOG_CNT(cnt);
151 if (run) bfin_wdt_start(); 151 if (run)
152 bfin_wdt_start();
152 } 153 }
153 spin_unlock_irqrestore(&bfin_wdt_spinlock, flags); 154 spin_unlock_irqrestore(&bfin_wdt_spinlock, flags);
154 155
@@ -191,16 +192,15 @@ static int bfin_wdt_release(struct inode *inode, struct file *file)
191{ 192{
192 stampit(); 193 stampit();
193 194
194 if (expect_close == 42) { 195 if (expect_close == 42)
195 bfin_wdt_stop(); 196 bfin_wdt_stop();
196 } else { 197 else {
197 printk(KERN_CRIT PFX "Unexpected close, not stopping watchdog!\n"); 198 printk(KERN_CRIT PFX
199 "Unexpected close, not stopping watchdog!\n");
198 bfin_wdt_keepalive(); 200 bfin_wdt_keepalive();
199 } 201 }
200
201 expect_close = 0; 202 expect_close = 0;
202 clear_bit(0, &open_check); 203 clear_bit(0, &open_check);
203
204 return 0; 204 return 0;
205} 205}
206 206
@@ -214,7 +214,7 @@ static int bfin_wdt_release(struct inode *inode, struct file *file)
214 * Pings the watchdog on write. 214 * Pings the watchdog on write.
215 */ 215 */
216static ssize_t bfin_wdt_write(struct file *file, const char __user *data, 216static ssize_t bfin_wdt_write(struct file *file, const char __user *data,
217 size_t len, loff_t *ppos) 217 size_t len, loff_t *ppos)
218{ 218{
219 stampit(); 219 stampit();
220 220
@@ -241,7 +241,6 @@ static ssize_t bfin_wdt_write(struct file *file, const char __user *data,
241 241
242/** 242/**
243 * bfin_wdt_ioctl - Query Device 243 * bfin_wdt_ioctl - Query Device
244 * @inode: inode of device
245 * @file: file handle of device 244 * @file: file handle of device
246 * @cmd: watchdog command 245 * @cmd: watchdog command
247 * @arg: argument 246 * @arg: argument
@@ -249,8 +248,8 @@ static ssize_t bfin_wdt_write(struct file *file, const char __user *data,
249 * Query basic information from the device or ping it, as outlined by the 248 * Query basic information from the device or ping it, as outlined by the
250 * watchdog API. 249 * watchdog API.
251 */ 250 */
252static int bfin_wdt_ioctl(struct inode *inode, struct file *file, 251static long bfin_wdt_ioctl(struct file *file,
253 unsigned int cmd, unsigned long arg) 252 unsigned int cmd, unsigned long arg)
254{ 253{
255 void __user *argp = (void __user *)arg; 254 void __user *argp = (void __user *)arg;
256 int __user *p = argp; 255 int __user *p = argp;
@@ -258,59 +257,49 @@ static int bfin_wdt_ioctl(struct inode *inode, struct file *file,
258 stampit(); 257 stampit();
259 258
260 switch (cmd) { 259 switch (cmd) {
261 default: 260 case WDIOC_GETSUPPORT:
262 return -ENOTTY; 261 if (copy_to_user(argp, &bfin_wdt_info, sizeof(bfin_wdt_info)))
263 262 return -EFAULT;
264 case WDIOC_GETSUPPORT: 263 else
265 if (copy_to_user(argp, &bfin_wdt_info, sizeof(bfin_wdt_info)))
266 return -EFAULT;
267 else
268 return 0;
269
270 case WDIOC_GETSTATUS:
271 case WDIOC_GETBOOTSTATUS:
272 return put_user(!!(_bfin_swrst & SWRST_RESET_WDOG), p);
273
274 case WDIOC_KEEPALIVE:
275 bfin_wdt_keepalive();
276 return 0; 264 return 0;
277 265 case WDIOC_GETSTATUS:
278 case WDIOC_SETTIMEOUT: { 266 case WDIOC_GETBOOTSTATUS:
279 int new_timeout; 267 return put_user(!!(_bfin_swrst & SWRST_RESET_WDOG), p);
280 268 case WDIOC_SETOPTIONS: {
281 if (get_user(new_timeout, p)) 269 unsigned long flags;
282 return -EFAULT; 270 int options, ret = -EINVAL;
283 271
284 if (bfin_wdt_set_timeout(new_timeout)) 272 if (get_user(options, p))
285 return -EINVAL; 273 return -EFAULT;
274
275 spin_lock_irqsave(&bfin_wdt_spinlock, flags);
276 if (options & WDIOS_DISABLECARD) {
277 bfin_wdt_stop();
278 ret = 0;
286 } 279 }
287 /* Fall */ 280 if (options & WDIOS_ENABLECARD) {
288 case WDIOC_GETTIMEOUT: 281 bfin_wdt_start();
289 return put_user(timeout, p); 282 ret = 0;
290
291 case WDIOC_SETOPTIONS: {
292 unsigned long flags;
293 int options, ret = -EINVAL;
294
295 if (get_user(options, p))
296 return -EFAULT;
297
298 spin_lock_irqsave(&bfin_wdt_spinlock, flags);
299
300 if (options & WDIOS_DISABLECARD) {
301 bfin_wdt_stop();
302 ret = 0;
303 }
304
305 if (options & WDIOS_ENABLECARD) {
306 bfin_wdt_start();
307 ret = 0;
308 }
309
310 spin_unlock_irqrestore(&bfin_wdt_spinlock, flags);
311
312 return ret;
313 } 283 }
284 spin_unlock_irqrestore(&bfin_wdt_spinlock, flags);
285 return ret;
286 }
287 case WDIOC_KEEPALIVE:
288 bfin_wdt_keepalive();
289 return 0;
290 case WDIOC_SETTIMEOUT: {
291 int new_timeout;
292
293 if (get_user(new_timeout, p))
294 return -EFAULT;
295 if (bfin_wdt_set_timeout(new_timeout))
296 return -EINVAL;
297 }
298 /* Fall */
299 case WDIOC_GETTIMEOUT:
300 return put_user(timeout, p);
301 default:
302 return -ENOTTY;
314 } 303 }
315} 304}
316 305
@@ -323,8 +312,8 @@ static int bfin_wdt_ioctl(struct inode *inode, struct file *file,
323 * Handles specific events, such as turning off the watchdog during a 312 * Handles specific events, such as turning off the watchdog during a
324 * shutdown event. 313 * shutdown event.
325 */ 314 */
326static int bfin_wdt_notify_sys(struct notifier_block *this, unsigned long code, 315static int bfin_wdt_notify_sys(struct notifier_block *this,
327 void *unused) 316 unsigned long code, void *unused)
328{ 317{
329 stampit(); 318 stampit();
330 319
@@ -379,12 +368,12 @@ static int bfin_wdt_resume(struct platform_device *pdev)
379#endif 368#endif
380 369
381static const struct file_operations bfin_wdt_fops = { 370static const struct file_operations bfin_wdt_fops = {
382 .owner = THIS_MODULE, 371 .owner = THIS_MODULE,
383 .llseek = no_llseek, 372 .llseek = no_llseek,
384 .write = bfin_wdt_write, 373 .write = bfin_wdt_write,
385 .ioctl = bfin_wdt_ioctl, 374 .unlocked_ioctl = bfin_wdt_ioctl,
386 .open = bfin_wdt_open, 375 .open = bfin_wdt_open,
387 .release = bfin_wdt_release, 376 .release = bfin_wdt_release,
388}; 377};
389 378
390static struct miscdevice bfin_wdt_miscdev = { 379static struct miscdevice bfin_wdt_miscdev = {
@@ -396,8 +385,8 @@ static struct miscdevice bfin_wdt_miscdev = {
396static struct watchdog_info bfin_wdt_info = { 385static struct watchdog_info bfin_wdt_info = {
397 .identity = "Blackfin Watchdog", 386 .identity = "Blackfin Watchdog",
398 .options = WDIOF_SETTIMEOUT | 387 .options = WDIOF_SETTIMEOUT |
399 WDIOF_KEEPALIVEPING | 388 WDIOF_KEEPALIVEPING |
400 WDIOF_MAGICCLOSE, 389 WDIOF_MAGICCLOSE,
401}; 390};
402 391
403static struct notifier_block bfin_wdt_notifier = { 392static struct notifier_block bfin_wdt_notifier = {
@@ -416,14 +405,16 @@ static int __devinit bfin_wdt_probe(struct platform_device *pdev)
416 405
417 ret = register_reboot_notifier(&bfin_wdt_notifier); 406 ret = register_reboot_notifier(&bfin_wdt_notifier);
418 if (ret) { 407 if (ret) {
419 pr_devinit(KERN_ERR PFX "cannot register reboot notifier (err=%d)\n", ret); 408 pr_devinit(KERN_ERR PFX
409 "cannot register reboot notifier (err=%d)\n", ret);
420 return ret; 410 return ret;
421 } 411 }
422 412
423 ret = misc_register(&bfin_wdt_miscdev); 413 ret = misc_register(&bfin_wdt_miscdev);
424 if (ret) { 414 if (ret) {
425 pr_devinit(KERN_ERR PFX "cannot register miscdev on minor=%d (err=%d)\n", 415 pr_devinit(KERN_ERR PFX
426 WATCHDOG_MINOR, ret); 416 "cannot register miscdev on minor=%d (err=%d)\n",
417 WATCHDOG_MINOR, ret);
427 unregister_reboot_notifier(&bfin_wdt_notifier); 418 unregister_reboot_notifier(&bfin_wdt_notifier);
428 return ret; 419 return ret;
429 } 420 }
@@ -516,7 +507,11 @@ MODULE_LICENSE("GPL");
516MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR); 507MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR);
517 508
518module_param(timeout, uint, 0); 509module_param(timeout, uint, 0);
519MODULE_PARM_DESC(timeout, "Watchdog timeout in seconds. (1<=timeout<=((2^32)/SCLK), default=" __MODULE_STRING(WATCHDOG_TIMEOUT) ")"); 510MODULE_PARM_DESC(timeout,
511 "Watchdog timeout in seconds. (1<=timeout<=((2^32)/SCLK), default="
512 __MODULE_STRING(WATCHDOG_TIMEOUT) ")");
520 513
521module_param(nowayout, int, 0); 514module_param(nowayout, int, 0);
522MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped once started (default=" __MODULE_STRING(WATCHDOG_NOWAYOUT) ")"); 515MODULE_PARM_DESC(nowayout,
516 "Watchdog cannot be stopped once started (default="
517 __MODULE_STRING(WATCHDOG_NOWAYOUT) ")");
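
The reworked Blackfin WDIOC_SETOPTIONS case honours both option bits in a single pass and runs the start/stop calls under an irq-safe spinlock. A sketch of that pattern follows, with placeholder xxx_* names and stubbed hardware helpers.

#include <linux/watchdog.h>
#include <linux/spinlock.h>
#include <linux/uaccess.h>

static DEFINE_SPINLOCK(xxx_lock);

static void xxx_start(void) { /* arm the hardware timer */ }
static void xxx_stop(void)  { /* disarm the hardware timer */ }

static long xxx_setoptions(int __user *p)
{
	unsigned long flags;
	int options, ret = -EINVAL;

	if (get_user(options, p))
		return -EFAULT;

	spin_lock_irqsave(&xxx_lock, flags);
	if (options & WDIOS_DISABLECARD) {
		xxx_stop();
		ret = 0;
	}
	if (options & WDIOS_ENABLECARD) {
		xxx_start();
		ret = 0;
	}
	spin_unlock_irqrestore(&xxx_lock, flags);
	return ret;
}
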
diff --git a/drivers/watchdog/booke_wdt.c b/drivers/watchdog/booke_wdt.c
index 770824458d45..c3b78a76f173 100644
--- a/drivers/watchdog/booke_wdt.c
+++ b/drivers/watchdog/booke_wdt.c
@@ -18,9 +18,9 @@
18#include <linux/miscdevice.h> 18#include <linux/miscdevice.h>
19#include <linux/notifier.h> 19#include <linux/notifier.h>
20#include <linux/watchdog.h> 20#include <linux/watchdog.h>
21#include <linux/uaccess.h>
21 22
22#include <asm/reg_booke.h> 23#include <asm/reg_booke.h>
23#include <asm/uaccess.h>
24#include <asm/system.h> 24#include <asm/system.h>
25 25
26/* If the kernel parameter wdt=1, the watchdog will be enabled at boot. 26/* If the kernel parameter wdt=1, the watchdog will be enabled at boot.
@@ -32,7 +32,7 @@
32 */ 32 */
33 33
34#ifdef CONFIG_FSL_BOOKE 34#ifdef CONFIG_FSL_BOOKE
35#define WDT_PERIOD_DEFAULT 63 /* Ex. wdt_period=28 bus=333Mhz , reset=~40sec */ 35#define WDT_PERIOD_DEFAULT 63 /* Ex. wdt_period=28 bus=333Mhz,reset=~40sec */
36#else 36#else
37#define WDT_PERIOD_DEFAULT 3 /* Refer to the PPC40x and PPC4xx manuals */ 37#define WDT_PERIOD_DEFAULT 3 /* Refer to the PPC40x and PPC4xx manuals */
38#endif /* for timing information */ 38#endif /* for timing information */
@@ -82,16 +82,15 @@ static struct watchdog_info ident = {
82 .identity = "PowerPC Book-E Watchdog", 82 .identity = "PowerPC Book-E Watchdog",
83}; 83};
84 84
85static int booke_wdt_ioctl(struct inode *inode, struct file *file, 85static long booke_wdt_ioctl(struct file *file,
86 unsigned int cmd, unsigned long arg) 86 unsigned int cmd, unsigned long arg)
87{ 87{
88 u32 tmp = 0; 88 u32 tmp = 0;
89 u32 __user *p = (u32 __user *)arg; 89 u32 __user *p = (u32 __user *)arg;
90 90
91 switch (cmd) { 91 switch (cmd) {
92 case WDIOC_GETSUPPORT: 92 case WDIOC_GETSUPPORT:
93 if (copy_to_user((struct watchdog_info __user *)arg, &ident, 93 if (copy_to_user(arg, &ident, sizeof(struct watchdog_info)))
94 sizeof(struct watchdog_info)))
95 return -EFAULT; 94 return -EFAULT;
96 case WDIOC_GETSTATUS: 95 case WDIOC_GETSTATUS:
97 return put_user(ident.options, p); 96 return put_user(ident.options, p);
@@ -100,16 +99,6 @@ static int booke_wdt_ioctl(struct inode *inode, struct file *file,
100 tmp = mfspr(SPRN_TSR) & TSR_WRS(3); 99 tmp = mfspr(SPRN_TSR) & TSR_WRS(3);
101 /* returns 1 if last reset was caused by the WDT */ 100 /* returns 1 if last reset was caused by the WDT */
102 return (tmp ? 1 : 0); 101 return (tmp ? 1 : 0);
103 case WDIOC_KEEPALIVE:
104 booke_wdt_ping();
105 return 0;
106 case WDIOC_SETTIMEOUT:
107 if (get_user(booke_wdt_period, p))
108 return -EFAULT;
109 mtspr(SPRN_TCR, (mfspr(SPRN_TCR)&~WDTP(0))|WDTP(booke_wdt_period));
110 return 0;
111 case WDIOC_GETTIMEOUT:
112 return put_user(booke_wdt_period, p);
113 case WDIOC_SETOPTIONS: 102 case WDIOC_SETOPTIONS:
114 if (get_user(tmp, p)) 103 if (get_user(tmp, p))
115 return -EINVAL; 104 return -EINVAL;
@@ -119,6 +108,17 @@ static int booke_wdt_ioctl(struct inode *inode, struct file *file,
119 } else 108 } else
120 return -EINVAL; 109 return -EINVAL;
121 return 0; 110 return 0;
111 case WDIOC_KEEPALIVE:
112 booke_wdt_ping();
113 return 0;
114 case WDIOC_SETTIMEOUT:
115 if (get_user(booke_wdt_period, p))
116 return -EFAULT;
117 mtspr(SPRN_TCR, (mfspr(SPRN_TCR) & ~WDTP(0)) |
118 WDTP(booke_wdt_period));
119 return 0;
120 case WDIOC_GETTIMEOUT:
121 return put_user(booke_wdt_period, p);
122 default: 122 default:
123 return -ENOTTY; 123 return -ENOTTY;
124 } 124 }
@@ -132,8 +132,9 @@ static int booke_wdt_open(struct inode *inode, struct file *file)
132 if (booke_wdt_enabled == 0) { 132 if (booke_wdt_enabled == 0) {
133 booke_wdt_enabled = 1; 133 booke_wdt_enabled = 1;
134 on_each_cpu(__booke_wdt_enable, NULL, 0); 134 on_each_cpu(__booke_wdt_enable, NULL, 0);
135 printk(KERN_INFO "PowerPC Book-E Watchdog Timer Enabled " 135 printk(KERN_INFO
136 "(wdt_period=%d)\n", booke_wdt_period); 136 "PowerPC Book-E Watchdog Timer Enabled (wdt_period=%d)\n",
137 booke_wdt_period);
137 } 138 }
138 spin_unlock(&booke_wdt_lock); 139 spin_unlock(&booke_wdt_lock);
139 140
@@ -144,7 +145,7 @@ static const struct file_operations booke_wdt_fops = {
144 .owner = THIS_MODULE, 145 .owner = THIS_MODULE,
145 .llseek = no_llseek, 146 .llseek = no_llseek,
146 .write = booke_wdt_write, 147 .write = booke_wdt_write,
147 .ioctl = booke_wdt_ioctl, 148 .unlocked_ioctl = booke_wdt_ioctl,
148 .open = booke_wdt_open, 149 .open = booke_wdt_open,
149}; 150};
150 151
@@ -175,8 +176,9 @@ static int __init booke_wdt_init(void)
175 176
176 spin_lock(&booke_wdt_lock); 177 spin_lock(&booke_wdt_lock);
177 if (booke_wdt_enabled == 1) { 178 if (booke_wdt_enabled == 1) {
178 printk(KERN_INFO "PowerPC Book-E Watchdog Timer Enabled " 179 printk(KERN_INFO
179 "(wdt_period=%d)\n", booke_wdt_period); 180 "PowerPC Book-E Watchdog Timer Enabled (wdt_period=%d)\n",
181 booke_wdt_period);
180 on_each_cpu(__booke_wdt_enable, NULL, 0); 182 on_each_cpu(__booke_wdt_enable, NULL, 0);
181 } 183 }
182 spin_unlock(&booke_wdt_lock); 184 spin_unlock(&booke_wdt_lock);
diff --git a/drivers/watchdog/cpu5wdt.c b/drivers/watchdog/cpu5wdt.c
index df72f90123df..71f6d7eec9a8 100644
--- a/drivers/watchdog/cpu5wdt.c
+++ b/drivers/watchdog/cpu5wdt.c
@@ -30,16 +30,16 @@
30#include <linux/timer.h> 30#include <linux/timer.h>
31#include <linux/completion.h> 31#include <linux/completion.h>
32#include <linux/jiffies.h> 32#include <linux/jiffies.h>
33#include <asm/io.h> 33#include <linux/io.h>
34#include <asm/uaccess.h> 34#include <linux/uaccess.h>
35
36#include <linux/watchdog.h> 35#include <linux/watchdog.h>
37 36
38/* adjustable parameters */ 37/* adjustable parameters */
39 38
40static int verbose = 0; 39static int verbose;
41static int port = 0x91; 40static int port = 0x91;
42static int ticks = 10000; 41static int ticks = 10000;
42static spinlock_t cpu5wdt_lock;
43 43
44#define PFX "cpu5wdt: " 44#define PFX "cpu5wdt: "
45 45
@@ -70,12 +70,13 @@ static struct {
70 70
71static void cpu5wdt_trigger(unsigned long unused) 71static void cpu5wdt_trigger(unsigned long unused)
72{ 72{
73 if ( verbose > 2 ) 73 if (verbose > 2)
74 printk(KERN_DEBUG PFX "trigger at %i ticks\n", ticks); 74 printk(KERN_DEBUG PFX "trigger at %i ticks\n", ticks);
75 75
76 if( cpu5wdt_device.running ) 76 if (cpu5wdt_device.running)
77 ticks--; 77 ticks--;
78 78
79 spin_lock(&cpu5wdt_lock);
79 /* keep watchdog alive */ 80 /* keep watchdog alive */
80 outb(1, port + CPU5WDT_TRIGGER_REG); 81 outb(1, port + CPU5WDT_TRIGGER_REG);
81 82
@@ -86,6 +87,7 @@ static void cpu5wdt_trigger(unsigned long unused)
86 /* ticks doesn't matter anyway */ 87 /* ticks doesn't matter anyway */
87 complete(&cpu5wdt_device.stop); 88 complete(&cpu5wdt_device.stop);
88 } 89 }
90 spin_unlock(&cpu5wdt_lock);
89 91
90} 92}
91 93
@@ -93,14 +95,17 @@ static void cpu5wdt_reset(void)
93{ 95{
94 ticks = cpu5wdt_device.default_ticks; 96 ticks = cpu5wdt_device.default_ticks;
95 97
96 if ( verbose ) 98 if (verbose)
97 printk(KERN_DEBUG PFX "reset (%i ticks)\n", (int) ticks); 99 printk(KERN_DEBUG PFX "reset (%i ticks)\n", (int) ticks);
98 100
99} 101}
100 102
101static void cpu5wdt_start(void) 103static void cpu5wdt_start(void)
102{ 104{
103 if ( !cpu5wdt_device.queue ) { 105 unsigned long flags;
106
107 spin_lock_irqsave(&cpu5wdt_lock, flags);
108 if (!cpu5wdt_device.queue) {
104 cpu5wdt_device.queue = 1; 109 cpu5wdt_device.queue = 1;
105 outb(0, port + CPU5WDT_TIME_A_REG); 110 outb(0, port + CPU5WDT_TIME_A_REG);
106 outb(0, port + CPU5WDT_TIME_B_REG); 111 outb(0, port + CPU5WDT_TIME_B_REG);
@@ -111,18 +116,20 @@ static void cpu5wdt_start(void)
111 } 116 }
112 /* if process dies, counter is not decremented */ 117 /* if process dies, counter is not decremented */
113 cpu5wdt_device.running++; 118 cpu5wdt_device.running++;
119 spin_unlock_irqrestore(&cpu5wdt_lock, flags);
114} 120}
115 121
116static int cpu5wdt_stop(void) 122static int cpu5wdt_stop(void)
117{ 123{
118 if ( cpu5wdt_device.running ) 124 unsigned long flags;
119 cpu5wdt_device.running = 0;
120 125
126 spin_lock_irqsave(&cpu5wdt_lock, flags);
127 if (cpu5wdt_device.running)
128 cpu5wdt_device.running = 0;
121 ticks = cpu5wdt_device.default_ticks; 129 ticks = cpu5wdt_device.default_ticks;
122 130 spin_unlock_irqrestore(&cpu5wdt_lock, flags);
123 if ( verbose ) 131 if (verbose)
124 printk(KERN_CRIT PFX "stop not possible\n"); 132 printk(KERN_CRIT PFX "stop not possible\n");
125
126 return -EIO; 133 return -EIO;
127} 134}
128 135
@@ -130,9 +137,8 @@ static int cpu5wdt_stop(void)
130 137
131static int cpu5wdt_open(struct inode *inode, struct file *file) 138static int cpu5wdt_open(struct inode *inode, struct file *file)
132{ 139{
133 if ( test_and_set_bit(0, &cpu5wdt_device.inuse) ) 140 if (test_and_set_bit(0, &cpu5wdt_device.inuse))
134 return -EBUSY; 141 return -EBUSY;
135
136 return nonseekable_open(inode, file); 142 return nonseekable_open(inode, file);
137} 143}
138 144
@@ -142,67 +148,58 @@ static int cpu5wdt_release(struct inode *inode, struct file *file)
142 return 0; 148 return 0;
143} 149}
144 150
145static int cpu5wdt_ioctl(struct inode *inode, struct file *file, unsigned int cmd, unsigned long arg) 151static long cpu5wdt_ioctl(struct file *file, unsigned int cmd,
152 unsigned long arg)
146{ 153{
147 void __user *argp = (void __user *)arg; 154 void __user *argp = (void __user *)arg;
155 int __user *p = argp;
148 unsigned int value; 156 unsigned int value;
149 static struct watchdog_info ident = 157 static struct watchdog_info ident = {
150 {
151 .options = WDIOF_CARDRESET, 158 .options = WDIOF_CARDRESET,
152 .identity = "CPU5 WDT", 159 .identity = "CPU5 WDT",
153 }; 160 };
154 161
155 switch(cmd) { 162 switch (cmd) {
156 case WDIOC_KEEPALIVE: 163 case WDIOC_GETSUPPORT:
157 cpu5wdt_reset(); 164 if (copy_to_user(argp, &ident, sizeof(ident)))
158 break; 165 return -EFAULT;
159 case WDIOC_GETSTATUS: 166 break;
160 value = inb(port + CPU5WDT_STATUS_REG); 167 case WDIOC_GETSTATUS:
161 value = (value >> 2) & 1; 168 value = inb(port + CPU5WDT_STATUS_REG);
162 if ( copy_to_user(argp, &value, sizeof(int)) ) 169 value = (value >> 2) & 1;
163 return -EFAULT; 170 return put_user(value, p);
164 break; 171 case WDIOC_GETBOOTSTATUS:
165 case WDIOC_GETBOOTSTATUS: 172 return put_user(0, p);
166 if ( copy_to_user(argp, &value, sizeof(int)) ) 173 case WDIOC_SETOPTIONS:
167 return -EFAULT; 174 if (get_user(value, p))
168 break; 175 return -EFAULT;
169 case WDIOC_GETSUPPORT: 176 if (value & WDIOS_ENABLECARD)
170 if ( copy_to_user(argp, &ident, sizeof(ident)) ) 177 cpu5wdt_start();
171 return -EFAULT; 178 if (value & WDIOS_DISABLECARD)
172 break; 179 cpu5wdt_stop();
173 case WDIOC_SETOPTIONS: 180 break;
174 if ( copy_from_user(&value, argp, sizeof(int)) ) 181 case WDIOC_KEEPALIVE:
175 return -EFAULT; 182 cpu5wdt_reset();
176 switch(value) { 183 break;
177 case WDIOS_ENABLECARD: 184 default:
178 cpu5wdt_start(); 185 return -ENOTTY;
179 break;
180 case WDIOS_DISABLECARD:
181 return cpu5wdt_stop();
182 default:
183 return -EINVAL;
184 }
185 break;
186 default:
187 return -ENOTTY;
188 } 186 }
189 return 0; 187 return 0;
190} 188}
191 189
192static ssize_t cpu5wdt_write(struct file *file, const char __user *buf, size_t count, loff_t *ppos) 190static ssize_t cpu5wdt_write(struct file *file, const char __user *buf,
191 size_t count, loff_t *ppos)
193{ 192{
194 if ( !count ) 193 if (!count)
195 return -EIO; 194 return -EIO;
196
197 cpu5wdt_reset(); 195 cpu5wdt_reset();
198
199 return count; 196 return count;
200} 197}
201 198
202static const struct file_operations cpu5wdt_fops = { 199static const struct file_operations cpu5wdt_fops = {
203 .owner = THIS_MODULE, 200 .owner = THIS_MODULE,
204 .llseek = no_llseek, 201 .llseek = no_llseek,
205 .ioctl = cpu5wdt_ioctl, 202 .unlocked_ioctl = cpu5wdt_ioctl,
206 .open = cpu5wdt_open, 203 .open = cpu5wdt_open,
207 .write = cpu5wdt_write, 204 .write = cpu5wdt_write,
208 .release = cpu5wdt_release, 205 .release = cpu5wdt_release,
@@ -221,37 +218,36 @@ static int __devinit cpu5wdt_init(void)
221 unsigned int val; 218 unsigned int val;
222 int err; 219 int err;
223 220
224 if ( verbose ) 221 if (verbose)
225 printk(KERN_DEBUG PFX "port=0x%x, verbose=%i\n", port, verbose); 222 printk(KERN_DEBUG PFX
223 "port=0x%x, verbose=%i\n", port, verbose);
226 224
227 if ( !request_region(port, CPU5WDT_EXTENT, PFX) ) { 225 init_completion(&cpu5wdt_device.stop);
226 spin_lock_init(&cpu5wdt_lock);
227 cpu5wdt_device.queue = 0;
228 setup_timer(&cpu5wdt_device.timer, cpu5wdt_trigger, 0);
229 cpu5wdt_device.default_ticks = ticks;
230
231 if (!request_region(port, CPU5WDT_EXTENT, PFX)) {
228 printk(KERN_ERR PFX "request_region failed\n"); 232 printk(KERN_ERR PFX "request_region failed\n");
229 err = -EBUSY; 233 err = -EBUSY;
230 goto no_port; 234 goto no_port;
231 } 235 }
232 236
233 if ( (err = misc_register(&cpu5wdt_misc)) < 0 ) {
234 printk(KERN_ERR PFX "misc_register failed\n");
235 goto no_misc;
236 }
237
238 /* watchdog reboot? */ 237 /* watchdog reboot? */
239 val = inb(port + CPU5WDT_STATUS_REG); 238 val = inb(port + CPU5WDT_STATUS_REG);
240 val = (val >> 2) & 1; 239 val = (val >> 2) & 1;
241 if ( !val ) 240 if (!val)
242 printk(KERN_INFO PFX "sorry, was my fault\n"); 241 printk(KERN_INFO PFX "sorry, was my fault\n");
243 242
244 init_completion(&cpu5wdt_device.stop); 243 err = misc_register(&cpu5wdt_misc);
245 cpu5wdt_device.queue = 0; 244 if (err < 0) {
246 245 printk(KERN_ERR PFX "misc_register failed\n");
247 clear_bit(0, &cpu5wdt_device.inuse); 246 goto no_misc;
248 247 }
249 setup_timer(&cpu5wdt_device.timer, cpu5wdt_trigger, 0);
250 248
251 cpu5wdt_device.default_ticks = ticks;
252 249
253 printk(KERN_INFO PFX "init success\n"); 250 printk(KERN_INFO PFX "init success\n");
254
255 return 0; 251 return 0;
256 252
257no_misc: 253no_misc:
@@ -267,7 +263,7 @@ static int __devinit cpu5wdt_init_module(void)
267 263
268static void __devexit cpu5wdt_exit(void) 264static void __devexit cpu5wdt_exit(void)
269{ 265{
270 if ( cpu5wdt_device.queue ) { 266 if (cpu5wdt_device.queue) {
271 cpu5wdt_device.queue = 0; 267 cpu5wdt_device.queue = 0;
272 wait_for_completion(&cpu5wdt_device.stop); 268 wait_for_completion(&cpu5wdt_device.stop);
273 } 269 }
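
cpu5wdt now guards its queue flag and trigger register with a private spinlock so the timer callback cannot race against start/stop running from the unlocked ioctl path. A reduced sketch of that locking split, with assumed example_* names standing in for the driver's own:

#include <linux/spinlock.h>

static DEFINE_SPINLOCK(example_lock);	/* the patch instead calls spin_lock_init() at init time */
static int example_queue;

static void example_timer_cb(unsigned long unused)
{
	spin_lock(&example_lock);	/* timer runs in softirq context, plain spin_lock is enough */
	/* poke the hardware keepalive register here */
	spin_unlock(&example_lock);
}

static void example_start(void)
{
	unsigned long flags;

	spin_lock_irqsave(&example_lock, flags);	/* process context: keep the timer out */
	if (!example_queue)
		example_queue = 1;
	spin_unlock_irqrestore(&example_lock, flags);
}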
diff --git a/drivers/watchdog/davinci_wdt.c b/drivers/watchdog/davinci_wdt.c
index 1782c79eff06..2e1360286732 100644
--- a/drivers/watchdog/davinci_wdt.c
+++ b/drivers/watchdog/davinci_wdt.c
@@ -22,10 +22,9 @@
22#include <linux/bitops.h> 22#include <linux/bitops.h>
23#include <linux/platform_device.h> 23#include <linux/platform_device.h>
24#include <linux/spinlock.h> 24#include <linux/spinlock.h>
25 25#include <linux/uaccess.h>
26#include <asm/hardware.h> 26#include <linux/io.h>
27#include <asm/uaccess.h> 27#include <mach/hardware.h>
28#include <asm/io.h>
29 28
30#define MODULE_NAME "DAVINCI-WDT: " 29#define MODULE_NAME "DAVINCI-WDT: "
31 30
@@ -143,9 +142,8 @@ static struct watchdog_info ident = {
143 .identity = "DaVinci Watchdog", 142 .identity = "DaVinci Watchdog",
144}; 143};
145 144
146static int 145static long davinci_wdt_ioctl(struct file *file,
147davinci_wdt_ioctl(struct inode *inode, struct file *file, unsigned int cmd, 146 unsigned int cmd, unsigned long arg)
148 unsigned long arg)
149{ 147{
150 int ret = -ENOTTY; 148 int ret = -ENOTTY;
151 149
@@ -160,14 +158,14 @@ davinci_wdt_ioctl(struct inode *inode, struct file *file, unsigned int cmd,
160 ret = put_user(0, (int *)arg); 158 ret = put_user(0, (int *)arg);
161 break; 159 break;
162 160
163 case WDIOC_GETTIMEOUT:
164 ret = put_user(heartbeat, (int *)arg);
165 break;
166
167 case WDIOC_KEEPALIVE: 161 case WDIOC_KEEPALIVE:
168 wdt_service(); 162 wdt_service();
169 ret = 0; 163 ret = 0;
170 break; 164 break;
165
166 case WDIOC_GETTIMEOUT:
167 ret = put_user(heartbeat, (int *)arg);
168 break;
171 } 169 }
172 return ret; 170 return ret;
173} 171}
@@ -184,7 +182,7 @@ static const struct file_operations davinci_wdt_fops = {
184 .owner = THIS_MODULE, 182 .owner = THIS_MODULE,
185 .llseek = no_llseek, 183 .llseek = no_llseek,
186 .write = davinci_wdt_write, 184 .write = davinci_wdt_write,
187 .ioctl = davinci_wdt_ioctl, 185 .unlocked_ioctl = davinci_wdt_ioctl,
188 .open = davinci_wdt_open, 186 .open = davinci_wdt_open,
189 .release = davinci_wdt_release, 187 .release = davinci_wdt_release,
190}; 188};
diff --git a/drivers/watchdog/ep93xx_wdt.c b/drivers/watchdog/ep93xx_wdt.c
index 0e4787a0bb87..e9f950ff86ea 100644
--- a/drivers/watchdog/ep93xx_wdt.c
+++ b/drivers/watchdog/ep93xx_wdt.c
@@ -28,9 +28,8 @@
28#include <linux/miscdevice.h> 28#include <linux/miscdevice.h>
29#include <linux/watchdog.h> 29#include <linux/watchdog.h>
30#include <linux/timer.h> 30#include <linux/timer.h>
31 31#include <linux/uaccess.h>
32#include <asm/hardware.h> 32#include <mach/hardware.h>
33#include <asm/uaccess.h>
34 33
35#define WDT_VERSION "0.3" 34#define WDT_VERSION "0.3"
36#define PFX "ep93xx_wdt: " 35#define PFX "ep93xx_wdt: "
@@ -136,9 +135,8 @@ static struct watchdog_info ident = {
136 .identity = "EP93xx Watchdog", 135 .identity = "EP93xx Watchdog",
137}; 136};
138 137
139static int 138static long ep93xx_wdt_ioctl(struct file *file,
140ep93xx_wdt_ioctl(struct inode *inode, struct file *file, unsigned int cmd, 139 unsigned int cmd, unsigned long arg)
141 unsigned long arg)
142{ 140{
143 int ret = -ENOTTY; 141 int ret = -ENOTTY;
144 142
@@ -156,15 +154,15 @@ ep93xx_wdt_ioctl(struct inode *inode, struct file *file, unsigned int cmd,
156 ret = put_user(boot_status, (int __user *)arg); 154 ret = put_user(boot_status, (int __user *)arg);
157 break; 155 break;
158 156
159 case WDIOC_GETTIMEOUT:
160 /* actually, it is 0.250 seconds.... */
161 ret = put_user(1, (int __user *)arg);
162 break;
163
164 case WDIOC_KEEPALIVE: 157 case WDIOC_KEEPALIVE:
165 wdt_keepalive(); 158 wdt_keepalive();
166 ret = 0; 159 ret = 0;
167 break; 160 break;
161
162 case WDIOC_GETTIMEOUT:
163 /* actually, it is 0.250 seconds.... */
164 ret = put_user(1, (int __user *)arg);
165 break;
168 } 166 }
169 return ret; 167 return ret;
170} 168}
@@ -174,8 +172,8 @@ static int ep93xx_wdt_release(struct inode *inode, struct file *file)
174 if (test_bit(WDT_OK_TO_CLOSE, &wdt_status)) 172 if (test_bit(WDT_OK_TO_CLOSE, &wdt_status))
175 wdt_shutdown(); 173 wdt_shutdown();
176 else 174 else
177 printk(KERN_CRIT PFX "Device closed unexpectedly - " 175 printk(KERN_CRIT PFX
178 "timer will not stop\n"); 176 "Device closed unexpectedly - timer will not stop\n");
179 177
180 clear_bit(WDT_IN_USE, &wdt_status); 178 clear_bit(WDT_IN_USE, &wdt_status);
181 clear_bit(WDT_OK_TO_CLOSE, &wdt_status); 179 clear_bit(WDT_OK_TO_CLOSE, &wdt_status);
@@ -186,7 +184,7 @@ static int ep93xx_wdt_release(struct inode *inode, struct file *file)
186static const struct file_operations ep93xx_wdt_fops = { 184static const struct file_operations ep93xx_wdt_fops = {
187 .owner = THIS_MODULE, 185 .owner = THIS_MODULE,
188 .write = ep93xx_wdt_write, 186 .write = ep93xx_wdt_write,
189 .ioctl = ep93xx_wdt_ioctl, 187 .unlocked_ioctl = ep93xx_wdt_ioctl,
190 .open = ep93xx_wdt_open, 188 .open = ep93xx_wdt_open,
191 .release = ep93xx_wdt_release, 189 .release = ep93xx_wdt_release,
192}; 190};
@@ -243,7 +241,9 @@ module_param(nowayout, int, 0);
243MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped once started"); 241MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped once started");
244 242
245module_param(timeout, int, 0); 243module_param(timeout, int, 0);
246MODULE_PARM_DESC(timeout, "Watchdog timeout in seconds. (1<=timeout<=3600, default=" __MODULE_STRING(WATCHDOG_TIMEOUT) ")"); 244MODULE_PARM_DESC(timeout,
245 "Watchdog timeout in seconds. (1<=timeout<=3600, default="
246 __MODULE_STRING(WATCHDOG_TIMEOUT) ")");
247 247
248MODULE_AUTHOR("Ray Lehtiniemi <rayl@mail.com>," 248MODULE_AUTHOR("Ray Lehtiniemi <rayl@mail.com>,"
249 "Alessandro Zummo <a.zummo@towertech.it>"); 249 "Alessandro Zummo <a.zummo@towertech.it>");
diff --git a/drivers/watchdog/eurotechwdt.c b/drivers/watchdog/eurotechwdt.c
index b14e9d1f164d..bbd14e34319f 100644
--- a/drivers/watchdog/eurotechwdt.c
+++ b/drivers/watchdog/eurotechwdt.c
@@ -56,14 +56,15 @@
56#include <linux/notifier.h> 56#include <linux/notifier.h>
57#include <linux/reboot.h> 57#include <linux/reboot.h>
58#include <linux/init.h> 58#include <linux/init.h>
59#include <linux/io.h>
60#include <linux/uaccess.h>
59 61
60#include <asm/io.h>
61#include <asm/uaccess.h>
62#include <asm/system.h> 62#include <asm/system.h>
63 63
64static unsigned long eurwdt_is_open; 64static unsigned long eurwdt_is_open;
65static int eurwdt_timeout; 65static int eurwdt_timeout;
66static char eur_expect_close; 66static char eur_expect_close;
67static spinlock_t eurwdt_lock;
67 68
68/* 69/*
69 * You must set these - there is no sane way to probe for this board. 70 * You must set these - there is no sane way to probe for this board.
@@ -78,7 +79,9 @@ static char *ev = "int";
78 79
79static int nowayout = WATCHDOG_NOWAYOUT; 80static int nowayout = WATCHDOG_NOWAYOUT;
80module_param(nowayout, int, 0); 81module_param(nowayout, int, 0);
81MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped once started (default=" __MODULE_STRING(WATCHDOG_NOWAYOUT) ")"); 82MODULE_PARM_DESC(nowayout,
83 "Watchdog cannot be stopped once started (default="
84 __MODULE_STRING(WATCHDOG_NOWAYOUT) ")");
82 85
83/* 86/*
84 * Some symbolic names 87 * Some symbolic names
@@ -137,7 +140,8 @@ static void eurwdt_activate_timer(void)
137{ 140{
138 eurwdt_disable_timer(); 141 eurwdt_disable_timer();
139 eurwdt_write_reg(WDT_CTRL_REG, 0x01); /* activate the WDT */ 142 eurwdt_write_reg(WDT_CTRL_REG, 0x01); /* activate the WDT */
140 eurwdt_write_reg(WDT_OUTPIN_CFG, !strcmp("int", ev) ? WDT_EVENT_INT : WDT_EVENT_REBOOT); 143 eurwdt_write_reg(WDT_OUTPIN_CFG,
144 !strcmp("int", ev) ? WDT_EVENT_INT : WDT_EVENT_REBOOT);
141 145
142 /* Setting interrupt line */ 146 /* Setting interrupt line */
143 if (irq == 2 || irq > 15 || irq < 0) { 147 if (irq == 2 || irq > 15 || irq < 0) {
@@ -206,21 +210,21 @@ size_t count, loff_t *ppos)
206 210
207 for (i = 0; i != count; i++) { 211 for (i = 0; i != count; i++) {
208 char c; 212 char c;
209 if(get_user(c, buf+i)) 213 if (get_user(c, buf + i))
210 return -EFAULT; 214 return -EFAULT;
211 if (c == 'V') 215 if (c == 'V')
212 eur_expect_close = 42; 216 eur_expect_close = 42;
213 } 217 }
214 } 218 }
219 spin_lock(&eurwdt_lock);
215 eurwdt_ping(); /* the default timeout */ 220 eurwdt_ping(); /* the default timeout */
221 spin_unlock(&eurwdt_lock);
216 } 222 }
217
218 return count; 223 return count;
219} 224}
220 225
221/** 226/**
222 * eurwdt_ioctl: 227 * eurwdt_ioctl:
223 * @inode: inode of the device
224 * @file: file handle to the device 228 * @file: file handle to the device
225 * @cmd: watchdog command 229 * @cmd: watchdog command
226 * @arg: argument pointer 230 * @arg: argument pointer
@@ -229,13 +233,14 @@ size_t count, loff_t *ppos)
229 * according to their available features. 233 * according to their available features.
230 */ 234 */
231 235
232static int eurwdt_ioctl(struct inode *inode, struct file *file, 236static long eurwdt_ioctl(struct file *file,
233 unsigned int cmd, unsigned long arg) 237 unsigned int cmd, unsigned long arg)
234{ 238{
235 void __user *argp = (void __user *)arg; 239 void __user *argp = (void __user *)arg;
236 int __user *p = argp; 240 int __user *p = argp;
237 static struct watchdog_info ident = { 241 static struct watchdog_info ident = {
238 .options = WDIOF_KEEPALIVEPING | WDIOF_SETTIMEOUT | WDIOF_MAGICCLOSE, 242 .options = WDIOF_KEEPALIVEPING | WDIOF_SETTIMEOUT
243 | WDIOF_MAGICCLOSE,
239 .firmware_version = 1, 244 .firmware_version = 1,
240 .identity = "WDT Eurotech CPU-1220/1410", 245 .identity = "WDT Eurotech CPU-1220/1410",
241 }; 246 };
@@ -243,10 +248,7 @@ static int eurwdt_ioctl(struct inode *inode, struct file *file,
243 int time; 248 int time;
244 int options, retval = -EINVAL; 249 int options, retval = -EINVAL;
245 250
246 switch(cmd) { 251 switch (cmd) {
247 default:
248 return -ENOTTY;
249
250 case WDIOC_GETSUPPORT: 252 case WDIOC_GETSUPPORT:
251 return copy_to_user(argp, &ident, sizeof(ident)) ? -EFAULT : 0; 253 return copy_to_user(argp, &ident, sizeof(ident)) ? -EFAULT : 0;
252 254
@@ -254,8 +256,26 @@ static int eurwdt_ioctl(struct inode *inode, struct file *file,
254 case WDIOC_GETBOOTSTATUS: 256 case WDIOC_GETBOOTSTATUS:
255 return put_user(0, p); 257 return put_user(0, p);
256 258
259 case WDIOC_SETOPTIONS:
260 if (get_user(options, p))
261 return -EFAULT;
262 spin_lock(&eurwdt_lock);
263 if (options & WDIOS_DISABLECARD) {
264 eurwdt_disable_timer();
265 retval = 0;
266 }
267 if (options & WDIOS_ENABLECARD) {
268 eurwdt_activate_timer();
269 eurwdt_ping();
270 retval = 0;
271 }
272 spin_unlock(&eurwdt_lock);
273 return retval;
274
257 case WDIOC_KEEPALIVE: 275 case WDIOC_KEEPALIVE:
276 spin_lock(&eurwdt_lock);
258 eurwdt_ping(); 277 eurwdt_ping();
278 spin_unlock(&eurwdt_lock);
259 return 0; 279 return 0;
260 280
261 case WDIOC_SETTIMEOUT: 281 case WDIOC_SETTIMEOUT:
@@ -266,26 +286,17 @@ static int eurwdt_ioctl(struct inode *inode, struct file *file,
266 if (time < 0 || time > 255) 286 if (time < 0 || time > 255)
267 return -EINVAL; 287 return -EINVAL;
268 288
289 spin_lock(&eurwdt_lock);
269 eurwdt_timeout = time; 290 eurwdt_timeout = time;
270 eurwdt_set_timeout(time); 291 eurwdt_set_timeout(time);
292 spin_unlock(&eurwdt_lock);
271 /* Fall */ 293 /* Fall */
272 294
273 case WDIOC_GETTIMEOUT: 295 case WDIOC_GETTIMEOUT:
274 return put_user(eurwdt_timeout, p); 296 return put_user(eurwdt_timeout, p);
275 297
276 case WDIOC_SETOPTIONS: 298 default:
277 if (get_user(options, p)) 299 return -ENOTTY;
278 return -EFAULT;
279 if (options & WDIOS_DISABLECARD) {
280 eurwdt_disable_timer();
281 retval = 0;
282 }
283 if (options & WDIOS_ENABLECARD) {
284 eurwdt_activate_timer();
285 eurwdt_ping();
286 retval = 0;
287 }
288 return retval;
289 } 300 }
290} 301}
291 302
@@ -322,10 +333,11 @@ static int eurwdt_open(struct inode *inode, struct file *file)
322 333
323static int eurwdt_release(struct inode *inode, struct file *file) 334static int eurwdt_release(struct inode *inode, struct file *file)
324{ 335{
325 if (eur_expect_close == 42) { 336 if (eur_expect_close == 42)
326 eurwdt_disable_timer(); 337 eurwdt_disable_timer();
327 } else { 338 else {
328 printk(KERN_CRIT "eurwdt: Unexpected close, not stopping watchdog!\n"); 339 printk(KERN_CRIT
340 "eurwdt: Unexpected close, not stopping watchdog!\n");
329 eurwdt_ping(); 341 eurwdt_ping();
330 } 342 }
331 clear_bit(0, &eurwdt_is_open); 343 clear_bit(0, &eurwdt_is_open);
@@ -348,10 +360,8 @@ static int eurwdt_release(struct inode *inode, struct file *file)
348static int eurwdt_notify_sys(struct notifier_block *this, unsigned long code, 360static int eurwdt_notify_sys(struct notifier_block *this, unsigned long code,
349 void *unused) 361 void *unused)
350{ 362{
351 if (code == SYS_DOWN || code == SYS_HALT) { 363 if (code == SYS_DOWN || code == SYS_HALT)
352 /* Turn the card off */ 364 eurwdt_disable_timer(); /* Turn the card off */
353 eurwdt_disable_timer();
354 }
355 365
356 return NOTIFY_DONE; 366 return NOTIFY_DONE;
357} 367}
@@ -362,11 +372,11 @@ static int eurwdt_notify_sys(struct notifier_block *this, unsigned long code,
362 372
363 373
364static const struct file_operations eurwdt_fops = { 374static const struct file_operations eurwdt_fops = {
365 .owner = THIS_MODULE, 375 .owner = THIS_MODULE,
366 .llseek = no_llseek, 376 .llseek = no_llseek,
367 .write = eurwdt_write, 377 .write = eurwdt_write,
368 .ioctl = eurwdt_ioctl, 378 .unlocked_ioctl = eurwdt_ioctl,
369 .open = eurwdt_open, 379 .open = eurwdt_open,
370 .release = eurwdt_release, 380 .release = eurwdt_release,
371}; 381};
372 382
@@ -419,7 +429,7 @@ static int __init eurwdt_init(void)
419 int ret; 429 int ret;
420 430
421 ret = request_irq(irq, eurwdt_interrupt, IRQF_DISABLED, "eurwdt", NULL); 431 ret = request_irq(irq, eurwdt_interrupt, IRQF_DISABLED, "eurwdt", NULL);
422 if(ret) { 432 if (ret) {
423 printk(KERN_ERR "eurwdt: IRQ %d is not free.\n", irq); 433 printk(KERN_ERR "eurwdt: IRQ %d is not free.\n", irq);
424 goto out; 434 goto out;
425 } 435 }
@@ -432,10 +442,13 @@ static int __init eurwdt_init(void)
432 442
433 ret = register_reboot_notifier(&eurwdt_notifier); 443 ret = register_reboot_notifier(&eurwdt_notifier);
434 if (ret) { 444 if (ret) {
435 printk(KERN_ERR "eurwdt: can't register reboot notifier (err=%d)\n", ret); 445 printk(KERN_ERR
446 "eurwdt: can't register reboot notifier (err=%d)\n", ret);
436 goto outreg; 447 goto outreg;
437 } 448 }
438 449
450 spin_lock_init(&eurwdt_lock);
451
439 ret = misc_register(&eurwdt_miscdev); 452 ret = misc_register(&eurwdt_miscdev);
440 if (ret) { 453 if (ret) {
441 printk(KERN_ERR "eurwdt: can't misc_register on minor=%d\n", 454 printk(KERN_ERR "eurwdt: can't misc_register on minor=%d\n",
diff --git a/drivers/watchdog/geodewdt.c b/drivers/watchdog/geodewdt.c
index 30d09cbbad94..614a5c7017b6 100644
--- a/drivers/watchdog/geodewdt.c
+++ b/drivers/watchdog/geodewdt.c
@@ -17,8 +17,8 @@
17#include <linux/fs.h> 17#include <linux/fs.h>
18#include <linux/platform_device.h> 18#include <linux/platform_device.h>
19#include <linux/reboot.h> 19#include <linux/reboot.h>
20#include <linux/uaccess.h>
20 21
21#include <asm/uaccess.h>
22#include <asm/geode.h> 22#include <asm/geode.h>
23 23
24#define GEODEWDT_HZ 500 24#define GEODEWDT_HZ 500
@@ -77,27 +77,24 @@ static int geodewdt_set_heartbeat(int val)
77 return 0; 77 return 0;
78} 78}
79 79
80static int 80static int geodewdt_open(struct inode *inode, struct file *file)
81geodewdt_open(struct inode *inode, struct file *file)
82{ 81{
83 if (test_and_set_bit(WDT_FLAGS_OPEN, &wdt_flags)) 82 if (test_and_set_bit(WDT_FLAGS_OPEN, &wdt_flags))
84 return -EBUSY; 83 return -EBUSY;
85 84
86 if (!test_and_clear_bit(WDT_FLAGS_ORPHAN, &wdt_flags)) 85 if (!test_and_clear_bit(WDT_FLAGS_ORPHAN, &wdt_flags))
87 __module_get(THIS_MODULE); 86 __module_get(THIS_MODULE);
88 87
89 geodewdt_ping(); 88 geodewdt_ping();
90 return nonseekable_open(inode, file); 89 return nonseekable_open(inode, file);
91} 90}
92 91
93static int 92static int geodewdt_release(struct inode *inode, struct file *file)
94geodewdt_release(struct inode *inode, struct file *file)
95{ 93{
96 if (safe_close) { 94 if (safe_close) {
97 geodewdt_disable(); 95 geodewdt_disable();
98 module_put(THIS_MODULE); 96 module_put(THIS_MODULE);
99 } 97 } else {
100 else {
101 printk(KERN_CRIT "Unexpected close - watchdog is not stopping.\n"); 98 printk(KERN_CRIT "Unexpected close - watchdog is not stopping.\n");
102 geodewdt_ping(); 99 geodewdt_ping();
103 100
@@ -109,11 +106,10 @@ geodewdt_release(struct inode *inode, struct file *file)
109 return 0; 106 return 0;
110} 107}
111 108
112static ssize_t 109static ssize_t geodewdt_write(struct file *file, const char __user *data,
113geodewdt_write(struct file *file, const char __user *data, size_t len, 110 size_t len, loff_t *ppos)
114 loff_t *ppos)
115{ 111{
116 if(len) { 112 if (len) {
117 if (!nowayout) { 113 if (!nowayout) {
118 size_t i; 114 size_t i;
119 safe_close = 0; 115 safe_close = 0;
@@ -134,9 +130,8 @@ geodewdt_write(struct file *file, const char __user *data, size_t len,
134 return len; 130 return len;
135} 131}
136 132
137static int 133static int geodewdt_ioctl(struct inode *inode, struct file *file,
138geodewdt_ioctl(struct inode *inode, struct file *file, unsigned int cmd, 134 unsigned int cmd, unsigned long arg)
139 unsigned long arg)
140{ 135{
141 void __user *argp = (void __user *)arg; 136 void __user *argp = (void __user *)arg;
142 int __user *p = argp; 137 int __user *p = argp;
@@ -147,9 +142,9 @@ geodewdt_ioctl(struct inode *inode, struct file *file, unsigned int cmd,
147 | WDIOF_MAGICCLOSE, 142 | WDIOF_MAGICCLOSE,
148 .firmware_version = 1, 143 .firmware_version = 1,
149 .identity = WATCHDOG_NAME, 144 .identity = WATCHDOG_NAME,
150 }; 145 };
151 146
152 switch(cmd) { 147 switch (cmd) {
153 case WDIOC_GETSUPPORT: 148 case WDIOC_GETSUPPORT:
154 return copy_to_user(argp, &ident, 149 return copy_to_user(argp, &ident,
155 sizeof(ident)) ? -EFAULT : 0; 150 sizeof(ident)) ? -EFAULT : 0;
@@ -159,22 +154,6 @@ geodewdt_ioctl(struct inode *inode, struct file *file, unsigned int cmd,
159 case WDIOC_GETBOOTSTATUS: 154 case WDIOC_GETBOOTSTATUS:
160 return put_user(0, p); 155 return put_user(0, p);
161 156
162 case WDIOC_KEEPALIVE:
163 geodewdt_ping();
164 return 0;
165
166 case WDIOC_SETTIMEOUT:
167 if (get_user(interval, p))
168 return -EFAULT;
169
170 if (geodewdt_set_heartbeat(interval))
171 return -EINVAL;
172
173/* Fall through */
174
175 case WDIOC_GETTIMEOUT:
176 return put_user(timeout, p);
177
178 case WDIOC_SETOPTIONS: 157 case WDIOC_SETOPTIONS:
179 { 158 {
180 int options, ret = -EINVAL; 159 int options, ret = -EINVAL;
@@ -194,6 +173,20 @@ geodewdt_ioctl(struct inode *inode, struct file *file, unsigned int cmd,
194 173
195 return ret; 174 return ret;
196 } 175 }
176 case WDIOC_KEEPALIVE:
177 geodewdt_ping();
178 return 0;
179
180 case WDIOC_SETTIMEOUT:
181 if (get_user(interval, p))
182 return -EFAULT;
183
184 if (geodewdt_set_heartbeat(interval))
185 return -EINVAL;
186 /* Fall through */
187 case WDIOC_GETTIMEOUT:
188 return put_user(timeout, p);
189
197 default: 190 default:
198 return -ENOTTY; 191 return -ENOTTY;
199 } 192 }
@@ -202,22 +195,21 @@ geodewdt_ioctl(struct inode *inode, struct file *file, unsigned int cmd,
202} 195}
203 196
204static const struct file_operations geodewdt_fops = { 197static const struct file_operations geodewdt_fops = {
205 .owner = THIS_MODULE, 198 .owner = THIS_MODULE,
206 .llseek = no_llseek, 199 .llseek = no_llseek,
207 .write = geodewdt_write, 200 .write = geodewdt_write,
208 .ioctl = geodewdt_ioctl, 201 .ioctl = geodewdt_ioctl,
209 .open = geodewdt_open, 202 .open = geodewdt_open,
210 .release = geodewdt_release, 203 .release = geodewdt_release,
211}; 204};
212 205
213static struct miscdevice geodewdt_miscdev = { 206static struct miscdevice geodewdt_miscdev = {
214 .minor = WATCHDOG_MINOR, 207 .minor = WATCHDOG_MINOR,
215 .name = "watchdog", 208 .name = "watchdog",
216 .fops = &geodewdt_fops 209 .fops = &geodewdt_fops,
217}; 210};
218 211
219static int __devinit 212static int __devinit geodewdt_probe(struct platform_device *dev)
220geodewdt_probe(struct platform_device *dev)
221{ 213{
222 int ret, timer; 214 int ret, timer;
223 215
@@ -248,15 +240,13 @@ geodewdt_probe(struct platform_device *dev)
248 return ret; 240 return ret;
249} 241}
250 242
251static int __devexit 243static int __devexit geodewdt_remove(struct platform_device *dev)
252geodewdt_remove(struct platform_device *dev)
253{ 244{
254 misc_deregister(&geodewdt_miscdev); 245 misc_deregister(&geodewdt_miscdev);
255 return 0; 246 return 0;
256} 247}
257 248
258static void 249static void geodewdt_shutdown(struct platform_device *dev)
259geodewdt_shutdown(struct platform_device *dev)
260{ 250{
261 geodewdt_disable(); 251 geodewdt_disable();
262} 252}
@@ -271,8 +261,7 @@ static struct platform_driver geodewdt_driver = {
271 }, 261 },
272}; 262};
273 263
274static int __init 264static int __init geodewdt_init(void)
275geodewdt_init(void)
276{ 265{
277 int ret; 266 int ret;
278 267
@@ -292,8 +281,7 @@ err:
292 return ret; 281 return ret;
293} 282}
294 283
295static void __exit 284static void __exit geodewdt_exit(void)
296geodewdt_exit(void)
297{ 285{
298 platform_device_unregister(geodewdt_platform_device); 286 platform_device_unregister(geodewdt_platform_device);
299 platform_driver_unregister(&geodewdt_driver); 287 platform_driver_unregister(&geodewdt_driver);
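
geodewdt pins the module on open unless a previous unsafe close left the watchdog "orphaned", and only drops the reference again on a safe (magic) close; the hunks above mostly reflow that logic. The skeleton of the idiom, with placeholder names rather than the driver's own:

#include <linux/module.h>
#include <linux/fs.h>
#include <linux/bitops.h>

static unsigned long example_flags;
#define EX_FLAGS_OPEN	0
#define EX_FLAGS_ORPHAN	1

static int example_open(struct inode *inode, struct file *file)
{
	if (test_and_set_bit(EX_FLAGS_OPEN, &example_flags))
		return -EBUSY;
	/* if the last close was unsafe, the module reference is still held */
	if (!test_and_clear_bit(EX_FLAGS_ORPHAN, &example_flags))
		__module_get(THIS_MODULE);
	return nonseekable_open(inode, file);
}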
diff --git a/drivers/watchdog/hpwdt.c b/drivers/watchdog/hpwdt.c
index eaa3f2a79ff5..d039d5f2fd1c 100644
--- a/drivers/watchdog/hpwdt.c
+++ b/drivers/watchdog/hpwdt.c
@@ -39,9 +39,7 @@
39#include <linux/string.h> 39#include <linux/string.h>
40#include <linux/bootmem.h> 40#include <linux/bootmem.h>
41#include <linux/slab.h> 41#include <linux/slab.h>
42#include <asm/dmi.h>
43#include <asm/desc.h> 42#include <asm/desc.h>
44#include <asm/kdebug.h>
45 43
46#define PCI_BIOS32_SD_VALUE 0x5F32335F /* "_32_" */ 44#define PCI_BIOS32_SD_VALUE 0x5F32335F /* "_32_" */
47#define CRU_BIOS_SIGNATURE_VALUE 0x55524324 45#define CRU_BIOS_SIGNATURE_VALUE 0x55524324
@@ -407,7 +405,7 @@ static int __devinit detect_cru_service(void)
407 dmi_walk(dmi_find_cru); 405 dmi_walk(dmi_find_cru);
408 406
409 /* if cru_rom_addr has been set then we found a CRU service */ 407 /* if cru_rom_addr has been set then we found a CRU service */
410 return ((cru_rom_addr != NULL)? 0: -ENODEV); 408 return ((cru_rom_addr != NULL) ? 0: -ENODEV);
411} 409}
412 410
413/* ------------------------------------------------------------------------- */ 411/* ------------------------------------------------------------------------- */
@@ -420,7 +418,7 @@ static int __devinit detect_cru_service(void)
420static int hpwdt_pretimeout(struct notifier_block *nb, unsigned long ulReason, 418static int hpwdt_pretimeout(struct notifier_block *nb, unsigned long ulReason,
421 void *data) 419 void *data)
422{ 420{
423 static unsigned long rom_pl; 421 unsigned long rom_pl;
424 static int die_nmi_called; 422 static int die_nmi_called;
425 423
426 if (ulReason != DIE_NMI && ulReason != DIE_NMI_IPI) 424 if (ulReason != DIE_NMI && ulReason != DIE_NMI_IPI)
@@ -535,7 +533,7 @@ static ssize_t hpwdt_write(struct file *file, const char __user *data,
535 /* scan to see whether or not we got the magic char. */ 533 /* scan to see whether or not we got the magic char. */
536 for (i = 0; i != len; i++) { 534 for (i = 0; i != len; i++) {
537 char c; 535 char c;
538 if (get_user(c, data+i)) 536 if (get_user(c, data + i))
539 return -EFAULT; 537 return -EFAULT;
540 if (c == 'V') 538 if (c == 'V')
541 expect_release = 42; 539 expect_release = 42;
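
On the kernel side, hpwdt's write path (the hunk above only tidies its spacing) is the usual magic-close scan: walk the user buffer one byte at a time with get_user() and remember whether a 'V' was seen. A stripped-down sketch under assumed names:

#include <linux/fs.h>
#include <linux/types.h>
#include <linux/uaccess.h>

static char example_expect_release;

static ssize_t example_write(struct file *file, const char __user *data,
			     size_t len, loff_t *ppos)
{
	if (len) {
		size_t i;

		example_expect_release = 0;
		for (i = 0; i != len; i++) {	/* look for the magic character */
			char c;

			if (get_user(c, data + i))
				return -EFAULT;
			if (c == 'V')
				example_expect_release = 42;
		}
		/* a write also counts as a keepalive ping */
	}
	return len;
}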
diff --git a/drivers/watchdog/i6300esb.c b/drivers/watchdog/i6300esb.c
index ca44fd9b19bb..c13383f7fcb9 100644
--- a/drivers/watchdog/i6300esb.c
+++ b/drivers/watchdog/i6300esb.c
@@ -9,18 +9,18 @@
9 * as published by the Free Software Foundation; either version 9 * as published by the Free Software Foundation; either version
10 * 2 of the License, or (at your option) any later version. 10 * 2 of the License, or (at your option) any later version.
11 * 11 *
12 * based on i810-tco.c which is in turn based on softdog.c 12 * based on i810-tco.c which is in turn based on softdog.c
13 * 13 *
14 * The timer is implemented in the following I/O controller hubs: 14 * The timer is implemented in the following I/O controller hubs:
15 * (See the intel documentation on http://developer.intel.com.) 15 * (See the intel documentation on http://developer.intel.com.)
16 * 6300ESB chip : document number 300641-003 16 * 6300ESB chip : document number 300641-003
17 * 17 *
18 * 2004YYZZ Ross Biro 18 * 2004YYZZ Ross Biro
19 * Initial version 0.01 19 * Initial version 0.01
20 * 2004YYZZ Ross Biro 20 * 2004YYZZ Ross Biro
21 * Version 0.02 21 * Version 0.02
22 * 20050210 David Härdeman <david@2gen.com> 22 * 20050210 David Härdeman <david@2gen.com>
23 * Ported driver to kernel 2.6 23 * Ported driver to kernel 2.6
24 */ 24 */
25 25
26/* 26/*
@@ -38,9 +38,8 @@
38#include <linux/init.h> 38#include <linux/init.h>
39#include <linux/pci.h> 39#include <linux/pci.h>
40#include <linux/ioport.h> 40#include <linux/ioport.h>
41 41#include <linux/uaccess.h>
42#include <asm/uaccess.h> 42#include <linux/io.h>
43#include <asm/io.h>
44 43
45/* Module and version information */ 44/* Module and version information */
46#define ESB_VERSION "0.03" 45#define ESB_VERSION "0.03"
@@ -59,17 +58,17 @@
59#define ESB_RELOAD_REG BASEADDR + 0x0c /* Reload register */ 58#define ESB_RELOAD_REG BASEADDR + 0x0c /* Reload register */
60 59
61/* Lock register bits */ 60/* Lock register bits */
62#define ESB_WDT_FUNC ( 0x01 << 2 ) /* Watchdog functionality */ 61#define ESB_WDT_FUNC (0x01 << 2) /* Watchdog functionality */
63#define ESB_WDT_ENABLE ( 0x01 << 1 ) /* Enable WDT */ 62#define ESB_WDT_ENABLE (0x01 << 1) /* Enable WDT */
64#define ESB_WDT_LOCK ( 0x01 << 0 ) /* Lock (nowayout) */ 63#define ESB_WDT_LOCK (0x01 << 0) /* Lock (nowayout) */
65 64
66/* Config register bits */ 65/* Config register bits */
67#define ESB_WDT_REBOOT ( 0x01 << 5 ) /* Enable reboot on timeout */ 66#define ESB_WDT_REBOOT (0x01 << 5) /* Enable reboot on timeout */
68#define ESB_WDT_FREQ ( 0x01 << 2 ) /* Decrement frequency */ 67#define ESB_WDT_FREQ (0x01 << 2) /* Decrement frequency */
69#define ESB_WDT_INTTYPE ( 0x11 << 0 ) /* Interrupt type on timer1 timeout */ 68#define ESB_WDT_INTTYPE (0x11 << 0) /* Interrupt type on timer1 timeout */
70 69
71/* Reload register bits */ 70/* Reload register bits */
72#define ESB_WDT_RELOAD ( 0x01 << 8 ) /* prevent timeout */ 71#define ESB_WDT_RELOAD (0x01 << 8) /* prevent timeout */
73 72
74/* Magic constants */ 73/* Magic constants */
75#define ESB_UNLOCK1 0x80 /* Step 1 to unlock reset registers */ 74#define ESB_UNLOCK1 0x80 /* Step 1 to unlock reset registers */
@@ -84,14 +83,20 @@ static unsigned short triggered; /* The status of the watchdog upon boot */
84static char esb_expect_close; 83static char esb_expect_close;
85 84
86/* module parameters */ 85/* module parameters */
87#define WATCHDOG_HEARTBEAT 30 /* 30 sec default heartbeat (1<heartbeat<2*1023) */ 86/* 30 sec default heartbeat (1 < heartbeat < 2*1023) */
87#define WATCHDOG_HEARTBEAT 30
88static int heartbeat = WATCHDOG_HEARTBEAT; /* in seconds */ 88static int heartbeat = WATCHDOG_HEARTBEAT; /* in seconds */
89
89module_param(heartbeat, int, 0); 90module_param(heartbeat, int, 0);
90MODULE_PARM_DESC(heartbeat, "Watchdog heartbeat in seconds. (1<heartbeat<2046, default=" __MODULE_STRING(WATCHDOG_HEARTBEAT) ")"); 91MODULE_PARM_DESC(heartbeat,
92 "Watchdog heartbeat in seconds. (1<heartbeat<2046, default="
93 __MODULE_STRING(WATCHDOG_HEARTBEAT) ")");
91 94
92static int nowayout = WATCHDOG_NOWAYOUT; 95static int nowayout = WATCHDOG_NOWAYOUT;
93module_param(nowayout, int, 0); 96module_param(nowayout, int, 0);
94MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped once started (default=" __MODULE_STRING(WATCHDOG_NOWAYOUT) ")"); 97MODULE_PARM_DESC(nowayout,
98 "Watchdog cannot be stopped once started (default="
99 __MODULE_STRING(WATCHDOG_NOWAYOUT) ")");
95 100
96/* 101/*
97 * Some i6300ESB specific functions 102 * Some i6300ESB specific functions
@@ -103,9 +108,10 @@ MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped once started (default=" _
103 * reload register. After this the appropriate registers can be written 108 * reload register. After this the appropriate registers can be written
104 * to once before they need to be unlocked again. 109 * to once before they need to be unlocked again.
105 */ 110 */
106static inline void esb_unlock_registers(void) { 111static inline void esb_unlock_registers(void)
107 writeb(ESB_UNLOCK1, ESB_RELOAD_REG); 112{
108 writeb(ESB_UNLOCK2, ESB_RELOAD_REG); 113 writeb(ESB_UNLOCK1, ESB_RELOAD_REG);
114 writeb(ESB_UNLOCK2, ESB_RELOAD_REG);
109} 115}
110 116
111static void esb_timer_start(void) 117static void esb_timer_start(void)
@@ -114,8 +120,7 @@ static void esb_timer_start(void)
114 120
115 /* Enable or Enable + Lock? */ 121 /* Enable or Enable + Lock? */
116 val = 0x02 | (nowayout ? 0x01 : 0x00); 122 val = 0x02 | (nowayout ? 0x01 : 0x00);
117 123 pci_write_config_byte(esb_pci, ESB_LOCK_REG, val);
118 pci_write_config_byte(esb_pci, ESB_LOCK_REG, val);
119} 124}
120 125
121static int esb_timer_stop(void) 126static int esb_timer_stop(void)
@@ -140,7 +145,7 @@ static void esb_timer_keepalive(void)
140 spin_lock(&esb_lock); 145 spin_lock(&esb_lock);
141 esb_unlock_registers(); 146 esb_unlock_registers();
142 writew(ESB_WDT_RELOAD, ESB_RELOAD_REG); 147 writew(ESB_WDT_RELOAD, ESB_RELOAD_REG);
143 /* FIXME: Do we need to flush anything here? */ 148 /* FIXME: Do we need to flush anything here? */
144 spin_unlock(&esb_lock); 149 spin_unlock(&esb_lock);
145} 150}
146 151
@@ -165,9 +170,9 @@ static int esb_timer_set_heartbeat(int time)
165 170
166 /* Write timer 2 */ 171 /* Write timer 2 */
167 esb_unlock_registers(); 172 esb_unlock_registers();
168 writel(val, ESB_TIMER2_REG); 173 writel(val, ESB_TIMER2_REG);
169 174
170 /* Reload */ 175 /* Reload */
171 esb_unlock_registers(); 176 esb_unlock_registers();
172 writew(ESB_WDT_RELOAD, ESB_RELOAD_REG); 177 writew(ESB_WDT_RELOAD, ESB_RELOAD_REG);
173 178
@@ -179,54 +184,55 @@ static int esb_timer_set_heartbeat(int time)
179 return 0; 184 return 0;
180} 185}
181 186
182static int esb_timer_read (void) 187static int esb_timer_read(void)
183{ 188{
184 u32 count; 189 u32 count;
185 190
186 /* This isn't documented, and doesn't take into 191 /* This isn't documented, and doesn't take into
187 * acount which stage is running, but it looks 192 * acount which stage is running, but it looks
188 * like a 20 bit count down, so we might as well report it. 193 * like a 20 bit count down, so we might as well report it.
189 */ 194 */
190 pci_read_config_dword(esb_pci, 0x64, &count); 195 pci_read_config_dword(esb_pci, 0x64, &count);
191 return (int)count; 196 return (int)count;
192} 197}
193 198
194/* 199/*
195 * /dev/watchdog handling 200 * /dev/watchdog handling
196 */ 201 */
197 202
198static int esb_open (struct inode *inode, struct file *file) 203static int esb_open(struct inode *inode, struct file *file)
199{ 204{
200 /* /dev/watchdog can only be opened once */ 205 /* /dev/watchdog can only be opened once */
201 if (test_and_set_bit(0, &timer_alive)) 206 if (test_and_set_bit(0, &timer_alive))
202 return -EBUSY; 207 return -EBUSY;
203 208
204 /* Reload and activate timer */ 209 /* Reload and activate timer */
205 esb_timer_keepalive (); 210 esb_timer_keepalive();
206 esb_timer_start (); 211 esb_timer_start();
207 212
208 return nonseekable_open(inode, file); 213 return nonseekable_open(inode, file);
209} 214}
210 215
211static int esb_release (struct inode *inode, struct file *file) 216static int esb_release(struct inode *inode, struct file *file)
212{ 217{
213 /* Shut off the timer. */ 218 /* Shut off the timer. */
214 if (esb_expect_close == 42) { 219 if (esb_expect_close == 42)
215 esb_timer_stop (); 220 esb_timer_stop();
216 } else { 221 else {
217 printk(KERN_CRIT PFX "Unexpected close, not stopping watchdog!\n"); 222 printk(KERN_CRIT PFX
218 esb_timer_keepalive (); 223 "Unexpected close, not stopping watchdog!\n");
219 } 224 esb_timer_keepalive();
220 clear_bit(0, &timer_alive); 225 }
221 esb_expect_close = 0; 226 clear_bit(0, &timer_alive);
222 return 0; 227 esb_expect_close = 0;
228 return 0;
223} 229}
224 230
225static ssize_t esb_write (struct file *file, const char __user *data, 231static ssize_t esb_write(struct file *file, const char __user *data,
226 size_t len, loff_t * ppos) 232 size_t len, loff_t *ppos)
227{ 233{
228 /* See if we got the magic character 'V' and reload the timer */ 234 /* See if we got the magic character 'V' and reload the timer */
229 if (len) { 235 if (len) {
230 if (!nowayout) { 236 if (!nowayout) {
231 size_t i; 237 size_t i;
232 238
@@ -237,7 +243,7 @@ static ssize_t esb_write (struct file *file, const char __user *data,
237 /* scan to see whether or not we got the magic character */ 243 /* scan to see whether or not we got the magic character */
238 for (i = 0; i != len; i++) { 244 for (i = 0; i != len; i++) {
239 char c; 245 char c;
240 if(get_user(c, data+i)) 246 if (get_user(c, data + i))
241 return -EFAULT; 247 return -EFAULT;
242 if (c == 'V') 248 if (c == 'V')
243 esb_expect_close = 42; 249 esb_expect_close = 42;
@@ -245,92 +251,84 @@ static ssize_t esb_write (struct file *file, const char __user *data,
245 } 251 }
246 252
247 /* someone wrote to us, we should reload the timer */ 253 /* someone wrote to us, we should reload the timer */
248 esb_timer_keepalive (); 254 esb_timer_keepalive();
249 } 255 }
250 return len; 256 return len;
251} 257}
252 258
253static int esb_ioctl (struct inode *inode, struct file *file, 259static long esb_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
254 unsigned int cmd, unsigned long arg)
255{ 260{
256 int new_options, retval = -EINVAL; 261 int new_options, retval = -EINVAL;
257 int new_heartbeat; 262 int new_heartbeat;
258 void __user *argp = (void __user *)arg; 263 void __user *argp = (void __user *)arg;
259 int __user *p = argp; 264 int __user *p = argp;
260 static struct watchdog_info ident = { 265 static struct watchdog_info ident = {
261 .options = WDIOF_SETTIMEOUT | 266 .options = WDIOF_SETTIMEOUT |
262 WDIOF_KEEPALIVEPING | 267 WDIOF_KEEPALIVEPING |
263 WDIOF_MAGICCLOSE, 268 WDIOF_MAGICCLOSE,
264 .firmware_version = 0, 269 .firmware_version = 0,
265 .identity = ESB_MODULE_NAME, 270 .identity = ESB_MODULE_NAME,
266 }; 271 };
267 272
268 switch (cmd) { 273 switch (cmd) {
269 case WDIOC_GETSUPPORT: 274 case WDIOC_GETSUPPORT:
270 return copy_to_user(argp, &ident, 275 return copy_to_user(argp, &ident,
271 sizeof (ident)) ? -EFAULT : 0; 276 sizeof(ident)) ? -EFAULT : 0;
272
273 case WDIOC_GETSTATUS:
274 return put_user (esb_timer_read(), p);
275
276 case WDIOC_GETBOOTSTATUS:
277 return put_user (triggered, p);
278 277
279 case WDIOC_KEEPALIVE: 278 case WDIOC_GETSTATUS:
280 esb_timer_keepalive (); 279 return put_user(esb_timer_read(), p);
281 return 0;
282 280
283 case WDIOC_SETOPTIONS: 281 case WDIOC_GETBOOTSTATUS:
284 { 282 return put_user(triggered, p);
285 if (get_user (new_options, p))
286 return -EFAULT;
287 283
288 if (new_options & WDIOS_DISABLECARD) { 284 case WDIOC_SETOPTIONS:
289 esb_timer_stop (); 285 {
290 retval = 0; 286 if (get_user(new_options, p))
291 } 287 return -EFAULT;
292 288
293 if (new_options & WDIOS_ENABLECARD) { 289 if (new_options & WDIOS_DISABLECARD) {
294 esb_timer_keepalive (); 290 esb_timer_stop();
295 esb_timer_start (); 291 retval = 0;
296 retval = 0; 292 }
297 }
298
299 return retval;
300 }
301
302 case WDIOC_SETTIMEOUT:
303 {
304 if (get_user(new_heartbeat, p))
305 return -EFAULT;
306
307 if (esb_timer_set_heartbeat(new_heartbeat))
308 return -EINVAL;
309
310 esb_timer_keepalive ();
311 /* Fall */
312 }
313
314 case WDIOC_GETTIMEOUT:
315 return put_user(heartbeat, p);
316 293
317 default: 294 if (new_options & WDIOS_ENABLECARD) {
318 return -ENOTTY; 295 esb_timer_keepalive();
319 } 296 esb_timer_start();
297 retval = 0;
298 }
299 return retval;
300 }
301 case WDIOC_KEEPALIVE:
302 esb_timer_keepalive();
303 return 0;
304
305 case WDIOC_SETTIMEOUT:
306 {
307 if (get_user(new_heartbeat, p))
308 return -EFAULT;
309 if (esb_timer_set_heartbeat(new_heartbeat))
310 return -EINVAL;
311 esb_timer_keepalive();
312 /* Fall */
313 }
314 case WDIOC_GETTIMEOUT:
315 return put_user(heartbeat, p);
316 default:
317 return -ENOTTY;
318 }
320} 319}
321 320
322/* 321/*
323 * Notify system 322 * Notify system
324 */ 323 */
325 324
326static int esb_notify_sys (struct notifier_block *this, unsigned long code, void *unused) 325static int esb_notify_sys(struct notifier_block *this,
326 unsigned long code, void *unused)
327{ 327{
328 if (code==SYS_DOWN || code==SYS_HALT) { 328 if (code == SYS_DOWN || code == SYS_HALT)
329 /* Turn the WDT off */ 329 esb_timer_stop(); /* Turn the WDT off */
330 esb_timer_stop ();
331 }
332 330
333 return NOTIFY_DONE; 331 return NOTIFY_DONE;
334} 332}
335 333
336/* 334/*
@@ -338,22 +336,22 @@ static int esb_notify_sys (struct notifier_block *this, unsigned long code, void
338 */ 336 */
339 337
340static const struct file_operations esb_fops = { 338static const struct file_operations esb_fops = {
341 .owner = THIS_MODULE, 339 .owner = THIS_MODULE,
342 .llseek = no_llseek, 340 .llseek = no_llseek,
343 .write = esb_write, 341 .write = esb_write,
344 .ioctl = esb_ioctl, 342 .unlocked_ioctl = esb_ioctl,
345 .open = esb_open, 343 .open = esb_open,
346 .release = esb_release, 344 .release = esb_release,
347}; 345};
348 346
349static struct miscdevice esb_miscdev = { 347static struct miscdevice esb_miscdev = {
350 .minor = WATCHDOG_MINOR, 348 .minor = WATCHDOG_MINOR,
351 .name = "watchdog", 349 .name = "watchdog",
352 .fops = &esb_fops, 350 .fops = &esb_fops,
353}; 351};
354 352
355static struct notifier_block esb_notifier = { 353static struct notifier_block esb_notifier = {
356 .notifier_call = esb_notify_sys, 354 .notifier_call = esb_notify_sys,
357}; 355};
358 356
359/* 357/*
@@ -365,50 +363,44 @@ static struct notifier_block esb_notifier = {
365 * want to register another driver on the same PCI id. 363 * want to register another driver on the same PCI id.
366 */ 364 */
367static struct pci_device_id esb_pci_tbl[] = { 365static struct pci_device_id esb_pci_tbl[] = {
368 { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ESB_9), }, 366 { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ESB_9), },
369 { 0, }, /* End of list */ 367 { 0, }, /* End of list */
370}; 368};
371MODULE_DEVICE_TABLE (pci, esb_pci_tbl); 369MODULE_DEVICE_TABLE(pci, esb_pci_tbl);
372 370
373/* 371/*
374 * Init & exit routines 372 * Init & exit routines
375 */ 373 */
376 374
377static unsigned char __init esb_getdevice (void) 375static unsigned char __init esb_getdevice(void)
378{ 376{
379 u8 val1; 377 u8 val1;
380 unsigned short val2; 378 unsigned short val2;
379 /*
380 * Find the PCI device
381 */
381 382
382 struct pci_dev *dev = NULL; 383 esb_pci = pci_get_device(PCI_VENDOR_ID_INTEL,
383 /* 384 PCI_DEVICE_ID_INTEL_ESB_9, NULL);
384 * Find the PCI device
385 */
386
387 for_each_pci_dev(dev) {
388 if (pci_match_id(esb_pci_tbl, dev)) {
389 esb_pci = dev;
390 break;
391 }
392 }
393 385
394 if (esb_pci) { 386 if (esb_pci) {
395 if (pci_enable_device(esb_pci)) { 387 if (pci_enable_device(esb_pci)) {
396 printk (KERN_ERR PFX "failed to enable device\n"); 388 printk(KERN_ERR PFX "failed to enable device\n");
397 goto err_devput; 389 goto err_devput;
398 } 390 }
399 391
400 if (pci_request_region(esb_pci, 0, ESB_MODULE_NAME)) { 392 if (pci_request_region(esb_pci, 0, ESB_MODULE_NAME)) {
401 printk (KERN_ERR PFX "failed to request region\n"); 393 printk(KERN_ERR PFX "failed to request region\n");
402 goto err_disable; 394 goto err_disable;
403 } 395 }
404 396
405 BASEADDR = ioremap(pci_resource_start(esb_pci, 0), 397 BASEADDR = ioremap(pci_resource_start(esb_pci, 0),
406 pci_resource_len(esb_pci, 0)); 398 pci_resource_len(esb_pci, 0));
407 if (BASEADDR == NULL) { 399 if (BASEADDR == NULL) {
408 /* Something's wrong here, BASEADDR has to be set */ 400 /* Something's wrong here, BASEADDR has to be set */
409 printk (KERN_ERR PFX "failed to get BASEADDR\n"); 401 printk(KERN_ERR PFX "failed to get BASEADDR\n");
410 goto err_release; 402 goto err_release;
411 } 403 }
412 404
413 /* 405 /*
414 * The watchdog has two timers, it can be setup so that the 406 * The watchdog has two timers, it can be setup so that the
@@ -425,7 +417,7 @@ static unsigned char __init esb_getdevice (void)
425 /* Check that the WDT isn't already locked */ 417 /* Check that the WDT isn't already locked */
426 pci_read_config_byte(esb_pci, ESB_LOCK_REG, &val1); 418 pci_read_config_byte(esb_pci, ESB_LOCK_REG, &val1);
427 if (val1 & ESB_WDT_LOCK) 419 if (val1 & ESB_WDT_LOCK)
428 printk (KERN_WARNING PFX "nowayout already set\n"); 420 printk(KERN_WARNING PFX "nowayout already set\n");
429 421
430 /* Set the timer to watchdog mode and disable it for now */ 422 /* Set the timer to watchdog mode and disable it for now */
431 pci_write_config_byte(esb_pci, ESB_LOCK_REG, 0x00); 423 pci_write_config_byte(esb_pci, ESB_LOCK_REG, 0x00);
@@ -452,44 +444,44 @@ err_devput:
452 return 0; 444 return 0;
453} 445}
454 446
455static int __init watchdog_init (void) 447static int __init watchdog_init(void)
456{ 448{
457 int ret; 449 int ret;
458 450
459 /* Check whether or not the hardware watchdog is there */ 451 /* Check whether or not the hardware watchdog is there */
460 if (!esb_getdevice () || esb_pci == NULL) 452 if (!esb_getdevice() || esb_pci == NULL)
461 return -ENODEV; 453 return -ENODEV;
462 454
463 /* Check that the heartbeat value is within it's range ; if not reset to the default */ 455 /* Check that the heartbeat value is within it's range;
464 if (esb_timer_set_heartbeat (heartbeat)) { 456 if not reset to the default */
465 esb_timer_set_heartbeat (WATCHDOG_HEARTBEAT); 457 if (esb_timer_set_heartbeat(heartbeat)) {
466 printk(KERN_INFO PFX "heartbeat value must be 1<heartbeat<2046, using %d\n", 458 esb_timer_set_heartbeat(WATCHDOG_HEARTBEAT);
467 heartbeat); 459 printk(KERN_INFO PFX
468 } 460 "heartbeat value must be 1<heartbeat<2046, using %d\n",
469 461 heartbeat);
470 ret = register_reboot_notifier(&esb_notifier); 462 }
471 if (ret != 0) { 463 ret = register_reboot_notifier(&esb_notifier);
472 printk(KERN_ERR PFX "cannot register reboot notifier (err=%d)\n", 464 if (ret != 0) {
473 ret); 465 printk(KERN_ERR PFX
474 goto err_unmap; 466 "cannot register reboot notifier (err=%d)\n", ret);
475 } 467 goto err_unmap;
476 468 }
477 ret = misc_register(&esb_miscdev);
478 if (ret != 0) {
479 printk(KERN_ERR PFX "cannot register miscdev on minor=%d (err=%d)\n",
480 WATCHDOG_MINOR, ret);
481 goto err_notifier;
482 }
483
484 esb_timer_stop ();
485
486 printk (KERN_INFO PFX "initialized (0x%p). heartbeat=%d sec (nowayout=%d)\n",
487 BASEADDR, heartbeat, nowayout);
488 469
489 return 0; 470 ret = misc_register(&esb_miscdev);
471 if (ret != 0) {
472 printk(KERN_ERR PFX
473 "cannot register miscdev on minor=%d (err=%d)\n",
474 WATCHDOG_MINOR, ret);
475 goto err_notifier;
476 }
477 esb_timer_stop();
478 printk(KERN_INFO PFX
479 "initialized (0x%p). heartbeat=%d sec (nowayout=%d)\n",
480 BASEADDR, heartbeat, nowayout);
481 return 0;
490 482
491err_notifier: 483err_notifier:
492 unregister_reboot_notifier(&esb_notifier); 484 unregister_reboot_notifier(&esb_notifier);
493err_unmap: 485err_unmap:
494 iounmap(BASEADDR); 486 iounmap(BASEADDR);
495/* err_release: */ 487/* err_release: */
@@ -498,18 +490,18 @@ err_unmap:
498 pci_disable_device(esb_pci); 490 pci_disable_device(esb_pci);
499/* err_devput: */ 491/* err_devput: */
500 pci_dev_put(esb_pci); 492 pci_dev_put(esb_pci);
501 return ret; 493 return ret;
502} 494}
503 495
504static void __exit watchdog_cleanup (void) 496static void __exit watchdog_cleanup(void)
505{ 497{
506 /* Stop the timer before we leave */ 498 /* Stop the timer before we leave */
507 if (!nowayout) 499 if (!nowayout)
508 esb_timer_stop (); 500 esb_timer_stop();
509 501
510 /* Deregister */ 502 /* Deregister */
511 misc_deregister(&esb_miscdev); 503 misc_deregister(&esb_miscdev);
512 unregister_reboot_notifier(&esb_notifier); 504 unregister_reboot_notifier(&esb_notifier);
513 iounmap(BASEADDR); 505 iounmap(BASEADDR);
514 pci_release_region(esb_pci, 0); 506 pci_release_region(esb_pci, 0);
515 pci_disable_device(esb_pci); 507 pci_disable_device(esb_pci);
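
The i6300esb probe switches from walking every PCI device with for_each_pci_dev()/pci_match_id() to a direct pci_get_device() lookup; the returned device is reference-counted, which is why the error paths end in pci_dev_put(). A reduced sketch of that lookup (illustrative only):

#include <linux/pci.h>

static struct pci_dev *example_find_esb(void)
{
	struct pci_dev *pdev;

	pdev = pci_get_device(PCI_VENDOR_ID_INTEL,
			      PCI_DEVICE_ID_INTEL_ESB_9, NULL);
	if (!pdev)
		return NULL;

	if (pci_enable_device(pdev)) {
		pci_dev_put(pdev);	/* drop the reference taken by pci_get_device() */
		return NULL;
	}
	return pdev;
}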
diff --git a/drivers/watchdog/iTCO_vendor.h b/drivers/watchdog/iTCO_vendor.h
new file mode 100644
index 000000000000..9e27e6422f66
--- /dev/null
+++ b/drivers/watchdog/iTCO_vendor.h
@@ -0,0 +1,15 @@
1/* iTCO Vendor Specific Support hooks */
2#ifdef CONFIG_ITCO_VENDOR_SUPPORT
3extern void iTCO_vendor_pre_start(unsigned long, unsigned int);
4extern void iTCO_vendor_pre_stop(unsigned long);
5extern void iTCO_vendor_pre_keepalive(unsigned long, unsigned int);
6extern void iTCO_vendor_pre_set_heartbeat(unsigned int);
7extern int iTCO_vendor_check_noreboot_on(void);
8#else
9#define iTCO_vendor_pre_start(acpibase, heartbeat) {}
10#define iTCO_vendor_pre_stop(acpibase) {}
11#define iTCO_vendor_pre_keepalive(acpibase, heartbeat) {}
12#define iTCO_vendor_pre_set_heartbeat(heartbeat) {}
13#define iTCO_vendor_check_noreboot_on() 1
14 /* 1=check noreboot; 0=don't check */
15#endif
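
The new header exposes the vendor hooks as real functions when CONFIG_ITCO_VENDOR_SUPPORT is set and turns them into empty statements otherwise, so the main driver can call them unconditionally. Roughly how a caller uses them (hypothetical sketch; the real call sites live in iTCO_wdt.c):

#include "iTCO_vendor.h"

static void example_wdt_start(unsigned long acpibase, unsigned int heartbeat)
{
	iTCO_vendor_pre_start(acpibase, heartbeat);	/* no-op unless vendor support is built in */
	/* ...program the TCO timer registers here... */
}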
diff --git a/drivers/watchdog/iTCO_vendor_support.c b/drivers/watchdog/iTCO_vendor_support.c
index cafc465f2ae3..ca344a85eb95 100644
--- a/drivers/watchdog/iTCO_vendor_support.c
+++ b/drivers/watchdog/iTCO_vendor_support.c
@@ -18,9 +18,9 @@
18 */ 18 */
19 19
20/* Module and version information */ 20/* Module and version information */
21#define DRV_NAME "iTCO_vendor_support" 21#define DRV_NAME "iTCO_vendor_support"
22#define DRV_VERSION "1.01" 22#define DRV_VERSION "1.01"
23#define DRV_RELDATE "11-Nov-2006" 23#define DRV_RELDATE "11-Nov-2006"
24#define PFX DRV_NAME ": " 24#define PFX DRV_NAME ": "
25 25
26/* Includes */ 26/* Includes */
@@ -31,19 +31,22 @@
31#include <linux/kernel.h> /* For printk/panic/... */ 31#include <linux/kernel.h> /* For printk/panic/... */
32#include <linux/init.h> /* For __init/__exit/... */ 32#include <linux/init.h> /* For __init/__exit/... */
33#include <linux/ioport.h> /* For io-port access */ 33#include <linux/ioport.h> /* For io-port access */
34#include <linux/io.h> /* For inb/outb/... */
34 35
35#include <asm/io.h> /* For inb/outb/... */ 36#include "iTCO_vendor.h"
36 37
37/* iTCO defines */ 38/* iTCO defines */
38#define SMI_EN acpibase + 0x30 /* SMI Control and Enable Register */ 39#define SMI_EN acpibase + 0x30 /* SMI Control and Enable Register */
39#define TCOBASE acpibase + 0x60 /* TCO base address */ 40#define TCOBASE acpibase + 0x60 /* TCO base address */
40#define TCO1_STS TCOBASE + 0x04 /* TCO1 Status Register */ 41#define TCO1_STS TCOBASE + 0x04 /* TCO1 Status Register */
41 42
42/* List of vendor support modes */ 43/* List of vendor support modes */
43#define SUPERMICRO_OLD_BOARD 1 /* SuperMicro Pentium 3 Era 370SSE+-OEM1/P3TSSE */ 44/* SuperMicro Pentium 3 Era 370SSE+-OEM1/P3TSSE */
44#define SUPERMICRO_NEW_BOARD 2 /* SuperMicro Pentium 4 / Xeon 4 / EMT64T Era Systems */ 45#define SUPERMICRO_OLD_BOARD 1
46/* SuperMicro Pentium 4 / Xeon 4 / EMT64T Era Systems */
47#define SUPERMICRO_NEW_BOARD 2
45 48
46static int vendorsupport = 0; 49static int vendorsupport;
47module_param(vendorsupport, int, 0); 50module_param(vendorsupport, int, 0);
48MODULE_PARM_DESC(vendorsupport, "iTCO vendor specific support mode, default=0 (none), 1=SuperMicro Pent3, 2=SuperMicro Pent4+"); 51MODULE_PARM_DESC(vendorsupport, "iTCO vendor specific support mode, default=0 (none), 1=SuperMicro Pent3, 2=SuperMicro Pent4+");
49 52
@@ -143,34 +146,35 @@ static void supermicro_old_pre_keepalive(unsigned long acpibase)
143 */ 146 */
144 147
145/* I/O Port's */ 148/* I/O Port's */
146#define SM_REGINDEX 0x2e /* SuperMicro ICH4+ Register Index */ 149#define SM_REGINDEX 0x2e /* SuperMicro ICH4+ Register Index */
147#define SM_DATAIO 0x2f /* SuperMicro ICH4+ Register Data I/O */ 150#define SM_DATAIO 0x2f /* SuperMicro ICH4+ Register Data I/O */
148 151
149/* Control Register's */ 152/* Control Register's */
150#define SM_CTLPAGESW 0x07 /* SuperMicro ICH4+ Control Page Switch */ 153#define SM_CTLPAGESW 0x07 /* SuperMicro ICH4+ Control Page Switch */
151#define SM_CTLPAGE 0x08 /* SuperMicro ICH4+ Control Page Num */ 154#define SM_CTLPAGE 0x08 /* SuperMicro ICH4+ Control Page Num */
152 155
153#define SM_WATCHENABLE 0x30 /* Watchdog enable: Bit 0: 0=off, 1=on */ 156#define SM_WATCHENABLE 0x30 /* Watchdog enable: Bit 0: 0=off, 1=on */
154 157
155#define SM_WATCHPAGE 0x87 /* Watchdog unlock control page */ 158#define SM_WATCHPAGE 0x87 /* Watchdog unlock control page */
156 159
157#define SM_ENDWATCH 0xAA /* Watchdog lock control page */ 160#define SM_ENDWATCH 0xAA /* Watchdog lock control page */
158 161
159#define SM_COUNTMODE 0xf5 /* Watchdog count mode select */ 162#define SM_COUNTMODE 0xf5 /* Watchdog count mode select */
160 /* (Bit 3: 0 = seconds, 1 = minutes */ 163 /* (Bit 3: 0 = seconds, 1 = minutes */
161 164
162#define SM_WATCHTIMER 0xf6 /* 8-bits, Watchdog timer counter (RW) */ 165#define SM_WATCHTIMER 0xf6 /* 8-bits, Watchdog timer counter (RW) */
163 166
164#define SM_RESETCONTROL 0xf7 /* Watchdog reset control */ 167#define SM_RESETCONTROL 0xf7 /* Watchdog reset control */
165 /* Bit 6: timer is reset by kbd interrupt */ 168 /* Bit 6: timer is reset by kbd interrupt */
166 /* Bit 7: timer is reset by mouse interrupt */ 169 /* Bit 7: timer is reset by mouse interrupt */
167 170
168static void supermicro_new_unlock_watchdog(void) 171static void supermicro_new_unlock_watchdog(void)
169{ 172{
170 outb(SM_WATCHPAGE, SM_REGINDEX); /* Write 0x87 to port 0x2e twice */ 173 /* Write 0x87 to port 0x2e twice */
171 outb(SM_WATCHPAGE, SM_REGINDEX); 174 outb(SM_WATCHPAGE, SM_REGINDEX);
172 175 outb(SM_WATCHPAGE, SM_REGINDEX);
173 outb(SM_CTLPAGESW, SM_REGINDEX); /* Switch to watchdog control page */ 176 /* Switch to watchdog control page */
177 outb(SM_CTLPAGESW, SM_REGINDEX);
174 outb(SM_CTLPAGE, SM_DATAIO); 178 outb(SM_CTLPAGE, SM_DATAIO);
175} 179}
176 180
@@ -192,7 +196,7 @@ static void supermicro_new_pre_start(unsigned int heartbeat)
192 outb(val, SM_DATAIO); 196 outb(val, SM_DATAIO);
193 197
194 /* Write heartbeat interval to WDOG */ 198 /* Write heartbeat interval to WDOG */
195 outb (SM_WATCHTIMER, SM_REGINDEX); 199 outb(SM_WATCHTIMER, SM_REGINDEX);
196 outb((heartbeat & 255), SM_DATAIO); 200 outb((heartbeat & 255), SM_DATAIO);
197 201
198 /* Make sure keyboard/mouse interrupts don't interfere */ 202 /* Make sure keyboard/mouse interrupts don't interfere */
@@ -277,7 +281,7 @@ EXPORT_SYMBOL(iTCO_vendor_pre_set_heartbeat);
277 281
278int iTCO_vendor_check_noreboot_on(void) 282int iTCO_vendor_check_noreboot_on(void)
279{ 283{
280 switch(vendorsupport) { 284 switch (vendorsupport) {
281 case SUPERMICRO_OLD_BOARD: 285 case SUPERMICRO_OLD_BOARD:
282 return 0; 286 return 0;
283 default: 287 default:
@@ -288,13 +292,13 @@ EXPORT_SYMBOL(iTCO_vendor_check_noreboot_on);
288 292
289static int __init iTCO_vendor_init_module(void) 293static int __init iTCO_vendor_init_module(void)
290{ 294{
291 printk (KERN_INFO PFX "vendor-support=%d\n", vendorsupport); 295 printk(KERN_INFO PFX "vendor-support=%d\n", vendorsupport);
292 return 0; 296 return 0;
293} 297}
294 298
295static void __exit iTCO_vendor_exit_module(void) 299static void __exit iTCO_vendor_exit_module(void)
296{ 300{
297 printk (KERN_INFO PFX "Module Unloaded\n"); 301 printk(KERN_INFO PFX "Module Unloaded\n");
298} 302}
299 303
300module_init(iTCO_vendor_init_module); 304module_init(iTCO_vendor_init_module);
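The iTCO_vendor_support.c hunks above are checkpatch-style cleanups: <asm/io.h> becomes <linux/io.h>, the redundant "= 0" initialiser on a static module parameter is dropped, over-long comment lines are split, function calls lose the stray space before "(" while keywords such as switch gain one. A tiny self-contained module sketch in that cleaned-up style, using example names only (this is not code from the patch):

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/io.h>		/* For inb/outb/... (replaces <asm/io.h>) */

static int vendorsupport;	/* statics are zero-initialised; "= 0" is redundant */
module_param(vendorsupport, int, 0);
MODULE_PARM_DESC(vendorsupport, "example vendor support mode, default=0");

static int __init example_init(void)
{
	printk(KERN_INFO "example: vendor-support=%d\n", vendorsupport);
	return 0;
}

static void __exit example_exit(void)
{
	printk(KERN_INFO "example: module unloaded\n");
}

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL");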
diff --git a/drivers/watchdog/iTCO_wdt.c b/drivers/watchdog/iTCO_wdt.c
index 95ba985bd341..bfb93bc2ca9f 100644
--- a/drivers/watchdog/iTCO_wdt.c
+++ b/drivers/watchdog/iTCO_wdt.c
@@ -55,9 +55,9 @@
55 */ 55 */
56 56
57/* Module and version information */ 57/* Module and version information */
58#define DRV_NAME "iTCO_wdt" 58#define DRV_NAME "iTCO_wdt"
59#define DRV_VERSION "1.03" 59#define DRV_VERSION "1.03"
60#define DRV_RELDATE "30-Apr-2008" 60#define DRV_RELDATE "30-Apr-2008"
61#define PFX DRV_NAME ": " 61#define PFX DRV_NAME ": "
62 62
63/* Includes */ 63/* Includes */
@@ -66,7 +66,8 @@
66#include <linux/types.h> /* For standard types (like size_t) */ 66#include <linux/types.h> /* For standard types (like size_t) */
67#include <linux/errno.h> /* For the -ENODEV/... values */ 67#include <linux/errno.h> /* For the -ENODEV/... values */
68#include <linux/kernel.h> /* For printk/panic/... */ 68#include <linux/kernel.h> /* For printk/panic/... */
69#include <linux/miscdevice.h> /* For MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR) */ 69#include <linux/miscdevice.h> /* For MODULE_ALIAS_MISCDEV
70 (WATCHDOG_MINOR) */
70#include <linux/watchdog.h> /* For the watchdog specific items */ 71#include <linux/watchdog.h> /* For the watchdog specific items */
71#include <linux/init.h> /* For __init/__exit/... */ 72#include <linux/init.h> /* For __init/__exit/... */
72#include <linux/fs.h> /* For file operations */ 73#include <linux/fs.h> /* For file operations */
@@ -74,9 +75,10 @@
74#include <linux/pci.h> /* For pci functions */ 75#include <linux/pci.h> /* For pci functions */
75#include <linux/ioport.h> /* For io-port access */ 76#include <linux/ioport.h> /* For io-port access */
76#include <linux/spinlock.h> /* For spin_lock/spin_unlock/... */ 77#include <linux/spinlock.h> /* For spin_lock/spin_unlock/... */
78#include <linux/uaccess.h> /* For copy_to_user/put_user/... */
79#include <linux/io.h> /* For inb/outb/... */
77 80
78#include <asm/uaccess.h> /* For copy_to_user/put_user/... */ 81#include "iTCO_vendor.h"
79#include <asm/io.h> /* For inb/outb/... */
80 82
81/* TCO related info */ 83/* TCO related info */
82enum iTCO_chipsets { 84enum iTCO_chipsets {
@@ -105,7 +107,7 @@ enum iTCO_chipsets {
105 TCO_ICH9, /* ICH9 */ 107 TCO_ICH9, /* ICH9 */
106 TCO_ICH9R, /* ICH9R */ 108 TCO_ICH9R, /* ICH9R */
107 TCO_ICH9DH, /* ICH9DH */ 109 TCO_ICH9DH, /* ICH9DH */
108 TCO_ICH9DO, /* ICH9DO */ 110 TCO_ICH9DO, /* ICH9DO */
109 TCO_631XESB, /* 631xESB/632xESB */ 111 TCO_631XESB, /* 631xESB/632xESB */
110}; 112};
111 113
@@ -140,7 +142,7 @@ static struct {
140 {"ICH9DH", 2}, 142 {"ICH9DH", 2},
141 {"ICH9DO", 2}, 143 {"ICH9DO", 2},
142 {"631xESB/632xESB", 2}, 144 {"631xESB/632xESB", 2},
143 {NULL,0} 145 {NULL, 0}
144}; 146};
145 147
146#define ITCO_PCI_DEVICE(dev, data) \ 148#define ITCO_PCI_DEVICE(dev, data) \
@@ -159,32 +161,32 @@ static struct {
159 * functions that probably will be registered by other drivers. 161 * functions that probably will be registered by other drivers.
160 */ 162 */
161static struct pci_device_id iTCO_wdt_pci_tbl[] = { 163static struct pci_device_id iTCO_wdt_pci_tbl[] = {
162 { ITCO_PCI_DEVICE(PCI_DEVICE_ID_INTEL_82801AA_0, TCO_ICH )}, 164 { ITCO_PCI_DEVICE(PCI_DEVICE_ID_INTEL_82801AA_0, TCO_ICH)},
163 { ITCO_PCI_DEVICE(PCI_DEVICE_ID_INTEL_82801AB_0, TCO_ICH0 )}, 165 { ITCO_PCI_DEVICE(PCI_DEVICE_ID_INTEL_82801AB_0, TCO_ICH0)},
164 { ITCO_PCI_DEVICE(PCI_DEVICE_ID_INTEL_82801BA_0, TCO_ICH2 )}, 166 { ITCO_PCI_DEVICE(PCI_DEVICE_ID_INTEL_82801BA_0, TCO_ICH2)},
165 { ITCO_PCI_DEVICE(PCI_DEVICE_ID_INTEL_82801BA_10, TCO_ICH2M )}, 167 { ITCO_PCI_DEVICE(PCI_DEVICE_ID_INTEL_82801BA_10, TCO_ICH2M)},
166 { ITCO_PCI_DEVICE(PCI_DEVICE_ID_INTEL_82801CA_0, TCO_ICH3 )}, 168 { ITCO_PCI_DEVICE(PCI_DEVICE_ID_INTEL_82801CA_0, TCO_ICH3)},
167 { ITCO_PCI_DEVICE(PCI_DEVICE_ID_INTEL_82801CA_12, TCO_ICH3M )}, 169 { ITCO_PCI_DEVICE(PCI_DEVICE_ID_INTEL_82801CA_12, TCO_ICH3M)},
168 { ITCO_PCI_DEVICE(PCI_DEVICE_ID_INTEL_82801DB_0, TCO_ICH4 )}, 170 { ITCO_PCI_DEVICE(PCI_DEVICE_ID_INTEL_82801DB_0, TCO_ICH4)},
169 { ITCO_PCI_DEVICE(PCI_DEVICE_ID_INTEL_82801DB_12, TCO_ICH4M )}, 171 { ITCO_PCI_DEVICE(PCI_DEVICE_ID_INTEL_82801DB_12, TCO_ICH4M)},
170 { ITCO_PCI_DEVICE(PCI_DEVICE_ID_INTEL_82801E_0, TCO_CICH )}, 172 { ITCO_PCI_DEVICE(PCI_DEVICE_ID_INTEL_82801E_0, TCO_CICH)},
171 { ITCO_PCI_DEVICE(PCI_DEVICE_ID_INTEL_82801EB_0, TCO_ICH5 )}, 173 { ITCO_PCI_DEVICE(PCI_DEVICE_ID_INTEL_82801EB_0, TCO_ICH5)},
172 { ITCO_PCI_DEVICE(PCI_DEVICE_ID_INTEL_ESB_1, TCO_6300ESB)}, 174 { ITCO_PCI_DEVICE(PCI_DEVICE_ID_INTEL_ESB_1, TCO_6300ESB)},
173 { ITCO_PCI_DEVICE(PCI_DEVICE_ID_INTEL_ICH6_0, TCO_ICH6 )}, 175 { ITCO_PCI_DEVICE(PCI_DEVICE_ID_INTEL_ICH6_0, TCO_ICH6)},
174 { ITCO_PCI_DEVICE(PCI_DEVICE_ID_INTEL_ICH6_1, TCO_ICH6M )}, 176 { ITCO_PCI_DEVICE(PCI_DEVICE_ID_INTEL_ICH6_1, TCO_ICH6M)},
175 { ITCO_PCI_DEVICE(PCI_DEVICE_ID_INTEL_ICH6_2, TCO_ICH6W )}, 177 { ITCO_PCI_DEVICE(PCI_DEVICE_ID_INTEL_ICH6_2, TCO_ICH6W)},
176 { ITCO_PCI_DEVICE(PCI_DEVICE_ID_INTEL_ICH7_0, TCO_ICH7 )}, 178 { ITCO_PCI_DEVICE(PCI_DEVICE_ID_INTEL_ICH7_0, TCO_ICH7)},
177 { ITCO_PCI_DEVICE(PCI_DEVICE_ID_INTEL_ICH7_1, TCO_ICH7M )}, 179 { ITCO_PCI_DEVICE(PCI_DEVICE_ID_INTEL_ICH7_1, TCO_ICH7M)},
178 { ITCO_PCI_DEVICE(PCI_DEVICE_ID_INTEL_ICH7_31, TCO_ICH7MDH)}, 180 { ITCO_PCI_DEVICE(PCI_DEVICE_ID_INTEL_ICH7_31, TCO_ICH7MDH)},
179 { ITCO_PCI_DEVICE(PCI_DEVICE_ID_INTEL_ICH8_0, TCO_ICH8 )}, 181 { ITCO_PCI_DEVICE(PCI_DEVICE_ID_INTEL_ICH8_0, TCO_ICH8)},
180 { ITCO_PCI_DEVICE(PCI_DEVICE_ID_INTEL_ICH8_1, TCO_ICH8ME )}, 182 { ITCO_PCI_DEVICE(PCI_DEVICE_ID_INTEL_ICH8_1, TCO_ICH8ME)},
181 { ITCO_PCI_DEVICE(PCI_DEVICE_ID_INTEL_ICH8_2, TCO_ICH8DH )}, 183 { ITCO_PCI_DEVICE(PCI_DEVICE_ID_INTEL_ICH8_2, TCO_ICH8DH)},
182 { ITCO_PCI_DEVICE(PCI_DEVICE_ID_INTEL_ICH8_3, TCO_ICH8DO )}, 184 { ITCO_PCI_DEVICE(PCI_DEVICE_ID_INTEL_ICH8_3, TCO_ICH8DO)},
183 { ITCO_PCI_DEVICE(PCI_DEVICE_ID_INTEL_ICH8_4, TCO_ICH8M )}, 185 { ITCO_PCI_DEVICE(PCI_DEVICE_ID_INTEL_ICH8_4, TCO_ICH8M)},
184 { ITCO_PCI_DEVICE(0x2918, TCO_ICH9 )}, 186 { ITCO_PCI_DEVICE(0x2918, TCO_ICH9)},
185 { ITCO_PCI_DEVICE(0x2916, TCO_ICH9R )}, 187 { ITCO_PCI_DEVICE(0x2916, TCO_ICH9R)},
186 { ITCO_PCI_DEVICE(PCI_DEVICE_ID_INTEL_ICH9_2, TCO_ICH9DH )}, 188 { ITCO_PCI_DEVICE(PCI_DEVICE_ID_INTEL_ICH9_2, TCO_ICH9DH)},
187 { ITCO_PCI_DEVICE(PCI_DEVICE_ID_INTEL_ICH9_4, TCO_ICH9DO )}, 189 { ITCO_PCI_DEVICE(PCI_DEVICE_ID_INTEL_ICH9_4, TCO_ICH9DO)},
188 { ITCO_PCI_DEVICE(PCI_DEVICE_ID_INTEL_ESB2_0, TCO_631XESB)}, 190 { ITCO_PCI_DEVICE(PCI_DEVICE_ID_INTEL_ESB2_0, TCO_631XESB)},
189 { ITCO_PCI_DEVICE(0x2671, TCO_631XESB)}, 191 { ITCO_PCI_DEVICE(0x2671, TCO_631XESB)},
190 { ITCO_PCI_DEVICE(0x2672, TCO_631XESB)}, 192 { ITCO_PCI_DEVICE(0x2672, TCO_631XESB)},
@@ -203,13 +205,15 @@ static struct pci_device_id iTCO_wdt_pci_tbl[] = {
203 { ITCO_PCI_DEVICE(0x267f, TCO_631XESB)}, 205 { ITCO_PCI_DEVICE(0x267f, TCO_631XESB)},
204 { 0, }, /* End of list */ 206 { 0, }, /* End of list */
205}; 207};
206MODULE_DEVICE_TABLE (pci, iTCO_wdt_pci_tbl); 208MODULE_DEVICE_TABLE(pci, iTCO_wdt_pci_tbl);
207 209
208/* Address definitions for the TCO */ 210/* Address definitions for the TCO */
209#define TCOBASE iTCO_wdt_private.ACPIBASE + 0x60 /* TCO base address */ 211/* TCO base address */
210#define SMI_EN iTCO_wdt_private.ACPIBASE + 0x30 /* SMI Control and Enable Register */ 212#define TCOBASE iTCO_wdt_private.ACPIBASE + 0x60
213/* SMI Control and Enable Register */
214#define SMI_EN iTCO_wdt_private.ACPIBASE + 0x30
211 215
212#define TCO_RLD TCOBASE + 0x00 /* TCO Timer Reload and Current Value */ 216#define TCO_RLD TCOBASE + 0x00 /* TCO Timer Reload and Curr. Value */
213#define TCOv1_TMR TCOBASE + 0x01 /* TCOv1 Timer Initial Value */ 217#define TCOv1_TMR TCOBASE + 0x01 /* TCOv1 Timer Initial Value */
214#define TCO_DAT_IN TCOBASE + 0x02 /* TCO Data In Register */ 218#define TCO_DAT_IN TCOBASE + 0x02 /* TCO Data In Register */
215#define TCO_DAT_OUT TCOBASE + 0x03 /* TCO Data Out Register */ 219#define TCO_DAT_OUT TCOBASE + 0x03 /* TCO Data Out Register */
@@ -222,15 +226,21 @@ MODULE_DEVICE_TABLE (pci, iTCO_wdt_pci_tbl);
222/* internal variables */ 226/* internal variables */
223static unsigned long is_active; 227static unsigned long is_active;
224static char expect_release; 228static char expect_release;
225static struct { /* this is private data for the iTCO_wdt device */ 229static struct { /* this is private data for the iTCO_wdt device */
226 unsigned int iTCO_version; /* TCO version/generation */ 230 /* TCO version/generation */
227 unsigned long ACPIBASE; /* The cards ACPIBASE address (TCOBASE = ACPIBASE+0x60) */ 231 unsigned int iTCO_version;
228 unsigned long __iomem *gcs; /* NO_REBOOT flag is Memory-Mapped GCS register bit 5 (TCO version 2) */ 232 /* The cards ACPIBASE address (TCOBASE = ACPIBASE+0x60) */
229 spinlock_t io_lock; /* the lock for io operations */ 233 unsigned long ACPIBASE;
230 struct pci_dev *pdev; /* the PCI-device */ 234 /* NO_REBOOT flag is Memory-Mapped GCS register bit 5 (TCO version 2)*/
235 unsigned long __iomem *gcs;
236 /* the lock for io operations */
237 spinlock_t io_lock;
238 /* the PCI-device */
239 struct pci_dev *pdev;
231} iTCO_wdt_private; 240} iTCO_wdt_private;
232 241
233static struct platform_device *iTCO_wdt_platform_device; /* the watchdog platform device */ 242/* the watchdog platform device */
243static struct platform_device *iTCO_wdt_platform_device;
234 244
235/* module parameters */ 245/* module parameters */
236#define WATCHDOG_HEARTBEAT 30 /* 30 sec default heartbeat */ 246#define WATCHDOG_HEARTBEAT 30 /* 30 sec default heartbeat */
@@ -240,22 +250,9 @@ MODULE_PARM_DESC(heartbeat, "Watchdog heartbeat in seconds. (2<heartbeat<39 (TCO
240 250
241static int nowayout = WATCHDOG_NOWAYOUT; 251static int nowayout = WATCHDOG_NOWAYOUT;
242module_param(nowayout, int, 0); 252module_param(nowayout, int, 0);
243MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped once started (default=" __MODULE_STRING(WATCHDOG_NOWAYOUT) ")"); 253MODULE_PARM_DESC(nowayout,
244 254 "Watchdog cannot be stopped once started (default="
245/* iTCO Vendor Specific Support hooks */ 255 __MODULE_STRING(WATCHDOG_NOWAYOUT) ")");
246#ifdef CONFIG_ITCO_VENDOR_SUPPORT
247extern void iTCO_vendor_pre_start(unsigned long, unsigned int);
248extern void iTCO_vendor_pre_stop(unsigned long);
249extern void iTCO_vendor_pre_keepalive(unsigned long, unsigned int);
250extern void iTCO_vendor_pre_set_heartbeat(unsigned int);
251extern int iTCO_vendor_check_noreboot_on(void);
252#else
253#define iTCO_vendor_pre_start(acpibase, heartbeat) {}
254#define iTCO_vendor_pre_stop(acpibase) {}
255#define iTCO_vendor_pre_keepalive(acpibase,heartbeat) {}
256#define iTCO_vendor_pre_set_heartbeat(heartbeat) {}
257#define iTCO_vendor_check_noreboot_on() 1 /* 1=check noreboot; 0=don't check */
258#endif
259 256
260/* 257/*
261 * Some TCO specific functions 258 * Some TCO specific functions
@@ -369,11 +366,10 @@ static int iTCO_wdt_keepalive(void)
369 iTCO_vendor_pre_keepalive(iTCO_wdt_private.ACPIBASE, heartbeat); 366 iTCO_vendor_pre_keepalive(iTCO_wdt_private.ACPIBASE, heartbeat);
370 367
371 /* Reload the timer by writing to the TCO Timer Counter register */ 368 /* Reload the timer by writing to the TCO Timer Counter register */
372 if (iTCO_wdt_private.iTCO_version == 2) { 369 if (iTCO_wdt_private.iTCO_version == 2)
373 outw(0x01, TCO_RLD); 370 outw(0x01, TCO_RLD);
374 } else if (iTCO_wdt_private.iTCO_version == 1) { 371 else if (iTCO_wdt_private.iTCO_version == 1)
375 outb(0x01, TCO_RLD); 372 outb(0x01, TCO_RLD);
376 }
377 373
378 spin_unlock(&iTCO_wdt_private.io_lock); 374 spin_unlock(&iTCO_wdt_private.io_lock);
379 return 0; 375 return 0;
@@ -425,7 +421,7 @@ static int iTCO_wdt_set_heartbeat(int t)
425 return 0; 421 return 0;
426} 422}
427 423
428static int iTCO_wdt_get_timeleft (int *time_left) 424static int iTCO_wdt_get_timeleft(int *time_left)
429{ 425{
430 unsigned int val16; 426 unsigned int val16;
431 unsigned char val8; 427 unsigned char val8;
@@ -454,7 +450,7 @@ static int iTCO_wdt_get_timeleft (int *time_left)
454 * /dev/watchdog handling 450 * /dev/watchdog handling
455 */ 451 */
456 452
457static int iTCO_wdt_open (struct inode *inode, struct file *file) 453static int iTCO_wdt_open(struct inode *inode, struct file *file)
458{ 454{
459 /* /dev/watchdog can only be opened once */ 455 /* /dev/watchdog can only be opened once */
460 if (test_and_set_bit(0, &is_active)) 456 if (test_and_set_bit(0, &is_active))
@@ -468,7 +464,7 @@ static int iTCO_wdt_open (struct inode *inode, struct file *file)
468 return nonseekable_open(inode, file); 464 return nonseekable_open(inode, file);
469} 465}
470 466
471static int iTCO_wdt_release (struct inode *inode, struct file *file) 467static int iTCO_wdt_release(struct inode *inode, struct file *file)
472{ 468{
473 /* 469 /*
474 * Shut off the timer. 470 * Shut off the timer.
@@ -476,7 +472,8 @@ static int iTCO_wdt_release (struct inode *inode, struct file *file)
476 if (expect_release == 42) { 472 if (expect_release == 42) {
477 iTCO_wdt_stop(); 473 iTCO_wdt_stop();
478 } else { 474 } else {
479 printk(KERN_CRIT PFX "Unexpected close, not stopping watchdog!\n"); 475 printk(KERN_CRIT PFX
476 "Unexpected close, not stopping watchdog!\n");
480 iTCO_wdt_keepalive(); 477 iTCO_wdt_keepalive();
481 } 478 }
482 clear_bit(0, &is_active); 479 clear_bit(0, &is_active);
@@ -484,22 +481,23 @@ static int iTCO_wdt_release (struct inode *inode, struct file *file)
484 return 0; 481 return 0;
485} 482}
486 483
487static ssize_t iTCO_wdt_write (struct file *file, const char __user *data, 484static ssize_t iTCO_wdt_write(struct file *file, const char __user *data,
488 size_t len, loff_t * ppos) 485 size_t len, loff_t *ppos)
489{ 486{
490 /* See if we got the magic character 'V' and reload the timer */ 487 /* See if we got the magic character 'V' and reload the timer */
491 if (len) { 488 if (len) {
492 if (!nowayout) { 489 if (!nowayout) {
493 size_t i; 490 size_t i;
494 491
495 /* note: just in case someone wrote the magic character 492 /* note: just in case someone wrote the magic
496 * five months ago... */ 493 character five months ago... */
497 expect_release = 0; 494 expect_release = 0;
498 495
499 /* scan to see whether or not we got the magic character */ 496 /* scan to see whether or not we got the
497 magic character */
500 for (i = 0; i != len; i++) { 498 for (i = 0; i != len; i++) {
501 char c; 499 char c;
502 if (get_user(c, data+i)) 500 if (get_user(c, data + i))
503 return -EFAULT; 501 return -EFAULT;
504 if (c == 'V') 502 if (c == 'V')
505 expect_release = 42; 503 expect_release = 42;
@@ -512,8 +510,8 @@ static ssize_t iTCO_wdt_write (struct file *file, const char __user *data,
512 return len; 510 return len;
513} 511}
514 512
515static int iTCO_wdt_ioctl (struct inode *inode, struct file *file, 513static long iTCO_wdt_ioctl(struct file *file, unsigned int cmd,
516 unsigned int cmd, unsigned long arg) 514 unsigned long arg)
517{ 515{
518 int new_options, retval = -EINVAL; 516 int new_options, retval = -EINVAL;
519 int new_heartbeat; 517 int new_heartbeat;
@@ -528,64 +526,52 @@ static int iTCO_wdt_ioctl (struct inode *inode, struct file *file,
528 }; 526 };
529 527
530 switch (cmd) { 528 switch (cmd) {
531 case WDIOC_GETSUPPORT: 529 case WDIOC_GETSUPPORT:
532 return copy_to_user(argp, &ident, 530 return copy_to_user(argp, &ident, sizeof(ident)) ? -EFAULT : 0;
533 sizeof (ident)) ? -EFAULT : 0; 531 case WDIOC_GETSTATUS:
534 532 case WDIOC_GETBOOTSTATUS:
535 case WDIOC_GETSTATUS: 533 return put_user(0, p);
536 case WDIOC_GETBOOTSTATUS: 534
537 return put_user(0, p); 535 case WDIOC_SETOPTIONS:
538 536 {
539 case WDIOC_KEEPALIVE: 537 if (get_user(new_options, p))
540 iTCO_wdt_keepalive(); 538 return -EFAULT;
541 return 0; 539
542 540 if (new_options & WDIOS_DISABLECARD) {
543 case WDIOC_SETOPTIONS: 541 iTCO_wdt_stop();
544 { 542 retval = 0;
545 if (get_user(new_options, p))
546 return -EFAULT;
547
548 if (new_options & WDIOS_DISABLECARD) {
549 iTCO_wdt_stop();
550 retval = 0;
551 }
552
553 if (new_options & WDIOS_ENABLECARD) {
554 iTCO_wdt_keepalive();
555 iTCO_wdt_start();
556 retval = 0;
557 }
558
559 return retval;
560 } 543 }
561 544 if (new_options & WDIOS_ENABLECARD) {
562 case WDIOC_SETTIMEOUT:
563 {
564 if (get_user(new_heartbeat, p))
565 return -EFAULT;
566
567 if (iTCO_wdt_set_heartbeat(new_heartbeat))
568 return -EINVAL;
569
570 iTCO_wdt_keepalive(); 545 iTCO_wdt_keepalive();
571 /* Fall */ 546 iTCO_wdt_start();
572 } 547 retval = 0;
573
574 case WDIOC_GETTIMEOUT:
575 return put_user(heartbeat, p);
576
577 case WDIOC_GETTIMELEFT:
578 {
579 int time_left;
580
581 if (iTCO_wdt_get_timeleft(&time_left))
582 return -EINVAL;
583
584 return put_user(time_left, p);
585 } 548 }
549 return retval;
550 }
551 case WDIOC_KEEPALIVE:
552 iTCO_wdt_keepalive();
553 return 0;
586 554
587 default: 555 case WDIOC_SETTIMEOUT:
588 return -ENOTTY; 556 {
557 if (get_user(new_heartbeat, p))
558 return -EFAULT;
559 if (iTCO_wdt_set_heartbeat(new_heartbeat))
560 return -EINVAL;
561 iTCO_wdt_keepalive();
562 /* Fall */
563 }
564 case WDIOC_GETTIMEOUT:
565 return put_user(heartbeat, p);
566 case WDIOC_GETTIMELEFT:
567 {
568 int time_left;
569 if (iTCO_wdt_get_timeleft(&time_left))
570 return -EINVAL;
571 return put_user(time_left, p);
572 }
573 default:
574 return -ENOTTY;
589 } 575 }
590} 576}
591 577
@@ -594,12 +580,12 @@ static int iTCO_wdt_ioctl (struct inode *inode, struct file *file,
594 */ 580 */
595 581
596static const struct file_operations iTCO_wdt_fops = { 582static const struct file_operations iTCO_wdt_fops = {
597 .owner = THIS_MODULE, 583 .owner = THIS_MODULE,
598 .llseek = no_llseek, 584 .llseek = no_llseek,
599 .write = iTCO_wdt_write, 585 .write = iTCO_wdt_write,
600 .ioctl = iTCO_wdt_ioctl, 586 .unlocked_ioctl = iTCO_wdt_ioctl,
601 .open = iTCO_wdt_open, 587 .open = iTCO_wdt_open,
602 .release = iTCO_wdt_release, 588 .release = iTCO_wdt_release,
603}; 589};
604 590
605static struct miscdevice iTCO_wdt_miscdev = { 591static struct miscdevice iTCO_wdt_miscdev = {
@@ -612,7 +598,8 @@ static struct miscdevice iTCO_wdt_miscdev = {
612 * Init & exit routines 598 * Init & exit routines
613 */ 599 */
614 600
615static int __devinit iTCO_wdt_init(struct pci_dev *pdev, const struct pci_device_id *ent, struct platform_device *dev) 601static int __devinit iTCO_wdt_init(struct pci_dev *pdev,
602 const struct pci_device_id *ent, struct platform_device *dev)
616{ 603{
617 int ret; 604 int ret;
618 u32 base_address; 605 u32 base_address;
@@ -632,17 +619,19 @@ static int __devinit iTCO_wdt_init(struct pci_dev *pdev, const struct pci_device
632 pci_dev_put(pdev); 619 pci_dev_put(pdev);
633 return -ENODEV; 620 return -ENODEV;
634 } 621 }
635 iTCO_wdt_private.iTCO_version = iTCO_chipset_info[ent->driver_data].iTCO_version; 622 iTCO_wdt_private.iTCO_version =
623 iTCO_chipset_info[ent->driver_data].iTCO_version;
636 iTCO_wdt_private.ACPIBASE = base_address; 624 iTCO_wdt_private.ACPIBASE = base_address;
637 iTCO_wdt_private.pdev = pdev; 625 iTCO_wdt_private.pdev = pdev;
638 626
639 /* Get the Memory-Mapped GCS register, we need it for the NO_REBOOT flag (TCO v2) */ 627 /* Get the Memory-Mapped GCS register, we need it for the
640 /* To get access to it you have to read RCBA from PCI Config space 0xf0 628 NO_REBOOT flag (TCO v2). To get access to it you have to
641 and use it as base. GCS = RCBA + ICH6_GCS(0x3410). */ 629 read RCBA from PCI Config space 0xf0 and use it as base.
630 GCS = RCBA + ICH6_GCS(0x3410). */
642 if (iTCO_wdt_private.iTCO_version == 2) { 631 if (iTCO_wdt_private.iTCO_version == 2) {
643 pci_read_config_dword(pdev, 0xf0, &base_address); 632 pci_read_config_dword(pdev, 0xf0, &base_address);
644 RCBA = base_address & 0xffffc000; 633 RCBA = base_address & 0xffffc000;
645 iTCO_wdt_private.gcs = ioremap((RCBA + 0x3410),4); 634 iTCO_wdt_private.gcs = ioremap((RCBA + 0x3410), 4);
646 } 635 }
647 636
648 /* Check chipset's NO_REBOOT bit */ 637 /* Check chipset's NO_REBOOT bit */
@@ -657,8 +646,8 @@ static int __devinit iTCO_wdt_init(struct pci_dev *pdev, const struct pci_device
657 646
658 /* Set the TCO_EN bit in SMI_EN register */ 647 /* Set the TCO_EN bit in SMI_EN register */
659 if (!request_region(SMI_EN, 4, "iTCO_wdt")) { 648 if (!request_region(SMI_EN, 4, "iTCO_wdt")) {
660 printk(KERN_ERR PFX "I/O address 0x%04lx already in use\n", 649 printk(KERN_ERR PFX
661 SMI_EN ); 650 "I/O address 0x%04lx already in use\n", SMI_EN);
662 ret = -EIO; 651 ret = -EIO;
663 goto out; 652 goto out;
664 } 653 }
@@ -667,18 +656,20 @@ static int __devinit iTCO_wdt_init(struct pci_dev *pdev, const struct pci_device
667 outl(val32, SMI_EN); 656 outl(val32, SMI_EN);
668 release_region(SMI_EN, 4); 657 release_region(SMI_EN, 4);
669 658
670 /* The TCO I/O registers reside in a 32-byte range pointed to by the TCOBASE value */ 659 /* The TCO I/O registers reside in a 32-byte range pointed to
671 if (!request_region (TCOBASE, 0x20, "iTCO_wdt")) { 660 by the TCOBASE value */
672 printk (KERN_ERR PFX "I/O address 0x%04lx already in use\n", 661 if (!request_region(TCOBASE, 0x20, "iTCO_wdt")) {
662 printk(KERN_ERR PFX "I/O address 0x%04lx already in use\n",
673 TCOBASE); 663 TCOBASE);
674 ret = -EIO; 664 ret = -EIO;
675 goto out; 665 goto out;
676 } 666 }
677 667
678 printk(KERN_INFO PFX "Found a %s TCO device (Version=%d, TCOBASE=0x%04lx)\n", 668 printk(KERN_INFO PFX
679 iTCO_chipset_info[ent->driver_data].name, 669 "Found a %s TCO device (Version=%d, TCOBASE=0x%04lx)\n",
680 iTCO_chipset_info[ent->driver_data].iTCO_version, 670 iTCO_chipset_info[ent->driver_data].name,
681 TCOBASE); 671 iTCO_chipset_info[ent->driver_data].iTCO_version,
672 TCOBASE);
682 673
683 /* Clear out the (probably old) status */ 674 /* Clear out the (probably old) status */
684 outb(0, TCO1_STS); 675 outb(0, TCO1_STS);
@@ -687,27 +678,29 @@ static int __devinit iTCO_wdt_init(struct pci_dev *pdev, const struct pci_device
687 /* Make sure the watchdog is not running */ 678 /* Make sure the watchdog is not running */
688 iTCO_wdt_stop(); 679 iTCO_wdt_stop();
689 680
690 /* Check that the heartbeat value is within it's range ; if not reset to the default */ 681 /* Check that the heartbeat value is within it's range;
682 if not reset to the default */
691 if (iTCO_wdt_set_heartbeat(heartbeat)) { 683 if (iTCO_wdt_set_heartbeat(heartbeat)) {
692 iTCO_wdt_set_heartbeat(WATCHDOG_HEARTBEAT); 684 iTCO_wdt_set_heartbeat(WATCHDOG_HEARTBEAT);
693 printk(KERN_INFO PFX "heartbeat value must be 2<heartbeat<39 (TCO v1) or 613 (TCO v2), using %d\n", 685 printk(KERN_INFO PFX "heartbeat value must be 2 < heartbeat < 39 (TCO v1) or 613 (TCO v2), using %d\n",
694 heartbeat); 686 heartbeat);
695 } 687 }
696 688
697 ret = misc_register(&iTCO_wdt_miscdev); 689 ret = misc_register(&iTCO_wdt_miscdev);
698 if (ret != 0) { 690 if (ret != 0) {
699 printk(KERN_ERR PFX "cannot register miscdev on minor=%d (err=%d)\n", 691 printk(KERN_ERR PFX
700 WATCHDOG_MINOR, ret); 692 "cannot register miscdev on minor=%d (err=%d)\n",
693 WATCHDOG_MINOR, ret);
701 goto unreg_region; 694 goto unreg_region;
702 } 695 }
703 696
704 printk (KERN_INFO PFX "initialized. heartbeat=%d sec (nowayout=%d)\n", 697 printk(KERN_INFO PFX "initialized. heartbeat=%d sec (nowayout=%d)\n",
705 heartbeat, nowayout); 698 heartbeat, nowayout);
706 699
707 return 0; 700 return 0;
708 701
709unreg_region: 702unreg_region:
710 release_region (TCOBASE, 0x20); 703 release_region(TCOBASE, 0x20);
711out: 704out:
712 if (iTCO_wdt_private.iTCO_version == 2) 705 if (iTCO_wdt_private.iTCO_version == 2)
713 iounmap(iTCO_wdt_private.gcs); 706 iounmap(iTCO_wdt_private.gcs);
@@ -796,7 +789,8 @@ static int __init iTCO_wdt_init_module(void)
796 if (err) 789 if (err)
797 return err; 790 return err;
798 791
799 iTCO_wdt_platform_device = platform_device_register_simple(DRV_NAME, -1, NULL, 0); 792 iTCO_wdt_platform_device = platform_device_register_simple(DRV_NAME,
793 -1, NULL, 0);
800 if (IS_ERR(iTCO_wdt_platform_device)) { 794 if (IS_ERR(iTCO_wdt_platform_device)) {
801 err = PTR_ERR(iTCO_wdt_platform_device); 795 err = PTR_ERR(iTCO_wdt_platform_device);
802 goto unreg_platform_driver; 796 goto unreg_platform_driver;
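The iTCO_wdt.c hunks above move the character-device entry point from .ioctl to .unlocked_ioctl, which drops the inode argument, changes the return type to long, and reorders the WDIOC_* cases. A minimal sketch of that handler shape, not taken from the patch (the example_wdt_* names are hypothetical placeholders for the driver's own start/stop/keepalive helpers):

#include <linux/module.h>
#include <linux/fs.h>
#include <linux/watchdog.h>
#include <linux/uaccess.h>

/* Hypothetical hardware helpers; a real driver pokes its registers here. */
static void example_wdt_start(void) { }
static void example_wdt_stop(void) { }
static void example_wdt_keepalive(void) { }

static long example_wdt_ioctl(struct file *file, unsigned int cmd,
						unsigned long arg)
{
	int new_options, retval = -EINVAL;
	int __user *p = (int __user *)arg;

	switch (cmd) {
	case WDIOC_SETOPTIONS:
		if (get_user(new_options, p))
			return -EFAULT;
		if (new_options & WDIOS_DISABLECARD) {
			example_wdt_stop();
			retval = 0;
		}
		if (new_options & WDIOS_ENABLECARD) {
			example_wdt_keepalive();
			example_wdt_start();
			retval = 0;
		}
		return retval;
	case WDIOC_KEEPALIVE:
		example_wdt_keepalive();
		return 0;
	default:
		return -ENOTTY;
	}
}

static const struct file_operations example_wdt_fops = {
	.owner		= THIS_MODULE,
	.llseek		= no_llseek,
	/* .ioctl became .unlocked_ioctl; the new handler takes no inode. */
	.unlocked_ioctl	= example_wdt_ioctl,
};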
diff --git a/drivers/watchdog/ib700wdt.c b/drivers/watchdog/ib700wdt.c
index 4b89f401691a..05a28106e8eb 100644
--- a/drivers/watchdog/ib700wdt.c
+++ b/drivers/watchdog/ib700wdt.c
@@ -41,9 +41,9 @@
41#include <linux/spinlock.h> 41#include <linux/spinlock.h>
42#include <linux/moduleparam.h> 42#include <linux/moduleparam.h>
43#include <linux/platform_device.h> 43#include <linux/platform_device.h>
44#include <linux/io.h>
45#include <linux/uaccess.h>
44 46
45#include <asm/io.h>
46#include <asm/uaccess.h>
47#include <asm/system.h> 47#include <asm/system.h>
48 48
49static struct platform_device *ibwdt_platform_device; 49static struct platform_device *ibwdt_platform_device;
@@ -120,15 +120,16 @@ static int wd_margin = WD_TIMO;
120 120
121static int nowayout = WATCHDOG_NOWAYOUT; 121static int nowayout = WATCHDOG_NOWAYOUT;
122module_param(nowayout, int, 0); 122module_param(nowayout, int, 0);
123MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped once started (default=" __MODULE_STRING(WATCHDOG_NOWAYOUT) ")"); 123MODULE_PARM_DESC(nowayout,
124 "Watchdog cannot be stopped once started (default="
125 __MODULE_STRING(WATCHDOG_NOWAYOUT) ")");
124 126
125 127
126/* 128/*
127 * Watchdog Operations 129 * Watchdog Operations
128 */ 130 */
129 131
130static void 132static void ibwdt_ping(void)
131ibwdt_ping(void)
132{ 133{
133 spin_lock(&ibwdt_lock); 134 spin_lock(&ibwdt_lock);
134 135
@@ -138,16 +139,14 @@ ibwdt_ping(void)
138 spin_unlock(&ibwdt_lock); 139 spin_unlock(&ibwdt_lock);
139} 140}
140 141
141static void 142static void ibwdt_disable(void)
142ibwdt_disable(void)
143{ 143{
144 spin_lock(&ibwdt_lock); 144 spin_lock(&ibwdt_lock);
145 outb_p(0, WDT_STOP); 145 outb_p(0, WDT_STOP);
146 spin_unlock(&ibwdt_lock); 146 spin_unlock(&ibwdt_lock);
147} 147}
148 148
149static int 149static int ibwdt_set_heartbeat(int t)
150ibwdt_set_heartbeat(int t)
151{ 150{
152 int i; 151 int i;
153 152
@@ -165,8 +164,8 @@ ibwdt_set_heartbeat(int t)
165 * /dev/watchdog handling 164 * /dev/watchdog handling
166 */ 165 */
167 166
168static ssize_t 167static ssize_t ibwdt_write(struct file *file, const char __user *buf,
169ibwdt_write(struct file *file, const char __user *buf, size_t count, loff_t *ppos) 168 size_t count, loff_t *ppos)
170{ 169{
171 if (count) { 170 if (count) {
172 if (!nowayout) { 171 if (!nowayout) {
@@ -188,77 +187,71 @@ ibwdt_write(struct file *file, const char __user *buf, size_t count, loff_t *ppo
188 return count; 187 return count;
189} 188}
190 189
191static int 190static long ibwdt_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
192ibwdt_ioctl(struct inode *inode, struct file *file, unsigned int cmd,
193 unsigned long arg)
194{ 191{
195 int new_margin; 192 int new_margin;
196 void __user *argp = (void __user *)arg; 193 void __user *argp = (void __user *)arg;
197 int __user *p = argp; 194 int __user *p = argp;
198 195
199 static struct watchdog_info ident = { 196 static struct watchdog_info ident = {
200 .options = WDIOF_KEEPALIVEPING | WDIOF_SETTIMEOUT | WDIOF_MAGICCLOSE, 197 .options = WDIOF_KEEPALIVEPING | WDIOF_SETTIMEOUT
198 | WDIOF_MAGICCLOSE,
201 .firmware_version = 1, 199 .firmware_version = 1,
202 .identity = "IB700 WDT", 200 .identity = "IB700 WDT",
203 }; 201 };
204 202
205 switch (cmd) { 203 switch (cmd) {
206 case WDIOC_GETSUPPORT: 204 case WDIOC_GETSUPPORT:
207 if (copy_to_user(argp, &ident, sizeof(ident))) 205 if (copy_to_user(argp, &ident, sizeof(ident)))
208 return -EFAULT; 206 return -EFAULT;
209 break; 207 break;
210 208
211 case WDIOC_GETSTATUS: 209 case WDIOC_GETSTATUS:
212 case WDIOC_GETBOOTSTATUS: 210 case WDIOC_GETBOOTSTATUS:
213 return put_user(0, p); 211 return put_user(0, p);
214
215 case WDIOC_KEEPALIVE:
216 ibwdt_ping();
217 break;
218
219 case WDIOC_SETTIMEOUT:
220 if (get_user(new_margin, p))
221 return -EFAULT;
222 if (ibwdt_set_heartbeat(new_margin))
223 return -EINVAL;
224 ibwdt_ping();
225 /* Fall */
226
227 case WDIOC_GETTIMEOUT:
228 return put_user(wd_times[wd_margin], p);
229 212
230 case WDIOC_SETOPTIONS: 213 case WDIOC_SETOPTIONS:
231 { 214 {
232 int options, retval = -EINVAL; 215 int options, retval = -EINVAL;
233 216
234 if (get_user(options, p)) 217 if (get_user(options, p))
235 return -EFAULT; 218 return -EFAULT;
236 219
237 if (options & WDIOS_DISABLECARD) { 220 if (options & WDIOS_DISABLECARD) {
238 ibwdt_disable(); 221 ibwdt_disable();
239 retval = 0; 222 retval = 0;
240 } 223 }
224 if (options & WDIOS_ENABLECARD) {
225 ibwdt_ping();
226 retval = 0;
227 }
228 return retval;
229 }
230 case WDIOC_KEEPALIVE:
231 ibwdt_ping();
232 break;
241 233
242 if (options & WDIOS_ENABLECARD) { 234 case WDIOC_SETTIMEOUT:
243 ibwdt_ping(); 235 if (get_user(new_margin, p))
244 retval = 0; 236 return -EFAULT;
245 } 237 if (ibwdt_set_heartbeat(new_margin))
238 return -EINVAL;
239 ibwdt_ping();
240 /* Fall */
246 241
247 return retval; 242 case WDIOC_GETTIMEOUT:
248 } 243 return put_user(wd_times[wd_margin], p);
249 244
250 default: 245 default:
251 return -ENOTTY; 246 return -ENOTTY;
252 } 247 }
253 return 0; 248 return 0;
254} 249}
255 250
256static int 251static int ibwdt_open(struct inode *inode, struct file *file)
257ibwdt_open(struct inode *inode, struct file *file)
258{ 252{
259 if (test_and_set_bit(0, &ibwdt_is_open)) { 253 if (test_and_set_bit(0, &ibwdt_is_open))
260 return -EBUSY; 254 return -EBUSY;
261 }
262 if (nowayout) 255 if (nowayout)
263 __module_get(THIS_MODULE); 256 __module_get(THIS_MODULE);
264 257
@@ -267,13 +260,13 @@ ibwdt_open(struct inode *inode, struct file *file)
267 return nonseekable_open(inode, file); 260 return nonseekable_open(inode, file);
268} 261}
269 262
270static int 263static int ibwdt_close(struct inode *inode, struct file *file)
271ibwdt_close(struct inode *inode, struct file *file)
272{ 264{
273 if (expect_close == 42) { 265 if (expect_close == 42) {
274 ibwdt_disable(); 266 ibwdt_disable();
275 } else { 267 } else {
276 printk(KERN_CRIT PFX "WDT device closed unexpectedly. WDT will not stop!\n"); 268 printk(KERN_CRIT PFX
269 "WDT device closed unexpectedly. WDT will not stop!\n");
277 ibwdt_ping(); 270 ibwdt_ping();
278 } 271 }
279 clear_bit(0, &ibwdt_is_open); 272 clear_bit(0, &ibwdt_is_open);
@@ -289,7 +282,7 @@ static const struct file_operations ibwdt_fops = {
289 .owner = THIS_MODULE, 282 .owner = THIS_MODULE,
290 .llseek = no_llseek, 283 .llseek = no_llseek,
291 .write = ibwdt_write, 284 .write = ibwdt_write,
292 .ioctl = ibwdt_ioctl, 285 .unlocked_ioctl = ibwdt_ioctl,
293 .open = ibwdt_open, 286 .open = ibwdt_open,
294 .release = ibwdt_close, 287 .release = ibwdt_close,
295}; 288};
@@ -310,21 +303,23 @@ static int __devinit ibwdt_probe(struct platform_device *dev)
310 303
311#if WDT_START != WDT_STOP 304#if WDT_START != WDT_STOP
312 if (!request_region(WDT_STOP, 1, "IB700 WDT")) { 305 if (!request_region(WDT_STOP, 1, "IB700 WDT")) {
313 printk (KERN_ERR PFX "STOP method I/O %X is not available.\n", WDT_STOP); 306 printk(KERN_ERR PFX "STOP method I/O %X is not available.\n",
307 WDT_STOP);
314 res = -EIO; 308 res = -EIO;
315 goto out_nostopreg; 309 goto out_nostopreg;
316 } 310 }
317#endif 311#endif
318 312
319 if (!request_region(WDT_START, 1, "IB700 WDT")) { 313 if (!request_region(WDT_START, 1, "IB700 WDT")) {
320 printk (KERN_ERR PFX "START method I/O %X is not available.\n", WDT_START); 314 printk(KERN_ERR PFX "START method I/O %X is not available.\n",
315 WDT_START);
321 res = -EIO; 316 res = -EIO;
322 goto out_nostartreg; 317 goto out_nostartreg;
323 } 318 }
324 319
325 res = misc_register(&ibwdt_miscdev); 320 res = misc_register(&ibwdt_miscdev);
326 if (res) { 321 if (res) {
327 printk (KERN_ERR PFX "failed to register misc device\n"); 322 printk(KERN_ERR PFX "failed to register misc device\n");
328 goto out_nomisc; 323 goto out_nomisc;
329 } 324 }
330 return 0; 325 return 0;
@@ -342,9 +337,9 @@ out_nostopreg:
342static int __devexit ibwdt_remove(struct platform_device *dev) 337static int __devexit ibwdt_remove(struct platform_device *dev)
343{ 338{
344 misc_deregister(&ibwdt_miscdev); 339 misc_deregister(&ibwdt_miscdev);
345 release_region(WDT_START,1); 340 release_region(WDT_START, 1);
346#if WDT_START != WDT_STOP 341#if WDT_START != WDT_STOP
347 release_region(WDT_STOP,1); 342 release_region(WDT_STOP, 1);
348#endif 343#endif
349 return 0; 344 return 0;
350} 345}
@@ -369,13 +364,15 @@ static int __init ibwdt_init(void)
369{ 364{
370 int err; 365 int err;
371 366
372 printk(KERN_INFO PFX "WDT driver for IB700 single board computer initialising.\n"); 367 printk(KERN_INFO PFX
368 "WDT driver for IB700 single board computer initialising.\n");
373 369
374 err = platform_driver_register(&ibwdt_driver); 370 err = platform_driver_register(&ibwdt_driver);
375 if (err) 371 if (err)
376 return err; 372 return err;
377 373
378 ibwdt_platform_device = platform_device_register_simple(DRV_NAME, -1, NULL, 0); 374 ibwdt_platform_device = platform_device_register_simple(DRV_NAME,
375 -1, NULL, 0);
379 if (IS_ERR(ibwdt_platform_device)) { 376 if (IS_ERR(ibwdt_platform_device)) {
380 err = PTR_ERR(ibwdt_platform_device); 377 err = PTR_ERR(ibwdt_platform_device);
381 goto unreg_platform_driver; 378 goto unreg_platform_driver;
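The write() paths in these watchdog drivers implement the WDIOF_MAGICCLOSE convention: the timer is only stopped on release() if the last write contained the magic character 'V'. A standalone sketch of that scan, not lifted from the patch (the example_* names and the ping helper are placeholders):

#include <linux/fs.h>
#include <linux/types.h>
#include <linux/uaccess.h>

static char example_expect_close;	/* set to 42 once the magic char is seen */
static int example_nowayout;

/* Hypothetical keepalive helper; a real driver reloads its timer here. */
static void example_wdt_ping(void)
{
}

static ssize_t example_wdt_write(struct file *file, const char __user *buf,
					size_t count, loff_t *ppos)
{
	if (count) {
		if (!example_nowayout) {
			size_t i;

			/* Forget any magic character written long ago. */
			example_expect_close = 0;

			/* Scan the buffer for the magic character 'V'. */
			for (i = 0; i != count; i++) {
				char c;
				if (get_user(c, buf + i))
					return -EFAULT;
				if (c == 'V')
					example_expect_close = 42;
			}
		}
		/* Any write counts as a keepalive. */
		example_wdt_ping();
	}
	return count;
}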
diff --git a/drivers/watchdog/ibmasr.c b/drivers/watchdog/ibmasr.c
index 94155f6136c2..b82405cfb4cd 100644
--- a/drivers/watchdog/ibmasr.c
+++ b/drivers/watchdog/ibmasr.c
@@ -19,9 +19,8 @@
19#include <linux/miscdevice.h> 19#include <linux/miscdevice.h>
20#include <linux/watchdog.h> 20#include <linux/watchdog.h>
21#include <linux/dmi.h> 21#include <linux/dmi.h>
22 22#include <linux/io.h>
23#include <asm/io.h> 23#include <linux/uaccess.h>
24#include <asm/uaccess.h>
25 24
26 25
27enum { 26enum {
@@ -70,10 +69,13 @@ static char asr_expect_close;
70static unsigned int asr_type, asr_base, asr_length; 69static unsigned int asr_type, asr_base, asr_length;
71static unsigned int asr_read_addr, asr_write_addr; 70static unsigned int asr_read_addr, asr_write_addr;
72static unsigned char asr_toggle_mask, asr_disable_mask; 71static unsigned char asr_toggle_mask, asr_disable_mask;
72static spinlock_t asr_lock;
73 73
74static void asr_toggle(void) 74static void __asr_toggle(void)
75{ 75{
76 unsigned char reg = inb(asr_read_addr); 76 unsigned char reg;
77
78 reg = inb(asr_read_addr);
77 79
78 outb(reg & ~asr_toggle_mask, asr_write_addr); 80 outb(reg & ~asr_toggle_mask, asr_write_addr);
79 reg = inb(asr_read_addr); 81 reg = inb(asr_read_addr);
@@ -83,12 +85,21 @@ static void asr_toggle(void)
83 85
84 outb(reg & ~asr_toggle_mask, asr_write_addr); 86 outb(reg & ~asr_toggle_mask, asr_write_addr);
85 reg = inb(asr_read_addr); 87 reg = inb(asr_read_addr);
88 spin_unlock(&asr_lock);
89}
90
91static void asr_toggle(void)
92{
93 spin_lock(&asr_lock);
94 __asr_toggle();
95 spin_unlock(&asr_lock);
86} 96}
87 97
88static void asr_enable(void) 98static void asr_enable(void)
89{ 99{
90 unsigned char reg; 100 unsigned char reg;
91 101
102 spin_lock(&asr_lock);
92 if (asr_type == ASMTYPE_TOPAZ) { 103 if (asr_type == ASMTYPE_TOPAZ) {
93 /* asr_write_addr == asr_read_addr */ 104 /* asr_write_addr == asr_read_addr */
94 reg = inb(asr_read_addr); 105 reg = inb(asr_read_addr);
@@ -99,17 +110,21 @@ static void asr_enable(void)
99 * First make sure the hardware timer is reset by toggling 110 * First make sure the hardware timer is reset by toggling
100 * ASR hardware timer line. 111 * ASR hardware timer line.
101 */ 112 */
102 asr_toggle(); 113 __asr_toggle();
103 114
104 reg = inb(asr_read_addr); 115 reg = inb(asr_read_addr);
105 outb(reg & ~asr_disable_mask, asr_write_addr); 116 outb(reg & ~asr_disable_mask, asr_write_addr);
106 } 117 }
107 reg = inb(asr_read_addr); 118 reg = inb(asr_read_addr);
119 spin_unlock(&asr_lock);
108} 120}
109 121
110static void asr_disable(void) 122static void asr_disable(void)
111{ 123{
112 unsigned char reg = inb(asr_read_addr); 124 unsigned char reg;
125
126 spin_lock(&asr_lock);
127 reg = inb(asr_read_addr);
113 128
114 if (asr_type == ASMTYPE_TOPAZ) 129 if (asr_type == ASMTYPE_TOPAZ)
115 /* asr_write_addr == asr_read_addr */ 130 /* asr_write_addr == asr_read_addr */
@@ -122,6 +137,7 @@ static void asr_disable(void)
122 outb(reg | asr_disable_mask, asr_write_addr); 137 outb(reg | asr_disable_mask, asr_write_addr);
123 } 138 }
124 reg = inb(asr_read_addr); 139 reg = inb(asr_read_addr);
140 spin_unlock(&asr_lock);
125} 141}
126 142
127static int __init asr_get_base_address(void) 143static int __init asr_get_base_address(void)
@@ -133,7 +149,8 @@ static int __init asr_get_base_address(void)
133 149
134 switch (asr_type) { 150 switch (asr_type) {
135 case ASMTYPE_TOPAZ: 151 case ASMTYPE_TOPAZ:
136 /* SELECT SuperIO CHIP FOR QUERYING (WRITE 0x07 TO BOTH 0x2E and 0x2F) */ 152 /* SELECT SuperIO CHIP FOR QUERYING
153 (WRITE 0x07 TO BOTH 0x2E and 0x2F) */
137 outb(0x07, 0x2e); 154 outb(0x07, 0x2e);
138 outb(0x07, 0x2f); 155 outb(0x07, 0x2f);
139 156
@@ -154,14 +171,26 @@ static int __init asr_get_base_address(void)
154 171
155 case ASMTYPE_JASPER: 172 case ASMTYPE_JASPER:
156 type = "Jaspers "; 173 type = "Jaspers ";
157 174#if 0
158 /* FIXME: need to use pci_config_lock here, but it's not exported */ 175 u32 r;
176 /* Suggested fix */
177 pdev = pci_get_bus_and_slot(0, DEVFN(0x1f, 0));
178 if (pdev == NULL)
179 return -ENODEV;
180 pci_read_config_dword(pdev, 0x58, &r);
181 asr_base = r & 0xFFFE;
182 pci_dev_put(pdev);
183#else
184 /* FIXME: need to use pci_config_lock here,
185 but it's not exported */
159 186
160/* spin_lock_irqsave(&pci_config_lock, flags);*/ 187/* spin_lock_irqsave(&pci_config_lock, flags);*/
161 188
162 /* Select the SuperIO chip in the PCI I/O port register */ 189 /* Select the SuperIO chip in the PCI I/O port register */
163 outl(0x8000f858, 0xcf8); 190 outl(0x8000f858, 0xcf8);
164 191
192 /* BUS 0, Slot 1F, fnc 0, offset 58 */
193
165 /* 194 /*
166 * Read the base address for the SuperIO chip. 195 * Read the base address for the SuperIO chip.
167 * Only the lower 16 bits are valid, but the address is word 196 * Only the lower 16 bits are valid, but the address is word
@@ -170,7 +199,7 @@ static int __init asr_get_base_address(void)
170 asr_base = inl(0xcfc) & 0xfffe; 199 asr_base = inl(0xcfc) & 0xfffe;
171 200
172/* spin_unlock_irqrestore(&pci_config_lock, flags);*/ 201/* spin_unlock_irqrestore(&pci_config_lock, flags);*/
173 202#endif
174 asr_read_addr = asr_write_addr = 203 asr_read_addr = asr_write_addr =
175 asr_base + JASPER_ASR_REG_OFFSET; 204 asr_base + JASPER_ASR_REG_OFFSET;
176 asr_toggle_mask = JASPER_ASR_TOGGLE_MASK; 205 asr_toggle_mask = JASPER_ASR_TOGGLE_MASK;
@@ -241,66 +270,57 @@ static ssize_t asr_write(struct file *file, const char __user *buf,
241 return count; 270 return count;
242} 271}
243 272
244static int asr_ioctl(struct inode *inode, struct file *file, 273static long asr_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
245 unsigned int cmd, unsigned long arg)
246{ 274{
247 static const struct watchdog_info ident = { 275 static const struct watchdog_info ident = {
248 .options = WDIOF_KEEPALIVEPING | 276 .options = WDIOF_KEEPALIVEPING |
249 WDIOF_MAGICCLOSE, 277 WDIOF_MAGICCLOSE,
250 .identity = "IBM ASR" 278 .identity = "IBM ASR",
251 }; 279 };
252 void __user *argp = (void __user *)arg; 280 void __user *argp = (void __user *)arg;
253 int __user *p = argp; 281 int __user *p = argp;
254 int heartbeat; 282 int heartbeat;
255 283
256 switch (cmd) { 284 switch (cmd) {
257 case WDIOC_GETSUPPORT: 285 case WDIOC_GETSUPPORT:
258 return copy_to_user(argp, &ident, sizeof(ident)) ? 286 return copy_to_user(argp, &ident, sizeof(ident)) ? -EFAULT : 0;
259 -EFAULT : 0; 287 case WDIOC_GETSTATUS:
260 288 case WDIOC_GETBOOTSTATUS:
261 case WDIOC_GETSTATUS: 289 return put_user(0, p);
262 case WDIOC_GETBOOTSTATUS: 290 case WDIOC_SETOPTIONS:
263 return put_user(0, p); 291 {
264 292 int new_options, retval = -EINVAL;
265 case WDIOC_KEEPALIVE: 293 if (get_user(new_options, p))
294 return -EFAULT;
295 if (new_options & WDIOS_DISABLECARD) {
296 asr_disable();
297 retval = 0;
298 }
299 if (new_options & WDIOS_ENABLECARD) {
300 asr_enable();
266 asr_toggle(); 301 asr_toggle();
267 return 0; 302 retval = 0;
268
269 /*
270 * The hardware has a fixed timeout value, so no WDIOC_SETTIMEOUT
271 * and WDIOC_GETTIMEOUT always returns 256.
272 */
273 case WDIOC_GETTIMEOUT:
274 heartbeat = 256;
275 return put_user(heartbeat, p);
276
277 case WDIOC_SETOPTIONS: {
278 int new_options, retval = -EINVAL;
279
280 if (get_user(new_options, p))
281 return -EFAULT;
282
283 if (new_options & WDIOS_DISABLECARD) {
284 asr_disable();
285 retval = 0;
286 }
287
288 if (new_options & WDIOS_ENABLECARD) {
289 asr_enable();
290 asr_toggle();
291 retval = 0;
292 }
293
294 return retval;
295 } 303 }
304 return retval;
305 }
306 case WDIOC_KEEPALIVE:
307 asr_toggle();
308 return 0;
309 /*
310 * The hardware has a fixed timeout value, so no WDIOC_SETTIMEOUT
311 * and WDIOC_GETTIMEOUT always returns 256.
312 */
313 case WDIOC_GETTIMEOUT:
314 heartbeat = 256;
315 return put_user(heartbeat, p);
316 default:
317 return -ENOTTY;
296 } 318 }
297
298 return -ENOTTY;
299} 319}
300 320
301static int asr_open(struct inode *inode, struct file *file) 321static int asr_open(struct inode *inode, struct file *file)
302{ 322{
303 if(test_and_set_bit(0, &asr_is_open)) 323 if (test_and_set_bit(0, &asr_is_open))
304 return -EBUSY; 324 return -EBUSY;
305 325
306 asr_toggle(); 326 asr_toggle();
@@ -314,7 +334,8 @@ static int asr_release(struct inode *inode, struct file *file)
314 if (asr_expect_close == 42) 334 if (asr_expect_close == 42)
315 asr_disable(); 335 asr_disable();
316 else { 336 else {
317 printk(KERN_CRIT PFX "unexpected close, not stopping watchdog!\n"); 337 printk(KERN_CRIT PFX
338 "unexpected close, not stopping watchdog!\n");
318 asr_toggle(); 339 asr_toggle();
319 } 340 }
320 clear_bit(0, &asr_is_open); 341 clear_bit(0, &asr_is_open);
@@ -323,12 +344,12 @@ static int asr_release(struct inode *inode, struct file *file)
323} 344}
324 345
325static const struct file_operations asr_fops = { 346static const struct file_operations asr_fops = {
326 .owner = THIS_MODULE, 347 .owner = THIS_MODULE,
327 .llseek = no_llseek, 348 .llseek = no_llseek,
328 .write = asr_write, 349 .write = asr_write,
329 .ioctl = asr_ioctl, 350 .unlocked_ioctl = asr_ioctl,
330 .open = asr_open, 351 .open = asr_open,
331 .release = asr_release, 352 .release = asr_release,
332}; 353};
333 354
334static struct miscdevice asr_miscdev = { 355static struct miscdevice asr_miscdev = {
@@ -367,6 +388,8 @@ static int __init ibmasr_init(void)
367 if (!asr_type) 388 if (!asr_type)
368 return -ENODEV; 389 return -ENODEV;
369 390
391 spin_lock_init(&asr_lock);
392
370 rc = asr_get_base_address(); 393 rc = asr_get_base_address();
371 if (rc) 394 if (rc)
372 return rc; 395 return rc;
@@ -395,7 +418,9 @@ module_init(ibmasr_init);
395module_exit(ibmasr_exit); 418module_exit(ibmasr_exit);
396 419
397module_param(nowayout, int, 0); 420module_param(nowayout, int, 0);
398MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped once started (default=" __MODULE_STRING(WATCHDOG_NOWAYOUT) ")"); 421MODULE_PARM_DESC(nowayout,
422 "Watchdog cannot be stopped once started (default="
423 __MODULE_STRING(WATCHDOG_NOWAYOUT) ")");
399 424
400MODULE_DESCRIPTION("IBM Automatic Server Restart driver"); 425MODULE_DESCRIPTION("IBM Automatic Server Restart driver");
401MODULE_AUTHOR("Andrey Panin"); 426MODULE_AUTHOR("Andrey Panin");
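The ibmasr.c hunks above wrap the inb()/outb() toggle sequences in a new spinlock and split out a __asr_toggle() variant for callers that already hold the lock; serialising these port accesses matters once the handlers no longer run under the BKL-taking .ioctl path. A rough sketch of that locked/unlocked split with hypothetical example_* names (the port addresses and masks are placeholders, and unlike the hunk above this sketch drops the lock only in the outer wrappers):

#include <linux/spinlock.h>
#include <linux/io.h>

static DEFINE_SPINLOCK(example_lock);

/* Placeholder port addresses and masks; a real driver probes these. */
static unsigned int example_read_addr = 0x2e;
static unsigned int example_write_addr = 0x2e;
static unsigned char example_toggle_mask = 0x80;
static unsigned char example_disable_mask = 0x40;

/* Caller must already hold example_lock. */
static void __example_toggle(void)
{
	unsigned char reg = inb(example_read_addr);

	outb(reg & ~example_toggle_mask, example_write_addr);
	reg = inb(example_read_addr);
	outb(reg | example_toggle_mask, example_write_addr);
	inb(example_read_addr);		/* read back to settle the register */
}

static void example_toggle(void)
{
	spin_lock(&example_lock);
	__example_toggle();
	spin_unlock(&example_lock);
}

static void example_enable(void)
{
	unsigned char reg;

	spin_lock(&example_lock);
	/* Reset the timer line, then clear the disable bit, all under one
	   lock hold -- which is why the __ variant exists. */
	__example_toggle();
	reg = inb(example_read_addr);
	outb(reg & ~example_disable_mask, example_write_addr);
	spin_unlock(&example_lock);
}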
diff --git a/drivers/watchdog/indydog.c b/drivers/watchdog/indydog.c
index 788245bdaa7f..73c9e7992feb 100644
--- a/drivers/watchdog/indydog.c
+++ b/drivers/watchdog/indydog.c
@@ -1,7 +1,8 @@
1/* 1/*
2 * IndyDog 0.3 A Hardware Watchdog Device for SGI IP22 2 * IndyDog 0.3 A Hardware Watchdog Device for SGI IP22
3 * 3 *
4 * (c) Copyright 2002 Guido Guenther <agx@sigxcpu.org>, All Rights Reserved. 4 * (c) Copyright 2002 Guido Guenther <agx@sigxcpu.org>,
5 * All Rights Reserved.
5 * 6 *
6 * This program is free software; you can redistribute it and/or 7 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public License 8 * modify it under the terms of the GNU General Public License
@@ -22,32 +23,42 @@
22#include <linux/notifier.h> 23#include <linux/notifier.h>
23#include <linux/reboot.h> 24#include <linux/reboot.h>
24#include <linux/init.h> 25#include <linux/init.h>
25#include <asm/uaccess.h> 26#include <linux/uaccess.h>
26#include <asm/sgi/mc.h> 27#include <asm/sgi/mc.h>
27 28
28#define PFX "indydog: " 29#define PFX "indydog: "
29static int indydog_alive; 30static unsigned long indydog_alive;
31static spinlock_t indydog_lock;
30 32
31#define WATCHDOG_TIMEOUT 30 /* 30 sec default timeout */ 33#define WATCHDOG_TIMEOUT 30 /* 30 sec default timeout */
32 34
33static int nowayout = WATCHDOG_NOWAYOUT; 35static int nowayout = WATCHDOG_NOWAYOUT;
34module_param(nowayout, int, 0); 36module_param(nowayout, int, 0);
35MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped once started (default=" __MODULE_STRING(WATCHDOG_NOWAYOUT) ")"); 37MODULE_PARM_DESC(nowayout,
38 "Watchdog cannot be stopped once started (default="
39 __MODULE_STRING(WATCHDOG_NOWAYOUT) ")");
36 40
37static void indydog_start(void) 41static void indydog_start(void)
38{ 42{
39 u32 mc_ctrl0 = sgimc->cpuctrl0; 43 u32 mc_ctrl0;
40 44
45 spin_lock(&indydog_lock);
46 mc_ctrl0 = sgimc->cpuctrl0;
41 mc_ctrl0 = sgimc->cpuctrl0 | SGIMC_CCTRL0_WDOG; 47 mc_ctrl0 = sgimc->cpuctrl0 | SGIMC_CCTRL0_WDOG;
42 sgimc->cpuctrl0 = mc_ctrl0; 48 sgimc->cpuctrl0 = mc_ctrl0;
49 spin_unlock(&indydog_lock);
43} 50}
44 51
45static void indydog_stop(void) 52static void indydog_stop(void)
46{ 53{
47 u32 mc_ctrl0 = sgimc->cpuctrl0; 54 u32 mc_ctrl0;
48 55
56 spin_lock(&indydog_lock);
57
58 mc_ctrl0 = sgimc->cpuctrl0;
49 mc_ctrl0 &= ~SGIMC_CCTRL0_WDOG; 59 mc_ctrl0 &= ~SGIMC_CCTRL0_WDOG;
50 sgimc->cpuctrl0 = mc_ctrl0; 60 sgimc->cpuctrl0 = mc_ctrl0;
61 spin_unlock(&indydog_lock);
51 62
52 printk(KERN_INFO PFX "Stopped watchdog timer.\n"); 63 printk(KERN_INFO PFX "Stopped watchdog timer.\n");
53} 64}
@@ -62,7 +73,7 @@ static void indydog_ping(void)
62 */ 73 */
63static int indydog_open(struct inode *inode, struct file *file) 74static int indydog_open(struct inode *inode, struct file *file)
64{ 75{
65 if (indydog_alive) 76 if (test_and_set_bit(0, &indydog_alive))
66 return -EBUSY; 77 return -EBUSY;
67 78
68 if (nowayout) 79 if (nowayout)
@@ -84,23 +95,21 @@ static int indydog_release(struct inode *inode, struct file *file)
84 * Lock it in if it's a module and we defined ...NOWAYOUT */ 95 * Lock it in if it's a module and we defined ...NOWAYOUT */
85 if (!nowayout) 96 if (!nowayout)
86 indydog_stop(); /* Turn the WDT off */ 97 indydog_stop(); /* Turn the WDT off */
87 98 clear_bit(0, &indydog_alive);
88 indydog_alive = 0;
89
90 return 0; 99 return 0;
91} 100}
92 101
93static ssize_t indydog_write(struct file *file, const char *data, size_t len, loff_t *ppos) 102static ssize_t indydog_write(struct file *file, const char *data,
103 size_t len, loff_t *ppos)
94{ 104{
95 /* Refresh the timer. */ 105 /* Refresh the timer. */
96 if (len) { 106 if (len)
97 indydog_ping(); 107 indydog_ping();
98 }
99 return len; 108 return len;
100} 109}
101 110
102static int indydog_ioctl(struct inode *inode, struct file *file, 111static long indydog_ioctl(struct file *file, unsigned int cmd,
103 unsigned int cmd, unsigned long arg) 112 unsigned long arg)
104{ 113{
105 int options, retval = -EINVAL; 114 int options, retval = -EINVAL;
106 static struct watchdog_info ident = { 115 static struct watchdog_info ident = {
@@ -111,42 +120,40 @@ static int indydog_ioctl(struct inode *inode, struct file *file,
111 }; 120 };
112 121
113 switch (cmd) { 122 switch (cmd) {
114 default: 123 case WDIOC_GETSUPPORT:
115 return -ENOTTY; 124 if (copy_to_user((struct watchdog_info *)arg,
116 case WDIOC_GETSUPPORT: 125 &ident, sizeof(ident)))
117 if (copy_to_user((struct watchdog_info *)arg, 126 return -EFAULT;
118 &ident, sizeof(ident))) 127 return 0;
119 return -EFAULT; 128 case WDIOC_GETSTATUS:
120 return 0; 129 case WDIOC_GETBOOTSTATUS:
121 case WDIOC_GETSTATUS: 130 return put_user(0, (int *)arg);
122 case WDIOC_GETBOOTSTATUS: 131 case WDIOC_SETOPTIONS:
123 return put_user(0,(int *)arg); 132 {
124 case WDIOC_KEEPALIVE: 133 if (get_user(options, (int *)arg))
125 indydog_ping(); 134 return -EFAULT;
126 return 0; 135 if (options & WDIOS_DISABLECARD) {
127 case WDIOC_GETTIMEOUT: 136 indydog_stop();
128 return put_user(WATCHDOG_TIMEOUT,(int *)arg); 137 retval = 0;
129 case WDIOC_SETOPTIONS:
130 {
131 if (get_user(options, (int *)arg))
132 return -EFAULT;
133
134 if (options & WDIOS_DISABLECARD) {
135 indydog_stop();
136 retval = 0;
137 }
138
139 if (options & WDIOS_ENABLECARD) {
140 indydog_start();
141 retval = 0;
142 }
143
144 return retval;
145 } 138 }
139 if (options & WDIOS_ENABLECARD) {
140 indydog_start();
141 retval = 0;
142 }
143 return retval;
144 }
145 case WDIOC_KEEPALIVE:
146 indydog_ping();
147 return 0;
148 case WDIOC_GETTIMEOUT:
149 return put_user(WATCHDOG_TIMEOUT, (int *)arg);
150 default:
151 return -ENOTTY;
146 } 152 }
147} 153}
148 154
149static int indydog_notify_sys(struct notifier_block *this, unsigned long code, void *unused) 155static int indydog_notify_sys(struct notifier_block *this,
156 unsigned long code, void *unused)
150{ 157{
151 if (code == SYS_DOWN || code == SYS_HALT) 158 if (code == SYS_DOWN || code == SYS_HALT)
152 indydog_stop(); /* Turn the WDT off */ 159 indydog_stop(); /* Turn the WDT off */
@@ -158,7 +165,7 @@ static const struct file_operations indydog_fops = {
158 .owner = THIS_MODULE, 165 .owner = THIS_MODULE,
159 .llseek = no_llseek, 166 .llseek = no_llseek,
160 .write = indydog_write, 167 .write = indydog_write,
161 .ioctl = indydog_ioctl, 168 .unlocked_ioctl = indydog_ioctl,
162 .open = indydog_open, 169 .open = indydog_open,
163 .release = indydog_release, 170 .release = indydog_release,
164}; 171};
@@ -180,17 +187,20 @@ static int __init watchdog_init(void)
180{ 187{
181 int ret; 188 int ret;
182 189
190 spin_lock_init(&indydog_lock);
191
183 ret = register_reboot_notifier(&indydog_notifier); 192 ret = register_reboot_notifier(&indydog_notifier);
184 if (ret) { 193 if (ret) {
185 printk(KERN_ERR PFX "cannot register reboot notifier (err=%d)\n", 194 printk(KERN_ERR PFX
186 ret); 195 "cannot register reboot notifier (err=%d)\n", ret);
187 return ret; 196 return ret;
188 } 197 }
189 198
190 ret = misc_register(&indydog_miscdev); 199 ret = misc_register(&indydog_miscdev);
191 if (ret) { 200 if (ret) {
192 printk(KERN_ERR PFX "cannot register miscdev on minor=%d (err=%d)\n", 201 printk(KERN_ERR PFX
193 WATCHDOG_MINOR, ret); 202 "cannot register miscdev on minor=%d (err=%d)\n",
203 WATCHDOG_MINOR, ret);
194 unregister_reboot_notifier(&indydog_notifier); 204 unregister_reboot_notifier(&indydog_notifier);
195 return ret; 205 return ret;
196 } 206 }
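indydog.c switches its open-once guard from a plain int flag to an atomic test_and_set_bit() on an unsigned long, which closes the race between two concurrent open() calls. A small sketch of the pattern with hypothetical example_* names:

#include <linux/fs.h>
#include <linux/bitops.h>

static unsigned long example_alive;	/* bit 0 set while /dev/watchdog is open */

static int example_open(struct inode *inode, struct file *file)
{
	/* Atomic check-and-set: only one opener at a time, without races. */
	if (test_and_set_bit(0, &example_alive))
		return -EBUSY;
	return nonseekable_open(inode, file);
}

static int example_release(struct inode *inode, struct file *file)
{
	clear_bit(0, &example_alive);
	return 0;
}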
diff --git a/drivers/watchdog/iop_wdt.c b/drivers/watchdog/iop_wdt.c
index bbbd91af754d..96eb2cbe5874 100644
--- a/drivers/watchdog/iop_wdt.c
+++ b/drivers/watchdog/iop_wdt.c
@@ -32,11 +32,12 @@
32#include <linux/miscdevice.h> 32#include <linux/miscdevice.h>
33#include <linux/watchdog.h> 33#include <linux/watchdog.h>
34#include <linux/uaccess.h> 34#include <linux/uaccess.h>
35#include <asm/hardware.h> 35#include <mach/hardware.h>
36 36
37static int nowayout = WATCHDOG_NOWAYOUT; 37static int nowayout = WATCHDOG_NOWAYOUT;
38static unsigned long wdt_status; 38static unsigned long wdt_status;
39static unsigned long boot_status; 39static unsigned long boot_status;
40static spinlock_t wdt_lock;
40 41
41#define WDT_IN_USE 0 42#define WDT_IN_USE 0
42#define WDT_OK_TO_CLOSE 1 43#define WDT_OK_TO_CLOSE 1
@@ -68,8 +69,10 @@ static void wdt_enable(void)
68 /* Arm and enable the Timer to starting counting down from 0xFFFF.FFFF 69 /* Arm and enable the Timer to starting counting down from 0xFFFF.FFFF
69 * Takes approx. 10.7s to timeout 70 * Takes approx. 10.7s to timeout
70 */ 71 */
72 spin_lock(&wdt_lock);
71 write_wdtcr(IOP_WDTCR_EN_ARM); 73 write_wdtcr(IOP_WDTCR_EN_ARM);
72 write_wdtcr(IOP_WDTCR_EN); 74 write_wdtcr(IOP_WDTCR_EN);
75 spin_unlock(&wdt_lock);
73} 76}
74 77
75/* returns 0 if the timer was successfully disabled */ 78/* returns 0 if the timer was successfully disabled */
@@ -77,9 +80,11 @@ static int wdt_disable(void)
77{ 80{
78 /* Stop Counting */ 81 /* Stop Counting */
79 if (wdt_supports_disable()) { 82 if (wdt_supports_disable()) {
83 spin_lock(&wdt_lock);
80 write_wdtcr(IOP_WDTCR_DIS_ARM); 84 write_wdtcr(IOP_WDTCR_DIS_ARM);
81 write_wdtcr(IOP_WDTCR_DIS); 85 write_wdtcr(IOP_WDTCR_DIS);
82 clear_bit(WDT_ENABLED, &wdt_status); 86 clear_bit(WDT_ENABLED, &wdt_status);
87 spin_unlock(&wdt_lock);
83 printk(KERN_INFO "WATCHDOG: Disabled\n"); 88 printk(KERN_INFO "WATCHDOG: Disabled\n");
84 return 0; 89 return 0;
85 } else 90 } else
@@ -92,16 +97,12 @@ static int iop_wdt_open(struct inode *inode, struct file *file)
92 return -EBUSY; 97 return -EBUSY;
93 98
94 clear_bit(WDT_OK_TO_CLOSE, &wdt_status); 99 clear_bit(WDT_OK_TO_CLOSE, &wdt_status);
95
96 wdt_enable(); 100 wdt_enable();
97
98 set_bit(WDT_ENABLED, &wdt_status); 101 set_bit(WDT_ENABLED, &wdt_status);
99
100 return nonseekable_open(inode, file); 102 return nonseekable_open(inode, file);
101} 103}
102 104
103static ssize_t 105static ssize_t iop_wdt_write(struct file *file, const char *data, size_t len,
104iop_wdt_write(struct file *file, const char *data, size_t len,
105 loff_t *ppos) 106 loff_t *ppos)
106{ 107{
107 if (len) { 108 if (len) {
@@ -121,46 +122,35 @@ iop_wdt_write(struct file *file, const char *data, size_t len,
121 } 122 }
122 wdt_enable(); 123 wdt_enable();
123 } 124 }
124
125 return len; 125 return len;
126} 126}
127 127
128static struct watchdog_info ident = { 128static const struct watchdog_info ident = {
129 .options = WDIOF_CARDRESET | WDIOF_MAGICCLOSE | WDIOF_KEEPALIVEPING, 129 .options = WDIOF_CARDRESET | WDIOF_MAGICCLOSE | WDIOF_KEEPALIVEPING,
130 .identity = "iop watchdog", 130 .identity = "iop watchdog",
131}; 131};
132 132
133static int 133static long iop_wdt_ioctl(struct file *file,
134iop_wdt_ioctl(struct inode *inode, struct file *file, unsigned int cmd, 134 unsigned int cmd, unsigned long arg)
135 unsigned long arg)
136{ 135{
137 int options; 136 int options;
138 int ret = -ENOTTY; 137 int ret = -ENOTTY;
138 int __user *argp = (int __user *)arg;
139 139
140 switch (cmd) { 140 switch (cmd) {
141 case WDIOC_GETSUPPORT: 141 case WDIOC_GETSUPPORT:
142 if (copy_to_user 142 if (copy_to_user(argp, &ident, sizeof ident))
143 ((struct watchdog_info *)arg, &ident, sizeof ident))
144 ret = -EFAULT; 143 ret = -EFAULT;
145 else 144 else
146 ret = 0; 145 ret = 0;
147 break; 146 break;
148 147
149 case WDIOC_GETSTATUS: 148 case WDIOC_GETSTATUS:
150 ret = put_user(0, (int *)arg); 149 ret = put_user(0, argp);
151 break; 150 break;
152 151
153 case WDIOC_GETBOOTSTATUS: 152 case WDIOC_GETBOOTSTATUS:
154 ret = put_user(boot_status, (int *)arg); 153 ret = put_user(boot_status, argp);
155 break;
156
157 case WDIOC_GETTIMEOUT:
158 ret = put_user(iop_watchdog_timeout(), (int *)arg);
159 break;
160
161 case WDIOC_KEEPALIVE:
162 wdt_enable();
163 ret = 0;
164 break; 154 break;
165 155
166 case WDIOC_SETOPTIONS: 156 case WDIOC_SETOPTIONS:
@@ -177,14 +167,21 @@ iop_wdt_ioctl(struct inode *inode, struct file *file, unsigned int cmd,
177 } else 167 } else
178 ret = 0; 168 ret = 0;
179 } 169 }
180
181 if (options & WDIOS_ENABLECARD) { 170 if (options & WDIOS_ENABLECARD) {
182 wdt_enable(); 171 wdt_enable();
183 ret = 0; 172 ret = 0;
184 } 173 }
185 break; 174 break;
186 }
187 175
176 case WDIOC_KEEPALIVE:
177 wdt_enable();
178 ret = 0;
179 break;
180
181 case WDIOC_GETTIMEOUT:
182 ret = put_user(iop_watchdog_timeout(), argp);
183 break;
184 }
188 return ret; 185 return ret;
189} 186}
190 187
@@ -214,7 +211,7 @@ static const struct file_operations iop_wdt_fops = {
214 .owner = THIS_MODULE, 211 .owner = THIS_MODULE,
215 .llseek = no_llseek, 212 .llseek = no_llseek,
216 .write = iop_wdt_write, 213 .write = iop_wdt_write,
217 .ioctl = iop_wdt_ioctl, 214 .unlocked_ioctl = iop_wdt_ioctl,
218 .open = iop_wdt_open, 215 .open = iop_wdt_open,
219 .release = iop_wdt_release, 216 .release = iop_wdt_release,
220}; 217};
@@ -229,10 +226,8 @@ static int __init iop_wdt_init(void)
229{ 226{
230 int ret; 227 int ret;
231 228
232 ret = misc_register(&iop_wdt_miscdev); 229 spin_lock_init(&wdt_lock);
233 if (ret == 0) 230
234 printk("iop watchdog timer: timeout %lu sec\n",
235 iop_watchdog_timeout());
236 231
237 /* check if the reset was caused by the watchdog timer */ 232 /* check if the reset was caused by the watchdog timer */
238 boot_status = (read_rcsr() & IOP_RCSR_WDT) ? WDIOF_CARDRESET : 0; 233 boot_status = (read_rcsr() & IOP_RCSR_WDT) ? WDIOF_CARDRESET : 0;
@@ -242,6 +237,13 @@ static int __init iop_wdt_init(void)
242 */ 237 */
243 write_wdtsr(IOP13XX_WDTCR_IB_RESET); 238 write_wdtsr(IOP13XX_WDTCR_IB_RESET);
244 239
240 /* Register after we have the device set up so we cannot race
241 with an open */
242 ret = misc_register(&iop_wdt_miscdev);
243 if (ret == 0)
244 printk(KERN_INFO "iop watchdog timer: timeout %lu sec\n",
245 iop_watchdog_timeout());
246
245 return ret; 247 return ret;
246} 248}
247 249
diff --git a/drivers/watchdog/it8712f_wdt.c b/drivers/watchdog/it8712f_wdt.c
index 445b7e812112..2270ee07c01b 100644
--- a/drivers/watchdog/it8712f_wdt.c
+++ b/drivers/watchdog/it8712f_wdt.c
@@ -30,9 +30,8 @@
30#include <linux/fs.h> 30#include <linux/fs.h>
31#include <linux/pci.h> 31#include <linux/pci.h>
32#include <linux/spinlock.h> 32#include <linux/spinlock.h>
33 33#include <linux/uaccess.h>
34#include <asm/uaccess.h> 34#include <linux/io.h>
35#include <asm/io.h>
36 35
37#define NAME "it8712f_wdt" 36#define NAME "it8712f_wdt"
38 37
@@ -50,7 +49,7 @@ static int nowayout = WATCHDOG_NOWAYOUT;
50module_param(nowayout, int, 0); 49module_param(nowayout, int, 0);
51MODULE_PARM_DESC(nowayout, "Disable watchdog shutdown on close"); 50MODULE_PARM_DESC(nowayout, "Disable watchdog shutdown on close");
52 51
53static struct semaphore it8712f_wdt_sem; 52static unsigned long wdt_open;
54static unsigned expect_close; 53static unsigned expect_close;
55static spinlock_t io_lock; 54static spinlock_t io_lock;
56static unsigned char revision; 55static unsigned char revision;
@@ -86,22 +85,19 @@ static unsigned short address;
86#define WDT_OUT_PWROK 0x10 85#define WDT_OUT_PWROK 0x10
87#define WDT_OUT_KRST 0x40 86#define WDT_OUT_KRST 0x40
88 87
89static int 88static int superio_inb(int reg)
90superio_inb(int reg)
91{ 89{
92 outb(reg, REG); 90 outb(reg, REG);
93 return inb(VAL); 91 return inb(VAL);
94} 92}
95 93
96static void 94static void superio_outb(int val, int reg)
97superio_outb(int val, int reg)
98{ 95{
99 outb(reg, REG); 96 outb(reg, REG);
100 outb(val, VAL); 97 outb(val, VAL);
101} 98}
102 99
103static int 100static int superio_inw(int reg)
104superio_inw(int reg)
105{ 101{
106 int val; 102 int val;
107 outb(reg++, REG); 103 outb(reg++, REG);
@@ -111,15 +107,13 @@ superio_inw(int reg)
111 return val; 107 return val;
112} 108}
113 109
114static inline void 110static inline void superio_select(int ldn)
115superio_select(int ldn)
116{ 111{
117 outb(LDN, REG); 112 outb(LDN, REG);
118 outb(ldn, VAL); 113 outb(ldn, VAL);
119} 114}
120 115
121static inline void 116static inline void superio_enter(void)
122superio_enter(void)
123{ 117{
124 spin_lock(&io_lock); 118 spin_lock(&io_lock);
125 outb(0x87, REG); 119 outb(0x87, REG);
@@ -128,22 +122,19 @@ superio_enter(void)
128 outb(0x55, REG); 122 outb(0x55, REG);
129} 123}
130 124
131static inline void 125static inline void superio_exit(void)
132superio_exit(void)
133{ 126{
134 outb(0x02, REG); 127 outb(0x02, REG);
135 outb(0x02, VAL); 128 outb(0x02, VAL);
136 spin_unlock(&io_lock); 129 spin_unlock(&io_lock);
137} 130}
138 131
139static inline void 132static inline void it8712f_wdt_ping(void)
140it8712f_wdt_ping(void)
141{ 133{
142 inb(address); 134 inb(address);
143} 135}
144 136
145static void 137static void it8712f_wdt_update_margin(void)
146it8712f_wdt_update_margin(void)
147{ 138{
148 int config = WDT_OUT_KRST | WDT_OUT_PWROK; 139 int config = WDT_OUT_KRST | WDT_OUT_PWROK;
149 int units = margin; 140 int units = margin;
@@ -165,8 +156,7 @@ it8712f_wdt_update_margin(void)
165 superio_outb(units, WDT_TIMEOUT); 156 superio_outb(units, WDT_TIMEOUT);
166} 157}
167 158
168static int 159static int it8712f_wdt_get_status(void)
169it8712f_wdt_get_status(void)
170{ 160{
171 if (superio_inb(WDT_CONTROL) & 0x01) 161 if (superio_inb(WDT_CONTROL) & 0x01)
172 return WDIOF_CARDRESET; 162 return WDIOF_CARDRESET;
@@ -174,8 +164,7 @@ it8712f_wdt_get_status(void)
174 return 0; 164 return 0;
175} 165}
176 166
177static void 167static void it8712f_wdt_enable(void)
178it8712f_wdt_enable(void)
179{ 168{
180 printk(KERN_DEBUG NAME ": enabling watchdog timer\n"); 169 printk(KERN_DEBUG NAME ": enabling watchdog timer\n");
181 superio_enter(); 170 superio_enter();
@@ -190,8 +179,7 @@ it8712f_wdt_enable(void)
190 it8712f_wdt_ping(); 179 it8712f_wdt_ping();
191} 180}
192 181
193static void 182static void it8712f_wdt_disable(void)
194it8712f_wdt_disable(void)
195{ 183{
196 printk(KERN_DEBUG NAME ": disabling watchdog timer\n"); 184 printk(KERN_DEBUG NAME ": disabling watchdog timer\n");
197 185
@@ -207,8 +195,7 @@ it8712f_wdt_disable(void)
207 superio_exit(); 195 superio_exit();
208} 196}
209 197
210static int 198static int it8712f_wdt_notify(struct notifier_block *this,
211it8712f_wdt_notify(struct notifier_block *this,
212 unsigned long code, void *unused) 199 unsigned long code, void *unused)
213{ 200{
214 if (code == SYS_HALT || code == SYS_POWER_OFF) 201 if (code == SYS_HALT || code == SYS_POWER_OFF)
@@ -222,9 +209,8 @@ static struct notifier_block it8712f_wdt_notifier = {
222 .notifier_call = it8712f_wdt_notify, 209 .notifier_call = it8712f_wdt_notify,
223}; 210};
224 211
225static ssize_t 212static ssize_t it8712f_wdt_write(struct file *file, const char __user *data,
226it8712f_wdt_write(struct file *file, const char __user *data, 213 size_t len, loff_t *ppos)
227 size_t len, loff_t *ppos)
228{ 214{
229 /* check for a magic close character */ 215 /* check for a magic close character */
230 if (len) { 216 if (len) {
@@ -235,7 +221,7 @@ it8712f_wdt_write(struct file *file, const char __user *data,
235 expect_close = 0; 221 expect_close = 0;
236 for (i = 0; i < len; ++i) { 222 for (i = 0; i < len; ++i) {
237 char c; 223 char c;
238 if (get_user(c, data+i)) 224 if (get_user(c, data + i))
239 return -EFAULT; 225 return -EFAULT;
240 if (c == 'V') 226 if (c == 'V')
241 expect_close = 42; 227 expect_close = 42;
@@ -245,9 +231,8 @@ it8712f_wdt_write(struct file *file, const char __user *data,
245 return len; 231 return len;
246} 232}
247 233
248static int 234static long it8712f_wdt_ioctl(struct file *file, unsigned int cmd,
249it8712f_wdt_ioctl(struct inode *inode, struct file *file, 235 unsigned long arg)
250 unsigned int cmd, unsigned long arg)
251{ 236{
252 void __user *argp = (void __user *)arg; 237 void __user *argp = (void __user *)arg;
253 int __user *p = argp; 238 int __user *p = argp;
@@ -259,8 +244,6 @@ it8712f_wdt_ioctl(struct inode *inode, struct file *file,
259 int value; 244 int value;
260 245
261 switch (cmd) { 246 switch (cmd) {
262 default:
263 return -ENOTTY;
264 case WDIOC_GETSUPPORT: 247 case WDIOC_GETSUPPORT:
265 if (copy_to_user(argp, &ident, sizeof(ident))) 248 if (copy_to_user(argp, &ident, sizeof(ident)))
266 return -EFAULT; 249 return -EFAULT;
@@ -299,22 +282,21 @@ it8712f_wdt_ioctl(struct inode *inode, struct file *file,
299 if (put_user(margin, p)) 282 if (put_user(margin, p))
300 return -EFAULT; 283 return -EFAULT;
301 return 0; 284 return 0;
285 default:
286 return -ENOTTY;
302 } 287 }
303} 288}
304 289
305static int 290static int it8712f_wdt_open(struct inode *inode, struct file *file)
306it8712f_wdt_open(struct inode *inode, struct file *file)
307{ 291{
308 /* only allow one at a time */ 292 /* only allow one at a time */
309 if (down_trylock(&it8712f_wdt_sem)) 293 if (test_and_set_bit(0, &wdt_open))
310 return -EBUSY; 294 return -EBUSY;
311 it8712f_wdt_enable(); 295 it8712f_wdt_enable();
312
313 return nonseekable_open(inode, file); 296 return nonseekable_open(inode, file);
314} 297}
315 298
316static int 299static int it8712f_wdt_release(struct inode *inode, struct file *file)
317it8712f_wdt_release(struct inode *inode, struct file *file)
318{ 300{
319 if (expect_close != 42) { 301 if (expect_close != 42) {
320 printk(KERN_WARNING NAME 302 printk(KERN_WARNING NAME
@@ -324,7 +306,7 @@ it8712f_wdt_release(struct inode *inode, struct file *file)
324 it8712f_wdt_disable(); 306 it8712f_wdt_disable();
325 } 307 }
326 expect_close = 0; 308 expect_close = 0;
327 up(&it8712f_wdt_sem); 309 clear_bit(0, &wdt_open);
328 310
329 return 0; 311 return 0;
330} 312}
@@ -333,7 +315,7 @@ static const struct file_operations it8712f_wdt_fops = {
333 .owner = THIS_MODULE, 315 .owner = THIS_MODULE,
334 .llseek = no_llseek, 316 .llseek = no_llseek,
335 .write = it8712f_wdt_write, 317 .write = it8712f_wdt_write,
336 .ioctl = it8712f_wdt_ioctl, 318 .unlocked_ioctl = it8712f_wdt_ioctl,
337 .open = it8712f_wdt_open, 319 .open = it8712f_wdt_open,
338 .release = it8712f_wdt_release, 320 .release = it8712f_wdt_release,
339}; 321};
@@ -344,8 +326,7 @@ static struct miscdevice it8712f_wdt_miscdev = {
344 .fops = &it8712f_wdt_fops, 326 .fops = &it8712f_wdt_fops,
345}; 327};
346 328
347static int __init 329static int __init it8712f_wdt_find(unsigned short *address)
348it8712f_wdt_find(unsigned short *address)
349{ 330{
350 int err = -ENODEV; 331 int err = -ENODEV;
351 int chip_type; 332 int chip_type;
@@ -387,8 +368,7 @@ exit:
387 return err; 368 return err;
388} 369}
389 370
390static int __init 371static int __init it8712f_wdt_init(void)
391it8712f_wdt_init(void)
392{ 372{
393 int err = 0; 373 int err = 0;
394 374
@@ -404,8 +384,6 @@ it8712f_wdt_init(void)
404 384
405 it8712f_wdt_disable(); 385 it8712f_wdt_disable();
406 386
407 sema_init(&it8712f_wdt_sem, 1);
408
409 err = register_reboot_notifier(&it8712f_wdt_notifier); 387 err = register_reboot_notifier(&it8712f_wdt_notifier);
410 if (err) { 388 if (err) {
411 printk(KERN_ERR NAME ": unable to register reboot notifier\n"); 389 printk(KERN_ERR NAME ": unable to register reboot notifier\n");
@@ -430,8 +408,7 @@ out:
430 return err; 408 return err;
431} 409}
432 410
433static void __exit 411static void __exit it8712f_wdt_exit(void)
434it8712f_wdt_exit(void)
435{ 412{
436 misc_deregister(&it8712f_wdt_miscdev); 413 misc_deregister(&it8712f_wdt_miscdev);
437 unregister_reboot_notifier(&it8712f_wdt_notifier); 414 unregister_reboot_notifier(&it8712f_wdt_notifier);
diff --git a/drivers/watchdog/ixp2000_wdt.c b/drivers/watchdog/ixp2000_wdt.c
index dc7548dcaf35..4f4b35a20d84 100644
--- a/drivers/watchdog/ixp2000_wdt.c
+++ b/drivers/watchdog/ixp2000_wdt.c
@@ -25,42 +25,44 @@
25#include <linux/watchdog.h> 25#include <linux/watchdog.h>
26#include <linux/init.h> 26#include <linux/init.h>
27#include <linux/bitops.h> 27#include <linux/bitops.h>
28 28#include <linux/uaccess.h>
29#include <asm/hardware.h> 29#include <mach/hardware.h>
30#include <asm/uaccess.h>
31 30
32static int nowayout = WATCHDOG_NOWAYOUT; 31static int nowayout = WATCHDOG_NOWAYOUT;
33static unsigned int heartbeat = 60; /* (secs) Default is 1 minute */ 32static unsigned int heartbeat = 60; /* (secs) Default is 1 minute */
34static unsigned long wdt_status; 33static unsigned long wdt_status;
34static spinlock_t wdt_lock;
35 35
36#define WDT_IN_USE 0 36#define WDT_IN_USE 0
37#define WDT_OK_TO_CLOSE 1 37#define WDT_OK_TO_CLOSE 1
38 38
39static unsigned long wdt_tick_rate; 39static unsigned long wdt_tick_rate;
40 40
41static void 41static void wdt_enable(void)
42wdt_enable(void)
43{ 42{
43 spin_lock(&wdt_lock);
44 ixp2000_reg_write(IXP2000_RESET0, *(IXP2000_RESET0) | WDT_RESET_ENABLE); 44 ixp2000_reg_write(IXP2000_RESET0, *(IXP2000_RESET0) | WDT_RESET_ENABLE);
45 ixp2000_reg_write(IXP2000_TWDE, WDT_ENABLE); 45 ixp2000_reg_write(IXP2000_TWDE, WDT_ENABLE);
46 ixp2000_reg_write(IXP2000_T4_CLD, heartbeat * wdt_tick_rate); 46 ixp2000_reg_write(IXP2000_T4_CLD, heartbeat * wdt_tick_rate);
47 ixp2000_reg_write(IXP2000_T4_CTL, TIMER_DIVIDER_256 | TIMER_ENABLE); 47 ixp2000_reg_write(IXP2000_T4_CTL, TIMER_DIVIDER_256 | TIMER_ENABLE);
48 spin_unlock(&wdt_lock);
48} 49}
49 50
50static void 51static void wdt_disable(void)
51wdt_disable(void)
52{ 52{
53 spin_lock(&wdt_lock);
53 ixp2000_reg_write(IXP2000_T4_CTL, 0); 54 ixp2000_reg_write(IXP2000_T4_CTL, 0);
55 spin_unlock(&wdt_lock);
54} 56}
55 57
56static void 58static void wdt_keepalive(void)
57wdt_keepalive(void)
58{ 59{
60 spin_lock(&wdt_lock);
59 ixp2000_reg_write(IXP2000_T4_CLD, heartbeat * wdt_tick_rate); 61 ixp2000_reg_write(IXP2000_T4_CLD, heartbeat * wdt_tick_rate);
62 spin_unlock(&wdt_lock);
60} 63}
61 64
62static int 65static int ixp2000_wdt_open(struct inode *inode, struct file *file)
63ixp2000_wdt_open(struct inode *inode, struct file *file)
64{ 66{
65 if (test_and_set_bit(WDT_IN_USE, &wdt_status)) 67 if (test_and_set_bit(WDT_IN_USE, &wdt_status))
66 return -EBUSY; 68 return -EBUSY;
@@ -72,8 +74,8 @@ ixp2000_wdt_open(struct inode *inode, struct file *file)
72 return nonseekable_open(inode, file); 74 return nonseekable_open(inode, file);
73} 75}
74 76
75static ssize_t 77static ssize_t ixp2000_wdt_write(struct file *file, const char *data,
76ixp2000_wdt_write(struct file *file, const char *data, size_t len, loff_t *ppos) 78 size_t len, loff_t *ppos)
77{ 79{
78 if (len) { 80 if (len) {
79 if (!nowayout) { 81 if (!nowayout) {
@@ -103,9 +105,8 @@ static struct watchdog_info ident = {
103 .identity = "IXP2000 Watchdog", 105 .identity = "IXP2000 Watchdog",
104}; 106};
105 107
106static int 108static long ixp2000_wdt_ioctl(struct file *file, unsigned int cmd,
107ixp2000_wdt_ioctl(struct inode *inode, struct file *file, unsigned int cmd, 109 unsigned long arg)
108 unsigned long arg)
109{ 110{
110 int ret = -ENOTTY; 111 int ret = -ENOTTY;
111 int time; 112 int time;
@@ -124,6 +125,11 @@ ixp2000_wdt_ioctl(struct inode *inode, struct file *file, unsigned int cmd,
124 ret = put_user(0, (int *)arg); 125 ret = put_user(0, (int *)arg);
125 break; 126 break;
126 127
128 case WDIOC_KEEPALIVE:
129 wdt_enable();
130 ret = 0;
131 break;
132
127 case WDIOC_SETTIMEOUT: 133 case WDIOC_SETTIMEOUT:
128 ret = get_user(time, (int *)arg); 134 ret = get_user(time, (int *)arg);
129 if (ret) 135 if (ret)
@@ -141,26 +147,18 @@ ixp2000_wdt_ioctl(struct inode *inode, struct file *file, unsigned int cmd,
141 case WDIOC_GETTIMEOUT: 147 case WDIOC_GETTIMEOUT:
142 ret = put_user(heartbeat, (int *)arg); 148 ret = put_user(heartbeat, (int *)arg);
143 break; 149 break;
144
145 case WDIOC_KEEPALIVE:
146 wdt_enable();
147 ret = 0;
148 break;
149 } 150 }
150 151
151 return ret; 152 return ret;
152} 153}
153 154
154static int 155static int ixp2000_wdt_release(struct inode *inode, struct file *file)
155ixp2000_wdt_release(struct inode *inode, struct file *file)
156{ 156{
157 if (test_bit(WDT_OK_TO_CLOSE, &wdt_status)) { 157 if (test_bit(WDT_OK_TO_CLOSE, &wdt_status))
158 wdt_disable(); 158 wdt_disable();
159 } else { 159 else
160 printk(KERN_CRIT "WATCHDOG: Device closed unexpectedly - " 160 printk(KERN_CRIT "WATCHDOG: Device closed unexpectedly - "
161 "timer will not stop\n"); 161 "timer will not stop\n");
162 }
163
164 clear_bit(WDT_IN_USE, &wdt_status); 162 clear_bit(WDT_IN_USE, &wdt_status);
165 clear_bit(WDT_OK_TO_CLOSE, &wdt_status); 163 clear_bit(WDT_OK_TO_CLOSE, &wdt_status);
166 164
@@ -168,18 +166,16 @@ ixp2000_wdt_release(struct inode *inode, struct file *file)
168} 166}
169 167
170 168
171static const struct file_operations ixp2000_wdt_fops = 169static const struct file_operations ixp2000_wdt_fops = {
172{
173 .owner = THIS_MODULE, 170 .owner = THIS_MODULE,
174 .llseek = no_llseek, 171 .llseek = no_llseek,
175 .write = ixp2000_wdt_write, 172 .write = ixp2000_wdt_write,
176 .ioctl = ixp2000_wdt_ioctl, 173 .unlocked_ioctl = ixp2000_wdt_ioctl,
177 .open = ixp2000_wdt_open, 174 .open = ixp2000_wdt_open,
178 .release = ixp2000_wdt_release, 175 .release = ixp2000_wdt_release,
179}; 176};
180 177
181static struct miscdevice ixp2000_wdt_miscdev = 178static struct miscdevice ixp2000_wdt_miscdev = {
182{
183 .minor = WATCHDOG_MINOR, 179 .minor = WATCHDOG_MINOR,
184 .name = "watchdog", 180 .name = "watchdog",
185 .fops = &ixp2000_wdt_fops, 181 .fops = &ixp2000_wdt_fops,
@@ -191,9 +187,8 @@ static int __init ixp2000_wdt_init(void)
191 printk(KERN_INFO "Unable to use IXP2000 watchdog due to IXP2800 erratum #25.\n"); 187 printk(KERN_INFO "Unable to use IXP2000 watchdog due to IXP2800 erratum #25.\n");
192 return -EIO; 188 return -EIO;
193 } 189 }
194
195 wdt_tick_rate = (*IXP2000_T1_CLD * HZ) / 256; 190 wdt_tick_rate = (*IXP2000_T1_CLD * HZ) / 256;
196 191 spin_lock_init(&wdt_lock);
197 return misc_register(&ixp2000_wdt_miscdev); 192 return misc_register(&ixp2000_wdt_miscdev);
198} 193}
199 194
diff --git a/drivers/watchdog/ixp4xx_wdt.c b/drivers/watchdog/ixp4xx_wdt.c
index 5864bb865cfe..41264a5f1731 100644
--- a/drivers/watchdog/ixp4xx_wdt.c
+++ b/drivers/watchdog/ixp4xx_wdt.c
@@ -22,48 +22,47 @@
22#include <linux/watchdog.h> 22#include <linux/watchdog.h>
23#include <linux/init.h> 23#include <linux/init.h>
24#include <linux/bitops.h> 24#include <linux/bitops.h>
25 25#include <linux/uaccess.h>
26#include <asm/hardware.h> 26#include <mach/hardware.h>
27#include <asm/uaccess.h>
28 27
29static int nowayout = WATCHDOG_NOWAYOUT; 28static int nowayout = WATCHDOG_NOWAYOUT;
30static int heartbeat = 60; /* (secs) Default is 1 minute */ 29static int heartbeat = 60; /* (secs) Default is 1 minute */
31static unsigned long wdt_status; 30static unsigned long wdt_status;
32static unsigned long boot_status; 31static unsigned long boot_status;
32static spinlock_t wdt_lock;
33 33
34#define WDT_TICK_RATE (IXP4XX_PERIPHERAL_BUS_CLOCK * 1000000UL) 34#define WDT_TICK_RATE (IXP4XX_PERIPHERAL_BUS_CLOCK * 1000000UL)
35 35
36#define WDT_IN_USE 0 36#define WDT_IN_USE 0
37#define WDT_OK_TO_CLOSE 1 37#define WDT_OK_TO_CLOSE 1
38 38
39static void 39static void wdt_enable(void)
40wdt_enable(void)
41{ 40{
41 spin_lock(&wdt_lock);
42 *IXP4XX_OSWK = IXP4XX_WDT_KEY; 42 *IXP4XX_OSWK = IXP4XX_WDT_KEY;
43 *IXP4XX_OSWE = 0; 43 *IXP4XX_OSWE = 0;
44 *IXP4XX_OSWT = WDT_TICK_RATE * heartbeat; 44 *IXP4XX_OSWT = WDT_TICK_RATE * heartbeat;
45 *IXP4XX_OSWE = IXP4XX_WDT_COUNT_ENABLE | IXP4XX_WDT_RESET_ENABLE; 45 *IXP4XX_OSWE = IXP4XX_WDT_COUNT_ENABLE | IXP4XX_WDT_RESET_ENABLE;
46 *IXP4XX_OSWK = 0; 46 *IXP4XX_OSWK = 0;
47 spin_unlock(&wdt_lock);
47} 48}
48 49
49static void 50static void wdt_disable(void)
50wdt_disable(void)
51{ 51{
52 spin_lock(&wdt_lock);
52 *IXP4XX_OSWK = IXP4XX_WDT_KEY; 53 *IXP4XX_OSWK = IXP4XX_WDT_KEY;
53 *IXP4XX_OSWE = 0; 54 *IXP4XX_OSWE = 0;
54 *IXP4XX_OSWK = 0; 55 *IXP4XX_OSWK = 0;
56 spin_unlock(&wdt_lock);
55} 57}
56 58
57static int 59static int ixp4xx_wdt_open(struct inode *inode, struct file *file)
58ixp4xx_wdt_open(struct inode *inode, struct file *file)
59{ 60{
60 if (test_and_set_bit(WDT_IN_USE, &wdt_status)) 61 if (test_and_set_bit(WDT_IN_USE, &wdt_status))
61 return -EBUSY; 62 return -EBUSY;
62 63
63 clear_bit(WDT_OK_TO_CLOSE, &wdt_status); 64 clear_bit(WDT_OK_TO_CLOSE, &wdt_status);
64
65 wdt_enable(); 65 wdt_enable();
66
67 return nonseekable_open(inode, file); 66 return nonseekable_open(inode, file);
68} 67}
69 68
@@ -87,7 +86,6 @@ ixp4xx_wdt_write(struct file *file, const char *data, size_t len, loff_t *ppos)
87 } 86 }
88 wdt_enable(); 87 wdt_enable();
89 } 88 }
90
91 return len; 89 return len;
92} 90}
93 91
@@ -98,9 +96,8 @@ static struct watchdog_info ident = {
98}; 96};
99 97
100 98
101static int 99static long ixp4xx_wdt_ioctl(struct file *file, unsigned int cmd,
102ixp4xx_wdt_ioctl(struct inode *inode, struct file *file, unsigned int cmd, 100 unsigned long arg)
103 unsigned long arg)
104{ 101{
105 int ret = -ENOTTY; 102 int ret = -ENOTTY;
106 int time; 103 int time;
@@ -119,6 +116,11 @@ ixp4xx_wdt_ioctl(struct inode *inode, struct file *file, unsigned int cmd,
119 ret = put_user(boot_status, (int *)arg); 116 ret = put_user(boot_status, (int *)arg);
120 break; 117 break;
121 118
119 case WDIOC_KEEPALIVE:
120 wdt_enable();
121 ret = 0;
122 break;
123
122 case WDIOC_SETTIMEOUT: 124 case WDIOC_SETTIMEOUT:
123 ret = get_user(time, (int *)arg); 125 ret = get_user(time, (int *)arg);
124 if (ret) 126 if (ret)
@@ -136,25 +138,17 @@ ixp4xx_wdt_ioctl(struct inode *inode, struct file *file, unsigned int cmd,
136 case WDIOC_GETTIMEOUT: 138 case WDIOC_GETTIMEOUT:
137 ret = put_user(heartbeat, (int *)arg); 139 ret = put_user(heartbeat, (int *)arg);
138 break; 140 break;
139
140 case WDIOC_KEEPALIVE:
141 wdt_enable();
142 ret = 0;
143 break;
144 } 141 }
145 return ret; 142 return ret;
146} 143}
147 144
148static int 145static int ixp4xx_wdt_release(struct inode *inode, struct file *file)
149ixp4xx_wdt_release(struct inode *inode, struct file *file)
150{ 146{
151 if (test_bit(WDT_OK_TO_CLOSE, &wdt_status)) { 147 if (test_bit(WDT_OK_TO_CLOSE, &wdt_status))
152 wdt_disable(); 148 wdt_disable();
153 } else { 149 else
154 printk(KERN_CRIT "WATCHDOG: Device closed unexpectedly - " 150 printk(KERN_CRIT "WATCHDOG: Device closed unexpectedly - "
155 "timer will not stop\n"); 151 "timer will not stop\n");
156 }
157
158 clear_bit(WDT_IN_USE, &wdt_status); 152 clear_bit(WDT_IN_USE, &wdt_status);
159 clear_bit(WDT_OK_TO_CLOSE, &wdt_status); 153 clear_bit(WDT_OK_TO_CLOSE, &wdt_status);
160 154
@@ -162,18 +156,16 @@ ixp4xx_wdt_release(struct inode *inode, struct file *file)
162} 156}
163 157
164 158
165static const struct file_operations ixp4xx_wdt_fops = 159static const struct file_operations ixp4xx_wdt_fops = {
166{
167 .owner = THIS_MODULE, 160 .owner = THIS_MODULE,
168 .llseek = no_llseek, 161 .llseek = no_llseek,
169 .write = ixp4xx_wdt_write, 162 .write = ixp4xx_wdt_write,
170 .ioctl = ixp4xx_wdt_ioctl, 163 .unlocked_ioctl = ixp4xx_wdt_ioctl,
171 .open = ixp4xx_wdt_open, 164 .open = ixp4xx_wdt_open,
172 .release = ixp4xx_wdt_release, 165 .release = ixp4xx_wdt_release,
173}; 166};
174 167
175static struct miscdevice ixp4xx_wdt_miscdev = 168static struct miscdevice ixp4xx_wdt_miscdev = {
176{
177 .minor = WATCHDOG_MINOR, 169 .minor = WATCHDOG_MINOR,
178 .name = "watchdog", 170 .name = "watchdog",
179 .fops = &ixp4xx_wdt_fops, 171 .fops = &ixp4xx_wdt_fops,
@@ -186,19 +178,18 @@ static int __init ixp4xx_wdt_init(void)
186 178
187 asm("mrc p15, 0, %0, cr0, cr0, 0;" : "=r"(processor_id) :); 179 asm("mrc p15, 0, %0, cr0, cr0, 0;" : "=r"(processor_id) :);
188 if (!(processor_id & 0xf) && !cpu_is_ixp46x()) { 180 if (!(processor_id & 0xf) && !cpu_is_ixp46x()) {
189 printk("IXP4XXX Watchdog: Rev. A0 IXP42x CPU detected - " 181 printk(KERN_ERR "IXP4XXX Watchdog: Rev. A0 IXP42x CPU detected"
190 "watchdog disabled\n"); 182 " - watchdog disabled\n");
191 183
192 return -ENODEV; 184 return -ENODEV;
193 } 185 }
194 186 spin_lock_init(&wdt_lock);
195 ret = misc_register(&ixp4xx_wdt_miscdev);
196 if (ret == 0)
197 printk("IXP4xx Watchdog Timer: heartbeat %d sec\n", heartbeat);
198
199 boot_status = (*IXP4XX_OSST & IXP4XX_OSST_TIMER_WARM_RESET) ? 187 boot_status = (*IXP4XX_OSST & IXP4XX_OSST_TIMER_WARM_RESET) ?
200 WDIOF_CARDRESET : 0; 188 WDIOF_CARDRESET : 0;
201 189 ret = misc_register(&ixp4xx_wdt_miscdev);
190 if (ret == 0)
191 printk(KERN_INFO "IXP4xx Watchdog Timer: heartbeat %d sec\n",
192 heartbeat);
202 return ret; 193 return ret;
203} 194}
204 195
diff --git a/drivers/watchdog/ks8695_wdt.c b/drivers/watchdog/ks8695_wdt.c
index df5a6b811ccd..0b798fdaa378 100644
--- a/drivers/watchdog/ks8695_wdt.c
+++ b/drivers/watchdog/ks8695_wdt.c
@@ -19,10 +19,9 @@
19#include <linux/platform_device.h> 19#include <linux/platform_device.h>
20#include <linux/types.h> 20#include <linux/types.h>
21#include <linux/watchdog.h> 21#include <linux/watchdog.h>
22#include <asm/io.h> 22#include <linux/io.h>
23#include <asm/uaccess.h> 23#include <linux/uaccess.h>
24#include <asm/arch/regs-timer.h> 24#include <mach/regs-timer.h>
25
26 25
27#define WDT_DEFAULT_TIME 5 /* seconds */ 26#define WDT_DEFAULT_TIME 5 /* seconds */
28#define WDT_MAX_TIME 171 /* seconds */ 27#define WDT_MAX_TIME 171 /* seconds */
@@ -31,38 +30,44 @@ static int wdt_time = WDT_DEFAULT_TIME;
31static int nowayout = WATCHDOG_NOWAYOUT; 30static int nowayout = WATCHDOG_NOWAYOUT;
32 31
33module_param(wdt_time, int, 0); 32module_param(wdt_time, int, 0);
34MODULE_PARM_DESC(wdt_time, "Watchdog time in seconds. (default="__MODULE_STRING(WDT_DEFAULT_TIME) ")"); 33MODULE_PARM_DESC(wdt_time, "Watchdog time in seconds. (default="
34 __MODULE_STRING(WDT_DEFAULT_TIME) ")");
35 35
36#ifdef CONFIG_WATCHDOG_NOWAYOUT 36#ifdef CONFIG_WATCHDOG_NOWAYOUT
37module_param(nowayout, int, 0); 37module_param(nowayout, int, 0);
38MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped once started (default=" __MODULE_STRING(WATCHDOG_NOWAYOUT) ")"); 38MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped once started (default="
39 __MODULE_STRING(WATCHDOG_NOWAYOUT) ")");
39#endif 40#endif
40 41
41 42
42static unsigned long ks8695wdt_busy; 43static unsigned long ks8695wdt_busy;
44static spinlock_t ks8695_lock;
43 45
44/* ......................................................................... */ 46/* ......................................................................... */
45 47
46/* 48/*
47 * Disable the watchdog. 49 * Disable the watchdog.
48 */ 50 */
49static void inline ks8695_wdt_stop(void) 51static inline void ks8695_wdt_stop(void)
50{ 52{
51 unsigned long tmcon; 53 unsigned long tmcon;
52 54
55 spin_lock(&ks8695_lock);
53 /* disable timer0 */ 56 /* disable timer0 */
54 tmcon = __raw_readl(KS8695_TMR_VA + KS8695_TMCON); 57 tmcon = __raw_readl(KS8695_TMR_VA + KS8695_TMCON);
55 __raw_writel(tmcon & ~TMCON_T0EN, KS8695_TMR_VA + KS8695_TMCON); 58 __raw_writel(tmcon & ~TMCON_T0EN, KS8695_TMR_VA + KS8695_TMCON);
59 spin_unlock(&ks8695_lock);
56} 60}
57 61
58/* 62/*
59 * Enable and reset the watchdog. 63 * Enable and reset the watchdog.
60 */ 64 */
61static void inline ks8695_wdt_start(void) 65static inline void ks8695_wdt_start(void)
62{ 66{
63 unsigned long tmcon; 67 unsigned long tmcon;
64 unsigned long tval = wdt_time * CLOCK_TICK_RATE; 68 unsigned long tval = wdt_time * CLOCK_TICK_RATE;
65 69
70 spin_lock(&ks8695_lock);
66 /* disable timer0 */ 71 /* disable timer0 */
67 tmcon = __raw_readl(KS8695_TMR_VA + KS8695_TMCON); 72 tmcon = __raw_readl(KS8695_TMR_VA + KS8695_TMCON);
68 __raw_writel(tmcon & ~TMCON_T0EN, KS8695_TMR_VA + KS8695_TMCON); 73 __raw_writel(tmcon & ~TMCON_T0EN, KS8695_TMR_VA + KS8695_TMCON);
@@ -73,19 +78,22 @@ static void inline ks8695_wdt_start(void)
73 /* re-enable timer0 */ 78 /* re-enable timer0 */
74 tmcon = __raw_readl(KS8695_TMR_VA + KS8695_TMCON); 79 tmcon = __raw_readl(KS8695_TMR_VA + KS8695_TMCON);
75 __raw_writel(tmcon | TMCON_T0EN, KS8695_TMR_VA + KS8695_TMCON); 80 __raw_writel(tmcon | TMCON_T0EN, KS8695_TMR_VA + KS8695_TMCON);
81 spin_unlock(&ks8695_lock);
76} 82}
77 83
78/* 84/*
79 * Reload the watchdog timer. (ie, pat the watchdog) 85 * Reload the watchdog timer. (ie, pat the watchdog)
80 */ 86 */
81static void inline ks8695_wdt_reload(void) 87static inline void ks8695_wdt_reload(void)
82{ 88{
83 unsigned long tmcon; 89 unsigned long tmcon;
84 90
91 spin_lock(&ks8695_lock);
85 /* disable, then re-enable timer0 */ 92 /* disable, then re-enable timer0 */
86 tmcon = __raw_readl(KS8695_TMR_VA + KS8695_TMCON); 93 tmcon = __raw_readl(KS8695_TMR_VA + KS8695_TMCON);
87 __raw_writel(tmcon & ~TMCON_T0EN, KS8695_TMR_VA + KS8695_TMCON); 94 __raw_writel(tmcon & ~TMCON_T0EN, KS8695_TMR_VA + KS8695_TMCON);
88 __raw_writel(tmcon | TMCON_T0EN, KS8695_TMR_VA + KS8695_TMCON); 95 __raw_writel(tmcon | TMCON_T0EN, KS8695_TMR_VA + KS8695_TMCON);
96 spin_unlock(&ks8695_lock);
89} 97}
90 98
91/* 99/*
@@ -102,7 +110,8 @@ static int ks8695_wdt_settimeout(int new_time)
102 if ((new_time <= 0) || (new_time > WDT_MAX_TIME)) 110 if ((new_time <= 0) || (new_time > WDT_MAX_TIME))
103 return -EINVAL; 111 return -EINVAL;
104 112
105 /* Set new watchdog time. It will be used when ks8695_wdt_start() is called. */ 113 /* Set new watchdog time. It will be used when
114 ks8695_wdt_start() is called. */
106 wdt_time = new_time; 115 wdt_time = new_time;
107 return 0; 116 return 0;
108} 117}
@@ -128,9 +137,9 @@ static int ks8695_wdt_open(struct inode *inode, struct file *file)
128 */ 137 */
129static int ks8695_wdt_close(struct inode *inode, struct file *file) 138static int ks8695_wdt_close(struct inode *inode, struct file *file)
130{ 139{
140 /* Disable the watchdog when file is closed */
131 if (!nowayout) 141 if (!nowayout)
132 ks8695_wdt_stop(); /* Disable the watchdog when file is closed */ 142 ks8695_wdt_stop();
133
134 clear_bit(0, &ks8695wdt_busy); 143 clear_bit(0, &ks8695wdt_busy);
135 return 0; 144 return 0;
136} 145}
@@ -143,60 +152,52 @@ static struct watchdog_info ks8695_wdt_info = {
143/* 152/*
144 * Handle commands from user-space. 153 * Handle commands from user-space.
145 */ 154 */
146static int ks8695_wdt_ioctl(struct inode *inode, struct file *file, 155static long ks8695_wdt_ioctl(struct file *file, unsigned int cmd,
147 unsigned int cmd, unsigned long arg) 156 unsigned long arg)
148{ 157{
149 void __user *argp = (void __user *)arg; 158 void __user *argp = (void __user *)arg;
150 int __user *p = argp; 159 int __user *p = argp;
151 int new_value; 160 int new_value;
152 161
153 switch(cmd) { 162 switch (cmd) {
154 case WDIOC_KEEPALIVE: 163 case WDIOC_GETSUPPORT:
155 ks8695_wdt_reload(); /* pat the watchdog */ 164 return copy_to_user(argp, &ks8695_wdt_info,
156 return 0; 165 sizeof(ks8695_wdt_info)) ? -EFAULT : 0;
157 166 case WDIOC_GETSTATUS:
158 case WDIOC_GETSUPPORT: 167 case WDIOC_GETBOOTSTATUS:
159 return copy_to_user(argp, &ks8695_wdt_info, sizeof(ks8695_wdt_info)) ? -EFAULT : 0; 168 return put_user(0, p);
160 169 case WDIOC_SETOPTIONS:
161 case WDIOC_SETTIMEOUT: 170 if (get_user(new_value, p))
162 if (get_user(new_value, p)) 171 return -EFAULT;
163 return -EFAULT; 172 if (new_value & WDIOS_DISABLECARD)
164 173 ks8695_wdt_stop();
165 if (ks8695_wdt_settimeout(new_value)) 174 if (new_value & WDIOS_ENABLECARD)
166 return -EINVAL;
167
168 /* Enable new time value */
169 ks8695_wdt_start(); 175 ks8695_wdt_start();
170 176 return 0;
171 /* Return current value */ 177 case WDIOC_KEEPALIVE:
172 return put_user(wdt_time, p); 178 ks8695_wdt_reload(); /* pat the watchdog */
173 179 return 0;
174 case WDIOC_GETTIMEOUT: 180 case WDIOC_SETTIMEOUT:
175 return put_user(wdt_time, p); 181 if (get_user(new_value, p))
176 182 return -EFAULT;
177 case WDIOC_GETSTATUS: 183 if (ks8695_wdt_settimeout(new_value))
178 case WDIOC_GETBOOTSTATUS: 184 return -EINVAL;
179 return put_user(0, p); 185 /* Enable new time value */
180 186 ks8695_wdt_start();
181 case WDIOC_SETOPTIONS: 187 /* Return current value */
182 if (get_user(new_value, p)) 188 return put_user(wdt_time, p);
183 return -EFAULT; 189 case WDIOC_GETTIMEOUT:
184 190 return put_user(wdt_time, p);
185 if (new_value & WDIOS_DISABLECARD) 191 default:
186 ks8695_wdt_stop(); 192 return -ENOTTY;
187 if (new_value & WDIOS_ENABLECARD)
188 ks8695_wdt_start();
189 return 0;
190
191 default:
192 return -ENOTTY;
193 } 193 }
194} 194}
195 195
196/* 196/*
197 * Pat the watchdog whenever device is written to. 197 * Pat the watchdog whenever device is written to.
198 */ 198 */
199static ssize_t ks8695_wdt_write(struct file *file, const char *data, size_t len, loff_t *ppos) 199static ssize_t ks8695_wdt_write(struct file *file, const char *data,
200 size_t len, loff_t *ppos)
200{ 201{
201 ks8695_wdt_reload(); /* pat the watchdog */ 202 ks8695_wdt_reload(); /* pat the watchdog */
202 return len; 203 return len;
@@ -207,7 +208,7 @@ static ssize_t ks8695_wdt_write(struct file *file, const char *data, size_t len,
207static const struct file_operations ks8695wdt_fops = { 208static const struct file_operations ks8695wdt_fops = {
208 .owner = THIS_MODULE, 209 .owner = THIS_MODULE,
209 .llseek = no_llseek, 210 .llseek = no_llseek,
210 .ioctl = ks8695_wdt_ioctl, 211 .unlocked_ioctl = ks8695_wdt_ioctl,
211 .open = ks8695_wdt_open, 212 .open = ks8695_wdt_open,
212 .release = ks8695_wdt_close, 213 .release = ks8695_wdt_close,
213 .write = ks8695_wdt_write, 214 .write = ks8695_wdt_write,
@@ -231,7 +232,8 @@ static int __init ks8695wdt_probe(struct platform_device *pdev)
231 if (res) 232 if (res)
232 return res; 233 return res;
233 234
234 printk("KS8695 Watchdog Timer enabled (%d seconds%s)\n", wdt_time, nowayout ? ", nowayout" : ""); 235 printk(KERN_INFO "KS8695 Watchdog Timer enabled (%d seconds%s)\n",
236 wdt_time, nowayout ? ", nowayout" : "");
235 return 0; 237 return 0;
236} 238}
237 239
@@ -285,12 +287,14 @@ static struct platform_driver ks8695wdt_driver = {
285 287
286static int __init ks8695_wdt_init(void) 288static int __init ks8695_wdt_init(void)
287{ 289{
288 /* Check that the heartbeat value is within range; if not reset to the default */ 290 spin_lock_init(&ks8695_lock);
291 /* Check that the heartbeat value is within range;
292 if not reset to the default */
289 if (ks8695_wdt_settimeout(wdt_time)) { 293 if (ks8695_wdt_settimeout(wdt_time)) {
290 ks8695_wdt_settimeout(WDT_DEFAULT_TIME); 294 ks8695_wdt_settimeout(WDT_DEFAULT_TIME);
291 pr_info("ks8695_wdt: wdt_time value must be 1 <= wdt_time <= %i, using %d\n", wdt_time, WDT_MAX_TIME); 295 pr_info("ks8695_wdt: wdt_time value must be 1 <= wdt_time <= %i, using %d\n",
296 wdt_time, WDT_MAX_TIME);
292 } 297 }
293
294 return platform_driver_register(&ks8695wdt_driver); 298 return platform_driver_register(&ks8695wdt_driver);
295} 299}
296 300
diff --git a/drivers/watchdog/machzwd.c b/drivers/watchdog/machzwd.c
index 6905135a776c..2dfc27559bf7 100644
--- a/drivers/watchdog/machzwd.c
+++ b/drivers/watchdog/machzwd.c
@@ -40,9 +40,9 @@
40#include <linux/notifier.h> 40#include <linux/notifier.h>
41#include <linux/reboot.h> 41#include <linux/reboot.h>
42#include <linux/init.h> 42#include <linux/init.h>
43#include <linux/io.h>
44#include <linux/uaccess.h>
43 45
44#include <asm/io.h>
45#include <asm/uaccess.h>
46#include <asm/system.h> 46#include <asm/system.h>
47 47
48/* ports */ 48/* ports */
@@ -95,7 +95,9 @@ MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR);
95 95
96static int nowayout = WATCHDOG_NOWAYOUT; 96static int nowayout = WATCHDOG_NOWAYOUT;
97module_param(nowayout, int, 0); 97module_param(nowayout, int, 0);
98MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped once started (default=" __MODULE_STRING(WATCHDOG_NOWAYOUT) ")"); 98MODULE_PARM_DESC(nowayout,
99 "Watchdog cannot be stopped once started (default="
100 __MODULE_STRING(WATCHDOG_NOWAYOUT) ")");
99 101
100#define PFX "machzwd" 102#define PFX "machzwd"
101 103
@@ -114,7 +116,7 @@ static struct watchdog_info zf_info = {
114 * 3 = GEN_SCI 116 * 3 = GEN_SCI
115 * defaults to GEN_RESET (0) 117 * defaults to GEN_RESET (0)
116 */ 118 */
117static int action = 0; 119static int action;
118module_param(action, int, 0); 120module_param(action, int, 0);
119MODULE_PARM_DESC(action, "after watchdog resets, generate: 0 = RESET(*) 1 = SMI 2 = NMI 3 = SCI"); 121MODULE_PARM_DESC(action, "after watchdog resets, generate: 0 = RESET(*) 1 = SMI 2 = NMI 3 = SCI");
120 122
@@ -123,10 +125,9 @@ static void zf_ping(unsigned long data);
123static int zf_action = GEN_RESET; 125static int zf_action = GEN_RESET;
124static unsigned long zf_is_open; 126static unsigned long zf_is_open;
125static char zf_expect_close; 127static char zf_expect_close;
126static DEFINE_SPINLOCK(zf_lock);
127static DEFINE_SPINLOCK(zf_port_lock); 128static DEFINE_SPINLOCK(zf_port_lock);
128static DEFINE_TIMER(zf_timer, zf_ping, 0, 0); 129static DEFINE_TIMER(zf_timer, zf_ping, 0, 0);
129static unsigned long next_heartbeat = 0; 130static unsigned long next_heartbeat;
130 131
131 132
132/* timeout for user land heart beat (10 seconds) */ 133/* timeout for user land heart beat (10 seconds) */
@@ -171,13 +172,13 @@ static inline void zf_set_control(unsigned short new)
171 172
172static inline void zf_set_timer(unsigned short new, unsigned char n) 173static inline void zf_set_timer(unsigned short new, unsigned char n)
173{ 174{
174 switch(n){ 175 switch (n) {
175 case WD1: 176 case WD1:
176 zf_writew(COUNTER_1, new); 177 zf_writew(COUNTER_1, new);
177 case WD2: 178 case WD2:
178 zf_writeb(COUNTER_2, new > 0xff ? 0xff : new); 179 zf_writeb(COUNTER_2, new > 0xff ? 0xff : new);
179 default: 180 default:
180 return; 181 return;
181 } 182 }
182} 183}
183 184
@@ -241,10 +242,8 @@ static void zf_ping(unsigned long data)
241 242
242 zf_writeb(COUNTER_2, 0xff); 243 zf_writeb(COUNTER_2, 0xff);
243 244
244 if(time_before(jiffies, next_heartbeat)){ 245 if (time_before(jiffies, next_heartbeat)) {
245
246 dprintk("time_before: %ld\n", next_heartbeat - jiffies); 246 dprintk("time_before: %ld\n", next_heartbeat - jiffies);
247
248 /* 247 /*
249 * reset event is activated by transition from 0 to 1 on 248 * reset event is activated by transition from 0 to 1 on
250 * RESET_WD1 bit and we assume that it is already zero... 249 * RESET_WD1 bit and we assume that it is already zero...
@@ -261,24 +260,21 @@ static void zf_ping(unsigned long data)
261 spin_unlock_irqrestore(&zf_port_lock, flags); 260 spin_unlock_irqrestore(&zf_port_lock, flags);
262 261
263 mod_timer(&zf_timer, jiffies + ZF_HW_TIMEO); 262 mod_timer(&zf_timer, jiffies + ZF_HW_TIMEO);
264 }else{ 263 } else
265 printk(KERN_CRIT PFX ": I will reset your machine\n"); 264 printk(KERN_CRIT PFX ": I will reset your machine\n");
266 }
267} 265}
268 266
269static ssize_t zf_write(struct file *file, const char __user *buf, size_t count, 267static ssize_t zf_write(struct file *file, const char __user *buf, size_t count,
270 loff_t *ppos) 268 loff_t *ppos)
271{ 269{
272 /* See if we got the magic character */ 270 /* See if we got the magic character */
273 if(count){ 271 if (count) {
274
275 /* 272 /*
276 * no need to check for close confirmation 273 * no need to check for close confirmation
277 * no way to disable watchdog ;) 274 * no way to disable watchdog ;)
278 */ 275 */
279 if (!nowayout) { 276 if (!nowayout) {
280 size_t ofs; 277 size_t ofs;
281
282 /* 278 /*
283 * note: just in case someone wrote the magic character 279 * note: just in case someone wrote the magic character
284 * five months ago... 280 * five months ago...
@@ -286,11 +282,11 @@ static ssize_t zf_write(struct file *file, const char __user *buf, size_t count,
286 zf_expect_close = 0; 282 zf_expect_close = 0;
287 283
288 /* now scan */ 284 /* now scan */
289 for (ofs = 0; ofs != count; ofs++){ 285 for (ofs = 0; ofs != count; ofs++) {
290 char c; 286 char c;
291 if (get_user(c, buf + ofs)) 287 if (get_user(c, buf + ofs))
292 return -EFAULT; 288 return -EFAULT;
293 if (c == 'V'){ 289 if (c == 'V') {
294 zf_expect_close = 42; 290 zf_expect_close = 42;
295 dprintk("zf_expect_close = 42\n"); 291 dprintk("zf_expect_close = 42\n");
296 } 292 }
@@ -303,14 +299,11 @@ static ssize_t zf_write(struct file *file, const char __user *buf, size_t count,
303 */ 299 */
304 next_heartbeat = jiffies + ZF_USER_TIMEO; 300 next_heartbeat = jiffies + ZF_USER_TIMEO;
305 dprintk("user ping at %ld\n", jiffies); 301 dprintk("user ping at %ld\n", jiffies);
306
307 } 302 }
308
309 return count; 303 return count;
310} 304}
311 305
312static int zf_ioctl(struct inode *inode, struct file *file, unsigned int cmd, 306static long zf_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
313 unsigned long arg)
314{ 307{
315 void __user *argp = (void __user *)arg; 308 void __user *argp = (void __user *)arg;
316 int __user *p = argp; 309 int __user *p = argp;
@@ -319,55 +312,38 @@ static int zf_ioctl(struct inode *inode, struct file *file, unsigned int cmd,
319 if (copy_to_user(argp, &zf_info, sizeof(zf_info))) 312 if (copy_to_user(argp, &zf_info, sizeof(zf_info)))
320 return -EFAULT; 313 return -EFAULT;
321 break; 314 break;
322
323 case WDIOC_GETSTATUS: 315 case WDIOC_GETSTATUS:
324 case WDIOC_GETBOOTSTATUS: 316 case WDIOC_GETBOOTSTATUS:
325 return put_user(0, p); 317 return put_user(0, p);
326
327 case WDIOC_KEEPALIVE: 318 case WDIOC_KEEPALIVE:
328 zf_ping(0); 319 zf_ping(0);
329 break; 320 break;
330
331 default: 321 default:
332 return -ENOTTY; 322 return -ENOTTY;
333 } 323 }
334
335 return 0; 324 return 0;
336} 325}
337 326
338static int zf_open(struct inode *inode, struct file *file) 327static int zf_open(struct inode *inode, struct file *file)
339{ 328{
340 spin_lock(&zf_lock); 329 if (test_and_set_bit(0, &zf_is_open))
341 if(test_and_set_bit(0, &zf_is_open)) {
342 spin_unlock(&zf_lock);
343 return -EBUSY; 330 return -EBUSY;
344 }
345
346 if (nowayout) 331 if (nowayout)
347 __module_get(THIS_MODULE); 332 __module_get(THIS_MODULE);
348
349 spin_unlock(&zf_lock);
350
351 zf_timer_on(); 333 zf_timer_on();
352
353 return nonseekable_open(inode, file); 334 return nonseekable_open(inode, file);
354} 335}
355 336
356static int zf_close(struct inode *inode, struct file *file) 337static int zf_close(struct inode *inode, struct file *file)
357{ 338{
358 if(zf_expect_close == 42){ 339 if (zf_expect_close == 42)
359 zf_timer_off(); 340 zf_timer_off();
360 } else { 341 else {
361 del_timer(&zf_timer); 342 del_timer(&zf_timer);
362 printk(KERN_ERR PFX ": device file closed unexpectedly. Will not stop the WDT!\n"); 343 printk(KERN_ERR PFX ": device file closed unexpectedly. Will not stop the WDT!\n");
363 } 344 }
364
365 spin_lock(&zf_lock);
366 clear_bit(0, &zf_is_open); 345 clear_bit(0, &zf_is_open);
367 spin_unlock(&zf_lock);
368
369 zf_expect_close = 0; 346 zf_expect_close = 0;
370
371 return 0; 347 return 0;
372} 348}
373 349
@@ -378,23 +354,18 @@ static int zf_close(struct inode *inode, struct file *file)
378static int zf_notify_sys(struct notifier_block *this, unsigned long code, 354static int zf_notify_sys(struct notifier_block *this, unsigned long code,
379 void *unused) 355 void *unused)
380{ 356{
381 if(code == SYS_DOWN || code == SYS_HALT){ 357 if (code == SYS_DOWN || code == SYS_HALT)
382 zf_timer_off(); 358 zf_timer_off();
383 }
384
385 return NOTIFY_DONE; 359 return NOTIFY_DONE;
386} 360}
387 361
388
389
390
391static const struct file_operations zf_fops = { 362static const struct file_operations zf_fops = {
392 .owner = THIS_MODULE, 363 .owner = THIS_MODULE,
393 .llseek = no_llseek, 364 .llseek = no_llseek,
394 .write = zf_write, 365 .write = zf_write,
395 .ioctl = zf_ioctl, 366 .unlocked_ioctl = zf_ioctl,
396 .open = zf_open, 367 .open = zf_open,
397 .release = zf_close, 368 .release = zf_close,
398}; 369};
399 370
400static struct miscdevice zf_miscdev = { 371static struct miscdevice zf_miscdev = {
@@ -402,7 +373,7 @@ static struct miscdevice zf_miscdev = {
402 .name = "watchdog", 373 .name = "watchdog",
403 .fops = &zf_fops, 374 .fops = &zf_fops,
404}; 375};
405 376
406 377
407/* 378/*
408 * The device needs to learn about soft shutdowns in order to 379 * The device needs to learn about soft shutdowns in order to
@@ -423,22 +394,23 @@ static int __init zf_init(void)
423{ 394{
424 int ret; 395 int ret;
425 396
426 printk(KERN_INFO PFX ": MachZ ZF-Logic Watchdog driver initializing.\n"); 397 printk(KERN_INFO PFX
398 ": MachZ ZF-Logic Watchdog driver initializing.\n");
427 399
428 ret = zf_get_ZFL_version(); 400 ret = zf_get_ZFL_version();
429 if ((!ret) || (ret == 0xffff)) { 401 if (!ret || ret == 0xffff) {
430 printk(KERN_WARNING PFX ": no ZF-Logic found\n"); 402 printk(KERN_WARNING PFX ": no ZF-Logic found\n");
431 return -ENODEV; 403 return -ENODEV;
432 } 404 }
433 405
434 if((action <= 3) && (action >= 0)){ 406 if (action <= 3 && action >= 0)
435 zf_action = zf_action>>action; 407 zf_action = zf_action >> action;
436 } else 408 else
437 action = 0; 409 action = 0;
438 410
439 zf_show_action(action); 411 zf_show_action(action);
440 412
441 if(!request_region(ZF_IOBASE, 3, "MachZ ZFL WDT")){ 413 if (!request_region(ZF_IOBASE, 3, "MachZ ZFL WDT")) {
442 printk(KERN_ERR "cannot reserve I/O ports at %d\n", 414 printk(KERN_ERR "cannot reserve I/O ports at %d\n",
443 ZF_IOBASE); 415 ZF_IOBASE);
444 ret = -EBUSY; 416 ret = -EBUSY;
@@ -446,14 +418,14 @@ static int __init zf_init(void)
446 } 418 }
447 419
448 ret = register_reboot_notifier(&zf_notifier); 420 ret = register_reboot_notifier(&zf_notifier);
449 if(ret){ 421 if (ret) {
450 printk(KERN_ERR "can't register reboot notifier (err=%d)\n", 422 printk(KERN_ERR "can't register reboot notifier (err=%d)\n",
451 ret); 423 ret);
452 goto no_reboot; 424 goto no_reboot;
453 } 425 }
454 426
455 ret = misc_register(&zf_miscdev); 427 ret = misc_register(&zf_miscdev);
456 if (ret){ 428 if (ret) {
457 printk(KERN_ERR "can't misc_register on minor=%d\n", 429 printk(KERN_ERR "can't misc_register on minor=%d\n",
458 WATCHDOG_MINOR); 430 WATCHDOG_MINOR);
459 goto no_misc; 431 goto no_misc;
diff --git a/drivers/watchdog/mixcomwd.c b/drivers/watchdog/mixcomwd.c
index 1adf1d56027d..407b025cb104 100644
--- a/drivers/watchdog/mixcomwd.c
+++ b/drivers/watchdog/mixcomwd.c
@@ -29,7 +29,8 @@
29 * - support for one more type board 29 * - support for one more type board
30 * 30 *
31 * Version 0.5 (2001/12/14) Matt Domsch <Matt_Domsch@dell.com> 31 * Version 0.5 (2001/12/14) Matt Domsch <Matt_Domsch@dell.com>
32 * - added nowayout module option to override CONFIG_WATCHDOG_NOWAYOUT 32 * - added nowayout module option to override
33 * CONFIG_WATCHDOG_NOWAYOUT
33 * 34 *
34 * Version 0.6 (2002/04/12): Rob Radez <rob@osinvestor.com> 35 * Version 0.6 (2002/04/12): Rob Radez <rob@osinvestor.com>
35 * - make mixcomwd_opened unsigned, 36 * - make mixcomwd_opened unsigned,
@@ -53,8 +54,8 @@
53#include <linux/init.h> 54#include <linux/init.h>
54#include <linux/jiffies.h> 55#include <linux/jiffies.h>
55#include <linux/timer.h> 56#include <linux/timer.h>
56#include <asm/uaccess.h> 57#include <linux/uaccess.h>
57#include <asm/io.h> 58#include <linux/io.h>
58 59
59/* 60/*
60 * We have two types of cards that can be probed: 61 * We have two types of cards that can be probed:
@@ -108,18 +109,19 @@ static char expect_close;
108 109
109static int nowayout = WATCHDOG_NOWAYOUT; 110static int nowayout = WATCHDOG_NOWAYOUT;
110module_param(nowayout, int, 0); 111module_param(nowayout, int, 0);
111MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped once started (default=" __MODULE_STRING(WATCHDOG_NOWAYOUT) ")"); 112MODULE_PARM_DESC(nowayout,
113 "Watchdog cannot be stopped once started (default="
114 __MODULE_STRING(WATCHDOG_NOWAYOUT) ")");
112 115
113static void mixcomwd_ping(void) 116static void mixcomwd_ping(void)
114{ 117{
115 outb_p(55,watchdog_port); 118 outb_p(55, watchdog_port);
116 return; 119 return;
117} 120}
118 121
119static void mixcomwd_timerfun(unsigned long d) 122static void mixcomwd_timerfun(unsigned long d)
120{ 123{
121 mixcomwd_ping(); 124 mixcomwd_ping();
122
123 mod_timer(&mixcomwd_timer, jiffies + 5 * HZ); 125 mod_timer(&mixcomwd_timer, jiffies + 5 * HZ);
124} 126}
125 127
@@ -129,22 +131,22 @@ static void mixcomwd_timerfun(unsigned long d)
129 131
130static int mixcomwd_open(struct inode *inode, struct file *file) 132static int mixcomwd_open(struct inode *inode, struct file *file)
131{ 133{
132 if(test_and_set_bit(0,&mixcomwd_opened)) { 134 if (test_and_set_bit(0, &mixcomwd_opened))
133 return -EBUSY; 135 return -EBUSY;
134 } 136
135 mixcomwd_ping(); 137 mixcomwd_ping();
136 138
137 if (nowayout) { 139 if (nowayout)
138 /* 140 /*
139 * fops_get() code via open() has already done 141 * fops_get() code via open() has already done
140 * a try_module_get() so it is safe to do the 142 * a try_module_get() so it is safe to do the
141 * __module_get(). 143 * __module_get().
142 */ 144 */
143 __module_get(THIS_MODULE); 145 __module_get(THIS_MODULE);
144 } else { 146 else {
145 if(mixcomwd_timer_alive) { 147 if (mixcomwd_timer_alive) {
146 del_timer(&mixcomwd_timer); 148 del_timer(&mixcomwd_timer);
147 mixcomwd_timer_alive=0; 149 mixcomwd_timer_alive = 0;
148 } 150 }
149 } 151 }
150 return nonseekable_open(inode, file); 152 return nonseekable_open(inode, file);
@@ -153,26 +155,27 @@ static int mixcomwd_open(struct inode *inode, struct file *file)
153static int mixcomwd_release(struct inode *inode, struct file *file) 155static int mixcomwd_release(struct inode *inode, struct file *file)
154{ 156{
155 if (expect_close == 42) { 157 if (expect_close == 42) {
156 if(mixcomwd_timer_alive) { 158 if (mixcomwd_timer_alive) {
157 printk(KERN_ERR PFX "release called while internal timer alive"); 159 printk(KERN_ERR PFX
160 "release called while internal timer alive");
158 return -EBUSY; 161 return -EBUSY;
159 } 162 }
160 mixcomwd_timer_alive=1; 163 mixcomwd_timer_alive = 1;
161 mod_timer(&mixcomwd_timer, jiffies + 5 * HZ); 164 mod_timer(&mixcomwd_timer, jiffies + 5 * HZ);
162 } else { 165 } else
163 printk(KERN_CRIT PFX "WDT device closed unexpectedly. WDT will not stop!\n"); 166 printk(KERN_CRIT PFX
164 } 167 "WDT device closed unexpectedly. WDT will not stop!\n");
165 168
166 clear_bit(0,&mixcomwd_opened); 169 clear_bit(0, &mixcomwd_opened);
167 expect_close=0; 170 expect_close = 0;
168 return 0; 171 return 0;
169} 172}
170 173
171 174
172static ssize_t mixcomwd_write(struct file *file, const char __user *data, size_t len, loff_t *ppos) 175static ssize_t mixcomwd_write(struct file *file, const char __user *data,
176 size_t len, loff_t *ppos)
173{ 177{
174 if(len) 178 if (len) {
175 {
176 if (!nowayout) { 179 if (!nowayout) {
177 size_t i; 180 size_t i;
178 181
@@ -192,8 +195,8 @@ static ssize_t mixcomwd_write(struct file *file, const char __user *data, size_t
192 return len; 195 return len;
193} 196}
194 197
195static int mixcomwd_ioctl(struct inode *inode, struct file *file, 198static long mixcomwd_ioctl(struct file *file,
196 unsigned int cmd, unsigned long arg) 199 unsigned int cmd, unsigned long arg)
197{ 200{
198 void __user *argp = (void __user *)arg; 201 void __user *argp = (void __user *)arg;
199 int __user *p = argp; 202 int __user *p = argp;
@@ -204,32 +207,23 @@ static int mixcomwd_ioctl(struct inode *inode, struct file *file,
204 .identity = "MixCOM watchdog", 207 .identity = "MixCOM watchdog",
205 }; 208 };
206 209
207 switch(cmd) 210 switch (cmd) {
208 { 211 case WDIOC_GETSUPPORT:
209 case WDIOC_GETSTATUS: 212 if (copy_to_user(argp, &ident, sizeof(ident)))
210 status=mixcomwd_opened; 213 return -EFAULT;
211 if (!nowayout) { 214 break;
212 status|=mixcomwd_timer_alive; 215 case WDIOC_GETSTATUS:
213 } 216 status = mixcomwd_opened;
214 if (copy_to_user(p, &status, sizeof(int))) { 217 if (!nowayout)
215 return -EFAULT; 218 status |= mixcomwd_timer_alive;
216 } 219 return put_user(status, p);
217 break; 220 case WDIOC_GETBOOTSTATUS:
218 case WDIOC_GETBOOTSTATUS: 221 return put_user(0, p);
219 if (copy_to_user(p, &status, sizeof(int))) { 222 case WDIOC_KEEPALIVE:
220 return -EFAULT; 223 mixcomwd_ping();
221 } 224 break;
222 break; 225 default:
223 case WDIOC_GETSUPPORT: 226 return -ENOTTY;
224 if (copy_to_user(argp, &ident, sizeof(ident))) {
225 return -EFAULT;
226 }
227 break;
228 case WDIOC_KEEPALIVE:
229 mixcomwd_ping();
230 break;
231 default:
232 return -ENOTTY;
233 } 227 }
234 return 0; 228 return 0;
235} 229}
@@ -238,7 +232,7 @@ static const struct file_operations mixcomwd_fops = {
238 .owner = THIS_MODULE, 232 .owner = THIS_MODULE,
239 .llseek = no_llseek, 233 .llseek = no_llseek,
240 .write = mixcomwd_write, 234 .write = mixcomwd_write,
241 .ioctl = mixcomwd_ioctl, 235 .unlocked_ioctl = mixcomwd_ioctl,
242 .open = mixcomwd_open, 236 .open = mixcomwd_open,
243 .release = mixcomwd_release, 237 .release = mixcomwd_release,
244}; 238};
@@ -253,15 +247,14 @@ static int __init checkcard(int port, int card_id)
253{ 247{
254 int id; 248 int id;
255 249
256 if (!request_region(port, 1, "MixCOM watchdog")) { 250 if (!request_region(port, 1, "MixCOM watchdog"))
257 return 0; 251 return 0;
258 }
259 252
260 id=inb_p(port); 253 id = inb_p(port);
261 if (card_id==MIXCOM_ID) 254 if (card_id == MIXCOM_ID)
262 id &= 0x3f; 255 id &= 0x3f;
263 256
264 if (id!=card_id) { 257 if (id != card_id) {
265 release_region(port, 1); 258 release_region(port, 1);
266 return 0; 259 return 0;
267 } 260 }
@@ -270,9 +263,7 @@ static int __init checkcard(int port, int card_id)
270 263
271static int __init mixcomwd_init(void) 264static int __init mixcomwd_init(void)
272{ 265{
273 int i; 266 int i, ret, found = 0;
274 int ret;
275 int found=0;
276 267
277 for (i = 0; !found && mixcomwd_io_info[i].ioport != 0; i++) { 268 for (i = 0; !found && mixcomwd_io_info[i].ioport != 0; i++) {
278 if (checkcard(mixcomwd_io_info[i].ioport, 269 if (checkcard(mixcomwd_io_info[i].ioport,
@@ -283,20 +274,22 @@ static int __init mixcomwd_init(void)
283 } 274 }
284 275
285 if (!found) { 276 if (!found) {
286 printk(KERN_ERR PFX "No card detected, or port not available.\n"); 277 printk(KERN_ERR PFX
278 "No card detected, or port not available.\n");
287 return -ENODEV; 279 return -ENODEV;
288 } 280 }
289 281
290 ret = misc_register(&mixcomwd_miscdev); 282 ret = misc_register(&mixcomwd_miscdev);
291 if (ret) 283 if (ret) {
292 { 284 printk(KERN_ERR PFX
293 printk(KERN_ERR PFX "cannot register miscdev on minor=%d (err=%d)\n", 285 "cannot register miscdev on minor=%d (err=%d)\n",
294 WATCHDOG_MINOR, ret); 286 WATCHDOG_MINOR, ret);
295 goto error_misc_register_watchdog; 287 goto error_misc_register_watchdog;
296 } 288 }
297 289
298 printk(KERN_INFO "MixCOM watchdog driver v%s, watchdog port at 0x%3x\n", 290 printk(KERN_INFO
299 VERSION, watchdog_port); 291 "MixCOM watchdog driver v%s, watchdog port at 0x%3x\n",
292 VERSION, watchdog_port);
300 293
301 return 0; 294 return 0;
302 295
@@ -309,15 +302,15 @@ error_misc_register_watchdog:
309static void __exit mixcomwd_exit(void) 302static void __exit mixcomwd_exit(void)
310{ 303{
311 if (!nowayout) { 304 if (!nowayout) {
312 if(mixcomwd_timer_alive) { 305 if (mixcomwd_timer_alive) {
313 printk(KERN_WARNING PFX "I quit now, hardware will" 306 printk(KERN_WARNING PFX "I quit now, hardware will"
314 " probably reboot!\n"); 307 " probably reboot!\n");
315 del_timer_sync(&mixcomwd_timer); 308 del_timer_sync(&mixcomwd_timer);
316 mixcomwd_timer_alive=0; 309 mixcomwd_timer_alive = 0;
317 } 310 }
318 } 311 }
319 misc_deregister(&mixcomwd_miscdev); 312 misc_deregister(&mixcomwd_miscdev);
320 release_region(watchdog_port,1); 313 release_region(watchdog_port, 1);
321} 314}
322 315
323module_init(mixcomwd_init); 316module_init(mixcomwd_init);
diff --git a/drivers/watchdog/mpc5200_wdt.c b/drivers/watchdog/mpc5200_wdt.c
index 77c1c2ae2cc2..db91892558f2 100644
--- a/drivers/watchdog/mpc5200_wdt.c
+++ b/drivers/watchdog/mpc5200_wdt.c
@@ -5,7 +5,7 @@
5#include <linux/io.h> 5#include <linux/io.h>
6#include <linux/spinlock.h> 6#include <linux/spinlock.h>
7#include <linux/of_platform.h> 7#include <linux/of_platform.h>
8#include <asm/uaccess.h> 8#include <linux/uaccess.h>
9#include <asm/mpc52xx.h> 9#include <asm/mpc52xx.h>
10 10
11 11
@@ -57,7 +57,8 @@ static int mpc5200_wdt_start(struct mpc5200_wdt *wdt)
57 /* set timeout, with maximum prescaler */ 57 /* set timeout, with maximum prescaler */
58 out_be32(&wdt->regs->count, 0x0 | wdt->count); 58 out_be32(&wdt->regs->count, 0x0 | wdt->count);
59 /* enable watchdog */ 59 /* enable watchdog */
60 out_be32(&wdt->regs->mode, GPT_MODE_CE | GPT_MODE_WDT | GPT_MODE_MS_TIMER); 60 out_be32(&wdt->regs->mode, GPT_MODE_CE | GPT_MODE_WDT |
61 GPT_MODE_MS_TIMER);
61 spin_unlock(&wdt->io_lock); 62 spin_unlock(&wdt->io_lock);
62 63
63 return 0; 64 return 0;
@@ -66,7 +67,8 @@ static int mpc5200_wdt_ping(struct mpc5200_wdt *wdt)
66{ 67{
67 spin_lock(&wdt->io_lock); 68 spin_lock(&wdt->io_lock);
68 /* writing A5 to OCPW resets the watchdog */ 69 /* writing A5 to OCPW resets the watchdog */
69 out_be32(&wdt->regs->mode, 0xA5000000 | (0xffffff & in_be32(&wdt->regs->mode))); 70 out_be32(&wdt->regs->mode, 0xA5000000 |
71 (0xffffff & in_be32(&wdt->regs->mode)));
70 spin_unlock(&wdt->io_lock); 72 spin_unlock(&wdt->io_lock);
71 return 0; 73 return 0;
72} 74}
@@ -92,8 +94,8 @@ static struct watchdog_info mpc5200_wdt_info = {
92 .options = WDIOF_SETTIMEOUT | WDIOF_KEEPALIVEPING, 94 .options = WDIOF_SETTIMEOUT | WDIOF_KEEPALIVEPING,
93 .identity = "mpc5200 watchdog on GPT0", 95 .identity = "mpc5200 watchdog on GPT0",
94}; 96};
95static int mpc5200_wdt_ioctl(struct inode *inode, struct file *file, 97static long mpc5200_wdt_ioctl(struct file *file, unsigned int cmd,
96 unsigned int cmd, unsigned long arg) 98 unsigned long arg)
97{ 99{
98 struct mpc5200_wdt *wdt = file->private_data; 100 struct mpc5200_wdt *wdt = file->private_data;
99 int __user *data = (int __user *)arg; 101 int __user *data = (int __user *)arg;
@@ -103,7 +105,7 @@ static int mpc5200_wdt_ioctl(struct inode *inode, struct file *file,
103 switch (cmd) { 105 switch (cmd) {
104 case WDIOC_GETSUPPORT: 106 case WDIOC_GETSUPPORT:
105 ret = copy_to_user(data, &mpc5200_wdt_info, 107 ret = copy_to_user(data, &mpc5200_wdt_info,
106 sizeof(mpc5200_wdt_info)); 108 sizeof(mpc5200_wdt_info));
107 if (ret) 109 if (ret)
108 ret = -EFAULT; 110 ret = -EFAULT;
109 break; 111 break;
@@ -135,6 +137,7 @@ static int mpc5200_wdt_ioctl(struct inode *inode, struct file *file,
135 } 137 }
136 return ret; 138 return ret;
137} 139}
140
138static int mpc5200_wdt_open(struct inode *inode, struct file *file) 141static int mpc5200_wdt_open(struct inode *inode, struct file *file)
139{ 142{
140 /* /dev/watchdog can only be opened once */ 143 /* /dev/watchdog can only be opened once */
@@ -161,13 +164,14 @@ static int mpc5200_wdt_release(struct inode *inode, struct file *file)
161static const struct file_operations mpc5200_wdt_fops = { 164static const struct file_operations mpc5200_wdt_fops = {
162 .owner = THIS_MODULE, 165 .owner = THIS_MODULE,
163 .write = mpc5200_wdt_write, 166 .write = mpc5200_wdt_write,
164 .ioctl = mpc5200_wdt_ioctl, 167 .unlocked_ioctl = mpc5200_wdt_ioctl,
165 .open = mpc5200_wdt_open, 168 .open = mpc5200_wdt_open,
166 .release = mpc5200_wdt_release, 169 .release = mpc5200_wdt_release,
167}; 170};
168 171
169/* module operations */ 172/* module operations */
170static int mpc5200_wdt_probe(struct of_device *op, const struct of_device_id *match) 173static int mpc5200_wdt_probe(struct of_device *op,
174 const struct of_device_id *match)
171{ 175{
172 struct mpc5200_wdt *wdt; 176 struct mpc5200_wdt *wdt;
173 int err; 177 int err;
@@ -215,9 +219,9 @@ static int mpc5200_wdt_probe(struct of_device *op, const struct of_device_id *ma
215 return 0; 219 return 0;
216 220
217 iounmap(wdt->regs); 221 iounmap(wdt->regs);
218 out_release: 222out_release:
219 release_mem_region(wdt->mem.start, size); 223 release_mem_region(wdt->mem.start, size);
220 out_free: 224out_free:
221 kfree(wdt); 225 kfree(wdt);
222 return err; 226 return err;
223} 227}
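The mpc5200 change above is typical of this series: the file_operations .ioctl method, which the VFS used to call under the Big Kernel Lock, becomes .unlocked_ioctl, so the handler loses the inode argument, returns long, and the driver takes over its own serialization. A minimal sketch of the converted shape, with illustrative names (foo_wdt_ioctl, foo_lock) that do not come from any driver in this diff:

#include <linux/errno.h>
#include <linux/fs.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/watchdog.h>

static DEFINE_MUTEX(foo_lock);

static long foo_wdt_ioctl(struct file *file, unsigned int cmd,
                          unsigned long arg)
{
        long ret = -ENOTTY;

        /* The BKL is no longer taken for us; serialize explicitly. */
        mutex_lock(&foo_lock);
        switch (cmd) {
        case WDIOC_KEEPALIVE:
                /* ping the hardware here */
                ret = 0;
                break;
        }
        mutex_unlock(&foo_lock);
        return ret;
}

static const struct file_operations foo_wdt_fops = {
        .owner          = THIS_MODULE,
        .unlocked_ioctl = foo_wdt_ioctl,        /* was .ioctl */
};

The drivers in this diff mostly reach for a spinlock around the hardware accesses rather than a mutex; either works as long as nothing in the locked region sleeps.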
diff --git a/drivers/watchdog/mpc83xx_wdt.c b/drivers/watchdog/mpc83xx_wdt.c
deleted file mode 100644
index b16c5cd972eb..000000000000
--- a/drivers/watchdog/mpc83xx_wdt.c
+++ /dev/null
@@ -1,230 +0,0 @@
1/*
2 * mpc83xx_wdt.c - MPC83xx watchdog userspace interface
3 *
4 * Authors: Dave Updegraff <dave@cray.org>
5 * Kumar Gala <galak@kernel.crashing.org>
6 * Attribution: from 83xx_wst: Florian Schirmer <jolt@tuxbox.org>
7 * ..and from sc520_wdt
8 *
9 * Note: it appears that you can only actually ENABLE or DISABLE the thing
10 * once after POR. Once enabled, you cannot disable, and vice versa.
11 *
12 * This program is free software; you can redistribute it and/or modify it
13 * under the terms of the GNU General Public License as published by the
14 * Free Software Foundation; either version 2 of the License, or (at your
15 * option) any later version.
16 */
17
18#include <linux/fs.h>
19#include <linux/init.h>
20#include <linux/kernel.h>
21#include <linux/miscdevice.h>
22#include <linux/platform_device.h>
23#include <linux/module.h>
24#include <linux/watchdog.h>
25#include <asm/io.h>
26#include <asm/uaccess.h>
27
28struct mpc83xx_wdt {
29 __be32 res0;
30 __be32 swcrr; /* System watchdog control register */
31#define SWCRR_SWTC 0xFFFF0000 /* Software Watchdog Time Count. */
32#define SWCRR_SWEN 0x00000004 /* Watchdog Enable bit. */
33#define SWCRR_SWRI 0x00000002 /* Software Watchdog Reset/Interrupt Select bit.*/
34#define SWCRR_SWPR 0x00000001 /* Software Watchdog Counter Prescale bit. */
35 __be32 swcnr; /* System watchdog count register */
36 u8 res1[2];
37 __be16 swsrr; /* System watchdog service register */
38 u8 res2[0xF0];
39};
40
41static struct mpc83xx_wdt __iomem *wd_base;
42
43static u16 timeout = 0xffff;
44module_param(timeout, ushort, 0);
45MODULE_PARM_DESC(timeout, "Watchdog timeout in ticks. (0<timeout<65536, default=65535");
46
47static int reset = 1;
48module_param(reset, bool, 0);
49MODULE_PARM_DESC(reset, "Watchdog Interrupt/Reset Mode. 0 = interrupt, 1 = reset");
50
51/*
52 * We always prescale, but if someone really doesn't want to they can set this
53 * to 0
54 */
55static int prescale = 1;
56static unsigned int timeout_sec;
57
58static unsigned long wdt_is_open;
59static DEFINE_SPINLOCK(wdt_spinlock);
60
61static void mpc83xx_wdt_keepalive(void)
62{
63 /* Ping the WDT */
64 spin_lock(&wdt_spinlock);
65 out_be16(&wd_base->swsrr, 0x556c);
66 out_be16(&wd_base->swsrr, 0xaa39);
67 spin_unlock(&wdt_spinlock);
68}
69
70static ssize_t mpc83xx_wdt_write(struct file *file, const char __user *buf,
71 size_t count, loff_t *ppos)
72{
73 if (count)
74 mpc83xx_wdt_keepalive();
75 return count;
76}
77
78static int mpc83xx_wdt_open(struct inode *inode, struct file *file)
79{
80 u32 tmp = SWCRR_SWEN;
81 if (test_and_set_bit(0, &wdt_is_open))
82 return -EBUSY;
83
84 /* Once we start the watchdog we can't stop it */
85 __module_get(THIS_MODULE);
86
87 /* Good, fire up the show */
88 if (prescale)
89 tmp |= SWCRR_SWPR;
90 if (reset)
91 tmp |= SWCRR_SWRI;
92
93 tmp |= timeout << 16;
94
95 out_be32(&wd_base->swcrr, tmp);
96
97 return nonseekable_open(inode, file);
98}
99
100static int mpc83xx_wdt_release(struct inode *inode, struct file *file)
101{
102 printk(KERN_CRIT "Unexpected close, not stopping watchdog!\n");
103 mpc83xx_wdt_keepalive();
104 clear_bit(0, &wdt_is_open);
105 return 0;
106}
107
108static int mpc83xx_wdt_ioctl(struct inode *inode, struct file *file,
109 unsigned int cmd, unsigned long arg)
110{
111 void __user *argp = (void __user *)arg;
112 int __user *p = argp;
113 static struct watchdog_info ident = {
114 .options = WDIOF_KEEPALIVEPING,
115 .firmware_version = 1,
116 .identity = "MPC83xx",
117 };
118
119 switch (cmd) {
120 case WDIOC_GETSUPPORT:
121 return copy_to_user(argp, &ident, sizeof(ident)) ? -EFAULT : 0;
122 case WDIOC_GETSTATUS:
123 case WDIOC_GETBOOTSTATUS:
124 return put_user(0, p);
125 case WDIOC_KEEPALIVE:
126 mpc83xx_wdt_keepalive();
127 return 0;
128 case WDIOC_GETTIMEOUT:
129 return put_user(timeout_sec, p);
130 default:
131 return -ENOTTY;
132 }
133}
134
135static const struct file_operations mpc83xx_wdt_fops = {
136 .owner = THIS_MODULE,
137 .llseek = no_llseek,
138 .write = mpc83xx_wdt_write,
139 .ioctl = mpc83xx_wdt_ioctl,
140 .open = mpc83xx_wdt_open,
141 .release = mpc83xx_wdt_release,
142};
143
144static struct miscdevice mpc83xx_wdt_miscdev = {
145 .minor = WATCHDOG_MINOR,
146 .name = "watchdog",
147 .fops = &mpc83xx_wdt_fops,
148};
149
150static int __devinit mpc83xx_wdt_probe(struct platform_device *dev)
151{
152 struct resource *r;
153 int ret;
154 unsigned int *freq = dev->dev.platform_data;
155
156 /* get a pointer to the register memory */
157 r = platform_get_resource(dev, IORESOURCE_MEM, 0);
158
159 if (!r) {
160 ret = -ENODEV;
161 goto err_out;
162 }
163
164 wd_base = ioremap(r->start, sizeof (struct mpc83xx_wdt));
165
166 if (wd_base == NULL) {
167 ret = -ENOMEM;
168 goto err_out;
169 }
170
171 ret = misc_register(&mpc83xx_wdt_miscdev);
172 if (ret) {
173 printk(KERN_ERR "cannot register miscdev on minor=%d "
174 "(err=%d)\n",
175 WATCHDOG_MINOR, ret);
176 goto err_unmap;
177 }
178
179 /* Calculate the timeout in seconds */
180 if (prescale)
181 timeout_sec = (timeout * 0x10000) / (*freq);
182 else
183 timeout_sec = timeout / (*freq);
184
185 printk(KERN_INFO "WDT driver for MPC83xx initialized. "
186 "mode:%s timeout=%d (%d seconds)\n",
187 reset ? "reset":"interrupt", timeout, timeout_sec);
188 return 0;
189
190err_unmap:
191 iounmap(wd_base);
192err_out:
193 return ret;
194}
195
196static int __devexit mpc83xx_wdt_remove(struct platform_device *dev)
197{
198 misc_deregister(&mpc83xx_wdt_miscdev);
199 iounmap(wd_base);
200
201 return 0;
202}
203
204static struct platform_driver mpc83xx_wdt_driver = {
205 .probe = mpc83xx_wdt_probe,
206 .remove = __devexit_p(mpc83xx_wdt_remove),
207 .driver = {
208 .name = "mpc83xx_wdt",
209 .owner = THIS_MODULE,
210 },
211};
212
213static int __init mpc83xx_wdt_init(void)
214{
215 return platform_driver_register(&mpc83xx_wdt_driver);
216}
217
218static void __exit mpc83xx_wdt_exit(void)
219{
220 platform_driver_unregister(&mpc83xx_wdt_driver);
221}
222
223module_init(mpc83xx_wdt_init);
224module_exit(mpc83xx_wdt_exit);
225
226MODULE_AUTHOR("Dave Updegraff, Kumar Gala");
227MODULE_DESCRIPTION("Driver for watchdog timer in MPC83xx uProcessor");
228MODULE_LICENSE("GPL");
229MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR);
230MODULE_ALIAS("platform:mpc83xx_wdt");
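mpc83xx_wdt.c is removed here because its functionality reappears below in the new, device-tree-probed mpc8xxx_wdt.c, which also covers MPC8xx and MPC86xx parts. Both the old and the new driver report the timeout in seconds as ticks times the prescale factor divided by the bus frequency; a worked sketch of that arithmetic, using an assumed 266 MHz CSB clock that is not taken from the diff:

#include <linux/types.h>

/* Hypothetical helper mirroring the timeout_sec computation in the old
 * and new drivers.  With the defaults (ticks = 0xffff, prescaler =
 * 0x10000) and an assumed freq_hz of 266666666, this comes to roughly
 * 16 seconds.  The product still fits in 32 bits for these values. */
static inline u32 wdt_timeout_in_seconds(u16 ticks, u32 prescaler,
                                         u32 freq_hz)
{
        return (ticks * prescaler) / freq_hz;
}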
diff --git a/drivers/watchdog/mpc8xx_wdt.c b/drivers/watchdog/mpc8xx_wdt.c
index 85b5734403a5..1336425acf20 100644
--- a/drivers/watchdog/mpc8xx_wdt.c
+++ b/drivers/watchdog/mpc8xx_wdt.c
@@ -16,36 +16,35 @@
16#include <linux/module.h> 16#include <linux/module.h>
17#include <linux/watchdog.h> 17#include <linux/watchdog.h>
18#include <asm/8xx_immap.h> 18#include <asm/8xx_immap.h>
19#include <asm/uaccess.h> 19#include <linux/uaccess.h>
20#include <asm/io.h> 20#include <linux/io.h>
21#include <syslib/m8xx_wdt.h> 21#include <syslib/m8xx_wdt.h>
22 22
23static unsigned long wdt_opened; 23static unsigned long wdt_opened;
24static int wdt_status; 24static int wdt_status;
25static spinlock_t wdt_lock;
25 26
26static void mpc8xx_wdt_handler_disable(void) 27static void mpc8xx_wdt_handler_disable(void)
27{ 28{
28 volatile uint __iomem *piscr; 29 volatile uint __iomem *piscr;
29 piscr = (uint *)&((immap_t*)IMAP_ADDR)->im_sit.sit_piscr; 30 piscr = (uint *)&((immap_t *)IMAP_ADDR)->im_sit.sit_piscr;
30 31
31 if (!m8xx_has_internal_rtc) 32 if (!m8xx_has_internal_rtc)
32 m8xx_wdt_stop_timer(); 33 m8xx_wdt_stop_timer();
33 else 34 else
34 out_be32(piscr, in_be32(piscr) & ~(PISCR_PIE | PISCR_PTE)); 35 out_be32(piscr, in_be32(piscr) & ~(PISCR_PIE | PISCR_PTE));
35
36 printk(KERN_NOTICE "mpc8xx_wdt: keep-alive handler deactivated\n"); 36 printk(KERN_NOTICE "mpc8xx_wdt: keep-alive handler deactivated\n");
37} 37}
38 38
39static void mpc8xx_wdt_handler_enable(void) 39static void mpc8xx_wdt_handler_enable(void)
40{ 40{
41 volatile uint __iomem *piscr; 41 volatile uint __iomem *piscr;
42 piscr = (uint *)&((immap_t*)IMAP_ADDR)->im_sit.sit_piscr; 42 piscr = (uint *)&((immap_t *)IMAP_ADDR)->im_sit.sit_piscr;
43 43
44 if (!m8xx_has_internal_rtc) 44 if (!m8xx_has_internal_rtc)
45 m8xx_wdt_install_timer(); 45 m8xx_wdt_install_timer();
46 else 46 else
47 out_be32(piscr, in_be32(piscr) | PISCR_PIE | PISCR_PTE); 47 out_be32(piscr, in_be32(piscr) | PISCR_PIE | PISCR_PTE);
48
49 printk(KERN_NOTICE "mpc8xx_wdt: keep-alive handler activated\n"); 48 printk(KERN_NOTICE "mpc8xx_wdt: keep-alive handler activated\n");
50} 49}
51 50
@@ -53,37 +52,34 @@ static int mpc8xx_wdt_open(struct inode *inode, struct file *file)
53{ 52{
54 if (test_and_set_bit(0, &wdt_opened)) 53 if (test_and_set_bit(0, &wdt_opened))
55 return -EBUSY; 54 return -EBUSY;
56
57 m8xx_wdt_reset(); 55 m8xx_wdt_reset();
58 mpc8xx_wdt_handler_disable(); 56 mpc8xx_wdt_handler_disable();
59
60 return nonseekable_open(inode, file); 57 return nonseekable_open(inode, file);
61} 58}
62 59
63static int mpc8xx_wdt_release(struct inode *inode, struct file *file) 60static int mpc8xx_wdt_release(struct inode *inode, struct file *file)
64{ 61{
65 m8xx_wdt_reset(); 62 m8xx_wdt_reset();
66
67#if !defined(CONFIG_WATCHDOG_NOWAYOUT) 63#if !defined(CONFIG_WATCHDOG_NOWAYOUT)
68 mpc8xx_wdt_handler_enable(); 64 mpc8xx_wdt_handler_enable();
69#endif 65#endif
70
71 clear_bit(0, &wdt_opened); 66 clear_bit(0, &wdt_opened);
72
73 return 0; 67 return 0;
74} 68}
75 69
76static ssize_t mpc8xx_wdt_write(struct file *file, const char *data, size_t len, 70static ssize_t mpc8xx_wdt_write(struct file *file, const char *data,
77 loff_t * ppos) 71 size_t len, loff_t *ppos)
78{ 72{
79 if (len) 73 if (len) {
74 spin_lock(&wdt_lock);
80 m8xx_wdt_reset(); 75 m8xx_wdt_reset();
81 76 spin_unlock(&wdt_lock);
77 }
82 return len; 78 return len;
83} 79}
84 80
85static int mpc8xx_wdt_ioctl(struct inode *inode, struct file *file, 81static long mpc8xx_wdt_ioctl(struct file *file,
86 unsigned int cmd, unsigned long arg) 82 unsigned int cmd, unsigned long arg)
87{ 83{
88 int timeout; 84 int timeout;
89 static struct watchdog_info info = { 85 static struct watchdog_info info = {
@@ -112,15 +108,19 @@ static int mpc8xx_wdt_ioctl(struct inode *inode, struct file *file,
112 return -EOPNOTSUPP; 108 return -EOPNOTSUPP;
113 109
114 case WDIOC_KEEPALIVE: 110 case WDIOC_KEEPALIVE:
111 spin_lock(&wdt_lock);
115 m8xx_wdt_reset(); 112 m8xx_wdt_reset();
116 wdt_status |= WDIOF_KEEPALIVEPING; 113 wdt_status |= WDIOF_KEEPALIVEPING;
114 spin_unlock(&wdt_lock);
117 break; 115 break;
118 116
119 case WDIOC_SETTIMEOUT: 117 case WDIOC_SETTIMEOUT:
120 return -EOPNOTSUPP; 118 return -EOPNOTSUPP;
121 119
122 case WDIOC_GETTIMEOUT: 120 case WDIOC_GETTIMEOUT:
121 spin_lock(&wdt_lock);
123 timeout = m8xx_wdt_get_timeout(); 122 timeout = m8xx_wdt_get_timeout();
123 spin_unlock(&wdt_lock);
124 if (put_user(timeout, (int *)arg)) 124 if (put_user(timeout, (int *)arg))
125 return -EFAULT; 125 return -EFAULT;
126 break; 126 break;
@@ -136,7 +136,7 @@ static const struct file_operations mpc8xx_wdt_fops = {
136 .owner = THIS_MODULE, 136 .owner = THIS_MODULE,
137 .llseek = no_llseek, 137 .llseek = no_llseek,
138 .write = mpc8xx_wdt_write, 138 .write = mpc8xx_wdt_write,
139 .ioctl = mpc8xx_wdt_ioctl, 139 .unlocked_ioctl = mpc8xx_wdt_ioctl,
140 .open = mpc8xx_wdt_open, 140 .open = mpc8xx_wdt_open,
141 .release = mpc8xx_wdt_release, 141 .release = mpc8xx_wdt_release,
142}; 142};
@@ -149,6 +149,7 @@ static struct miscdevice mpc8xx_wdt_miscdev = {
149 149
150static int __init mpc8xx_wdt_init(void) 150static int __init mpc8xx_wdt_init(void)
151{ 151{
152 spin_lock_init(&wdt_lock);
152 return misc_register(&mpc8xx_wdt_miscdev); 153 return misc_register(&mpc8xx_wdt_miscdev);
153} 154}
154 155
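The mpc8xx hunk introduces wdt_lock as a plain spinlock_t and initializes it in mpc8xx_wdt_init(). A statically initialized lock would avoid that extra call; the line below is only an alternative spelling, not what the patch does:

#include <linux/spinlock.h>

/* Compile-time initialization; makes the spin_lock_init() call added to
 * the module init function above unnecessary. */
static DEFINE_SPINLOCK(wdt_lock);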
diff --git a/drivers/watchdog/mpc8xxx_wdt.c b/drivers/watchdog/mpc8xxx_wdt.c
new file mode 100644
index 000000000000..f2094960e662
--- /dev/null
+++ b/drivers/watchdog/mpc8xxx_wdt.c
@@ -0,0 +1,316 @@
1/*
2 * mpc8xxx_wdt.c - MPC8xx/MPC83xx/MPC86xx watchdog userspace interface
3 *
4 * Authors: Dave Updegraff <dave@cray.org>
5 * Kumar Gala <galak@kernel.crashing.org>
6 * Attribution: from 83xx_wst: Florian Schirmer <jolt@tuxbox.org>
7 * ..and from sc520_wdt
8 * Copyright (c) 2008 MontaVista Software, Inc.
9 * Anton Vorontsov <avorontsov@ru.mvista.com>
10 *
11 * Note: it appears that you can only actually ENABLE or DISABLE the thing
12 * once after POR. Once enabled, you cannot disable, and vice versa.
13 *
14 * This program is free software; you can redistribute it and/or modify it
15 * under the terms of the GNU General Public License as published by the
16 * Free Software Foundation; either version 2 of the License, or (at your
17 * option) any later version.
18 */
19
20#include <linux/fs.h>
21#include <linux/init.h>
22#include <linux/kernel.h>
23#include <linux/timer.h>
24#include <linux/miscdevice.h>
25#include <linux/of_platform.h>
26#include <linux/module.h>
27#include <linux/watchdog.h>
28#include <linux/io.h>
29#include <linux/uaccess.h>
30#include <sysdev/fsl_soc.h>
31
32struct mpc8xxx_wdt {
33 __be32 res0;
34 __be32 swcrr; /* System watchdog control register */
35#define SWCRR_SWTC 0xFFFF0000 /* Software Watchdog Time Count. */
36#define SWCRR_SWEN 0x00000004 /* Watchdog Enable bit. */
37#define SWCRR_SWRI 0x00000002 /* Software Watchdog Reset/Interrupt Select bit.*/
38#define SWCRR_SWPR 0x00000001 /* Software Watchdog Counter Prescale bit. */
39 __be32 swcnr; /* System watchdog count register */
40 u8 res1[2];
41 __be16 swsrr; /* System watchdog service register */
42 u8 res2[0xF0];
43};
44
45struct mpc8xxx_wdt_type {
46 int prescaler;
47 bool hw_enabled;
48};
49
50static struct mpc8xxx_wdt __iomem *wd_base;
51
52static u16 timeout = 0xffff;
53module_param(timeout, ushort, 0);
54MODULE_PARM_DESC(timeout,
55 "Watchdog timeout in ticks. (0<timeout<65536, default=65535");
56
57static int reset = 1;
58module_param(reset, bool, 0);
59MODULE_PARM_DESC(reset,
60 "Watchdog Interrupt/Reset Mode. 0 = interrupt, 1 = reset");
61
62static int nowayout = WATCHDOG_NOWAYOUT;
63module_param(nowayout, int, 0);
64MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped once started "
65 "(default=" __MODULE_STRING(WATCHDOG_NOWAYOUT) ")");
66
67/*
68 * We always prescale, but if someone really doesn't want to they can set this
69 * to 0
70 */
71static int prescale = 1;
72static unsigned int timeout_sec;
73
74static unsigned long wdt_is_open;
75static DEFINE_SPINLOCK(wdt_spinlock);
76
77static void mpc8xxx_wdt_keepalive(void)
78{
79 /* Ping the WDT */
80 spin_lock(&wdt_spinlock);
81 out_be16(&wd_base->swsrr, 0x556c);
82 out_be16(&wd_base->swsrr, 0xaa39);
83 spin_unlock(&wdt_spinlock);
84}
85
86static void mpc8xxx_wdt_timer_ping(unsigned long arg);
87static DEFINE_TIMER(wdt_timer, mpc8xxx_wdt_timer_ping, 0, 0);
88
89static void mpc8xxx_wdt_timer_ping(unsigned long arg)
90{
91 mpc8xxx_wdt_keepalive();
92 /* We're pinging it twice faster than needed, just to be sure. */
93 mod_timer(&wdt_timer, jiffies + HZ * timeout_sec / 2);
94}
95
96static void mpc8xxx_wdt_pr_warn(const char *msg)
97{
98 pr_crit("mpc8xxx_wdt: %s, expect the %s soon!\n", msg,
99 reset ? "reset" : "machine check exception");
100}
101
102static ssize_t mpc8xxx_wdt_write(struct file *file, const char __user *buf,
103 size_t count, loff_t *ppos)
104{
105 if (count)
106 mpc8xxx_wdt_keepalive();
107 return count;
108}
109
110static int mpc8xxx_wdt_open(struct inode *inode, struct file *file)
111{
112 u32 tmp = SWCRR_SWEN;
113 if (test_and_set_bit(0, &wdt_is_open))
114 return -EBUSY;
115
116 /* Once we start the watchdog we can't stop it */
117 if (nowayout)
118 __module_get(THIS_MODULE);
119
120 /* Good, fire up the show */
121 if (prescale)
122 tmp |= SWCRR_SWPR;
123 if (reset)
124 tmp |= SWCRR_SWRI;
125
126 tmp |= timeout << 16;
127
128 out_be32(&wd_base->swcrr, tmp);
129
130 del_timer_sync(&wdt_timer);
131
132 return nonseekable_open(inode, file);
133}
134
135static int mpc8xxx_wdt_release(struct inode *inode, struct file *file)
136{
137 if (!nowayout)
138 mpc8xxx_wdt_timer_ping(0);
139 else
140 mpc8xxx_wdt_pr_warn("watchdog closed");
141 clear_bit(0, &wdt_is_open);
142 return 0;
143}
144
145static long mpc8xxx_wdt_ioctl(struct file *file, unsigned int cmd,
146 unsigned long arg)
147{
148 void __user *argp = (void __user *)arg;
149 int __user *p = argp;
150 static struct watchdog_info ident = {
151 .options = WDIOF_KEEPALIVEPING,
152 .firmware_version = 1,
153 .identity = "MPC8xxx",
154 };
155
156 switch (cmd) {
157 case WDIOC_GETSUPPORT:
158 return copy_to_user(argp, &ident, sizeof(ident)) ? -EFAULT : 0;
159 case WDIOC_GETSTATUS:
160 case WDIOC_GETBOOTSTATUS:
161 return put_user(0, p);
162 case WDIOC_KEEPALIVE:
163 mpc8xxx_wdt_keepalive();
164 return 0;
165 case WDIOC_GETTIMEOUT:
166 return put_user(timeout_sec, p);
167 default:
168 return -ENOTTY;
169 }
170}
171
172static const struct file_operations mpc8xxx_wdt_fops = {
173 .owner = THIS_MODULE,
174 .llseek = no_llseek,
175 .write = mpc8xxx_wdt_write,
176 .unlocked_ioctl = mpc8xxx_wdt_ioctl,
177 .open = mpc8xxx_wdt_open,
178 .release = mpc8xxx_wdt_release,
179};
180
181static struct miscdevice mpc8xxx_wdt_miscdev = {
182 .minor = WATCHDOG_MINOR,
183 .name = "watchdog",
184 .fops = &mpc8xxx_wdt_fops,
185};
186
187static int __devinit mpc8xxx_wdt_probe(struct of_device *ofdev,
188 const struct of_device_id *match)
189{
190 int ret;
191 struct device_node *np = ofdev->node;
192 struct mpc8xxx_wdt_type *wdt_type = match->data;
193 u32 freq = fsl_get_sys_freq();
194 bool enabled;
195
196 if (!freq || freq == -1)
197 return -EINVAL;
198
199 wd_base = of_iomap(np, 0);
200 if (!wd_base)
201 return -ENOMEM;
202
203 enabled = in_be32(&wd_base->swcrr) & SWCRR_SWEN;
204 if (!enabled && wdt_type->hw_enabled) {
205 pr_info("mpc8xxx_wdt: could not be enabled in software\n");
206 ret = -ENOSYS;
207 goto err_unmap;
208 }
209
210 /* Calculate the timeout in seconds */
211 if (prescale)
212 timeout_sec = (timeout * wdt_type->prescaler) / freq;
213 else
214 timeout_sec = timeout / freq;
215
216 pr_info("WDT driver for MPC8xxx initialized. mode:%s timeout=%d "
217 "(%d seconds)\n", reset ? "reset" : "interrupt", timeout,
218 timeout_sec);
219
220 /*
221 * If the watchdog was previously enabled or we're running on
222 * MPC8xxx, we should ping the wdt from the kernel until the
223 * userspace handles it.
224 */
225 if (enabled)
226 mpc8xxx_wdt_timer_ping(0);
227 return 0;
228err_unmap:
229 iounmap(wd_base);
230 wd_base = NULL;
231 return ret;
232}
233
234static int __devexit mpc8xxx_wdt_remove(struct of_device *ofdev)
235{
236 mpc8xxx_wdt_pr_warn("watchdog removed");
237 del_timer_sync(&wdt_timer);
238 misc_deregister(&mpc8xxx_wdt_miscdev);
239 iounmap(wd_base);
240
241 return 0;
242}
243
244static const struct of_device_id mpc8xxx_wdt_match[] = {
245 {
246 .compatible = "mpc83xx_wdt",
247 .data = &(struct mpc8xxx_wdt_type) {
248 .prescaler = 0x10000,
249 },
250 },
251 {
252 .compatible = "fsl,mpc8610-wdt",
253 .data = &(struct mpc8xxx_wdt_type) {
254 .prescaler = 0x10000,
255 .hw_enabled = true,
256 },
257 },
258 {
259 .compatible = "fsl,mpc823-wdt",
260 .data = &(struct mpc8xxx_wdt_type) {
261 .prescaler = 0x800,
262 },
263 },
264 {},
265};
266MODULE_DEVICE_TABLE(of, mpc8xxx_wdt_match);
267
268static struct of_platform_driver mpc8xxx_wdt_driver = {
269 .match_table = mpc8xxx_wdt_match,
270 .probe = mpc8xxx_wdt_probe,
271 .remove = __devexit_p(mpc8xxx_wdt_remove),
272 .driver = {
273 .name = "mpc8xxx_wdt",
274 .owner = THIS_MODULE,
275 },
276};
277
278/*
279 * We do wdt initialization in two steps: arch_initcall probes the wdt
280 * very early to start pinging the watchdog (misc devices are not yet
281 * available), and later module_init() just registers the misc device.
282 */
283static int __init mpc8xxx_wdt_init_late(void)
284{
285 int ret;
286
287 if (!wd_base)
288 return -ENODEV;
289
290 ret = misc_register(&mpc8xxx_wdt_miscdev);
291 if (ret) {
292 pr_err("cannot register miscdev on minor=%d (err=%d)\n",
293 WATCHDOG_MINOR, ret);
294 return ret;
295 }
296 return 0;
297}
298module_init(mpc8xxx_wdt_init_late);
299
300static int __init mpc8xxx_wdt_init(void)
301{
302 return of_register_platform_driver(&mpc8xxx_wdt_driver);
303}
304arch_initcall(mpc8xxx_wdt_init);
305
306static void __exit mpc8xxx_wdt_exit(void)
307{
308 of_unregister_platform_driver(&mpc8xxx_wdt_driver);
309}
310module_exit(mpc8xxx_wdt_exit);
311
312MODULE_AUTHOR("Dave Updegraff, Kumar Gala");
313MODULE_DESCRIPTION("Driver for watchdog timer in MPC8xx/MPC83xx/MPC86xx "
314 "uProcessors");
315MODULE_LICENSE("GPL");
316MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR);
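The new mpc8xxx driver registers in two steps: arch_initcall() runs mpc8xxx_wdt_init() early so a watchdog left running by firmware gets pinged from the kernel timer before misc devices exist, and module_init() later exposes /dev/watchdog to userspace. Once userspace takes over, a client typically looks like the generic WDIOC example below; this is not code from the series, and error handling is deliberately terse:

/*
 * Minimal userspace client for the watchdog character device exposed by
 * drivers like the one above.
 */
#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/watchdog.h>

int main(void)
{
        int timeout = 0;
        int fd = open("/dev/watchdog", O_WRONLY);

        if (fd < 0) {
                perror("open /dev/watchdog");
                return 1;
        }
        if (ioctl(fd, WDIOC_GETTIMEOUT, &timeout) == 0)
                printf("watchdog timeout: %d seconds\n", timeout);

        for (;;) {
                ioctl(fd, WDIOC_KEEPALIVE, 0);  /* ping the hardware */
                sleep(timeout > 1 ? timeout / 2 : 1);
        }
}

Writing any data to the device is an equivalent keepalive, which is what the driver's write handler implements.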
diff --git a/drivers/watchdog/mpcore_wdt.c b/drivers/watchdog/mpcore_wdt.c
index 009573b81496..2a9bfa81f9d6 100644
--- a/drivers/watchdog/mpcore_wdt.c
+++ b/drivers/watchdog/mpcore_wdt.c
@@ -29,9 +29,9 @@
29#include <linux/init.h> 29#include <linux/init.h>
30#include <linux/interrupt.h> 30#include <linux/interrupt.h>
31#include <linux/platform_device.h> 31#include <linux/platform_device.h>
32#include <linux/uaccess.h>
32 33
33#include <asm/hardware/arm_twd.h> 34#include <asm/hardware/arm_twd.h>
34#include <asm/uaccess.h>
35 35
36struct mpcore_wdt { 36struct mpcore_wdt {
37 unsigned long timer_alive; 37 unsigned long timer_alive;
@@ -43,17 +43,20 @@ struct mpcore_wdt {
43}; 43};
44 44
45static struct platform_device *mpcore_wdt_dev; 45static struct platform_device *mpcore_wdt_dev;
46
47extern unsigned int mpcore_timer_rate; 46extern unsigned int mpcore_timer_rate;
48 47
49#define TIMER_MARGIN 60 48#define TIMER_MARGIN 60
50static int mpcore_margin = TIMER_MARGIN; 49static int mpcore_margin = TIMER_MARGIN;
51module_param(mpcore_margin, int, 0); 50module_param(mpcore_margin, int, 0);
52MODULE_PARM_DESC(mpcore_margin, "MPcore timer margin in seconds. (0<mpcore_margin<65536, default=" __MODULE_STRING(TIMER_MARGIN) ")"); 51MODULE_PARM_DESC(mpcore_margin,
52 "MPcore timer margin in seconds. (0 < mpcore_margin < 65536, default="
53 __MODULE_STRING(TIMER_MARGIN) ")");
53 54
54static int nowayout = WATCHDOG_NOWAYOUT; 55static int nowayout = WATCHDOG_NOWAYOUT;
55module_param(nowayout, int, 0); 56module_param(nowayout, int, 0);
56MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped once started (default=" __MODULE_STRING(WATCHDOG_NOWAYOUT) ")"); 57MODULE_PARM_DESC(nowayout,
58 "Watchdog cannot be stopped once started (default="
59 __MODULE_STRING(WATCHDOG_NOWAYOUT) ")");
57 60
58#define ONLY_TESTING 0 61#define ONLY_TESTING 0
59static int mpcore_noboot = ONLY_TESTING; 62static int mpcore_noboot = ONLY_TESTING;
@@ -70,14 +73,12 @@ static irqreturn_t mpcore_wdt_fire(int irq, void *arg)
70 73
71 /* Check it really was our interrupt */ 74 /* Check it really was our interrupt */
72 if (readl(wdt->base + TWD_WDOG_INTSTAT)) { 75 if (readl(wdt->base + TWD_WDOG_INTSTAT)) {
73 dev_printk(KERN_CRIT, wdt->dev, "Triggered - Reboot ignored.\n"); 76 dev_printk(KERN_CRIT, wdt->dev,
74 77 "Triggered - Reboot ignored.\n");
75 /* Clear the interrupt on the watchdog */ 78 /* Clear the interrupt on the watchdog */
76 writel(1, wdt->base + TWD_WDOG_INTSTAT); 79 writel(1, wdt->base + TWD_WDOG_INTSTAT);
77
78 return IRQ_HANDLED; 80 return IRQ_HANDLED;
79 } 81 }
80
81 return IRQ_NONE; 82 return IRQ_NONE;
82} 83}
83 84
@@ -96,22 +97,26 @@ static void mpcore_wdt_keepalive(struct mpcore_wdt *wdt)
96 count = (mpcore_timer_rate / 256) * mpcore_margin; 97 count = (mpcore_timer_rate / 256) * mpcore_margin;
97 98
98 /* Reload the counter */ 99 /* Reload the counter */
100 spin_lock(&wdt_lock);
99 writel(count + wdt->perturb, wdt->base + TWD_WDOG_LOAD); 101 writel(count + wdt->perturb, wdt->base + TWD_WDOG_LOAD);
100
101 wdt->perturb = wdt->perturb ? 0 : 1; 102 wdt->perturb = wdt->perturb ? 0 : 1;
103 spin_unlock(&wdt_lock);
102} 104}
103 105
104static void mpcore_wdt_stop(struct mpcore_wdt *wdt) 106static void mpcore_wdt_stop(struct mpcore_wdt *wdt)
105{ 107{
108 spin_lock(&wdt_lock);
106 writel(0x12345678, wdt->base + TWD_WDOG_DISABLE); 109 writel(0x12345678, wdt->base + TWD_WDOG_DISABLE);
107 writel(0x87654321, wdt->base + TWD_WDOG_DISABLE); 110 writel(0x87654321, wdt->base + TWD_WDOG_DISABLE);
108 writel(0x0, wdt->base + TWD_WDOG_CONTROL); 111 writel(0x0, wdt->base + TWD_WDOG_CONTROL);
112 spin_unlock(&wdt_lock);
109} 113}
110 114
111static void mpcore_wdt_start(struct mpcore_wdt *wdt) 115static void mpcore_wdt_start(struct mpcore_wdt *wdt)
112{ 116{
113 dev_printk(KERN_INFO, wdt->dev, "enabling watchdog.\n"); 117 dev_printk(KERN_INFO, wdt->dev, "enabling watchdog.\n");
114 118
119 spin_lock(&wdt_lock);
115 /* This loads the count register but does NOT start the count yet */ 120 /* This loads the count register but does NOT start the count yet */
116 mpcore_wdt_keepalive(wdt); 121 mpcore_wdt_keepalive(wdt);
117 122
@@ -122,6 +127,7 @@ static void mpcore_wdt_start(struct mpcore_wdt *wdt)
122 /* Enable watchdog - prescale=256, watchdog mode=1, enable=1 */ 127 /* Enable watchdog - prescale=256, watchdog mode=1, enable=1 */
123 writel(0x0000FF09, wdt->base + TWD_WDOG_CONTROL); 128 writel(0x0000FF09, wdt->base + TWD_WDOG_CONTROL);
124 } 129 }
130 spin_unlock(&wdt_lock);
125} 131}
126 132
127static int mpcore_wdt_set_heartbeat(int t) 133static int mpcore_wdt_set_heartbeat(int t)
@@ -164,10 +170,11 @@ static int mpcore_wdt_release(struct inode *inode, struct file *file)
164 * Shut off the timer. 170 * Shut off the timer.
165 * Lock it in if it's a module and we set nowayout 171 * Lock it in if it's a module and we set nowayout
166 */ 172 */
167 if (wdt->expect_close == 42) { 173 if (wdt->expect_close == 42)
168 mpcore_wdt_stop(wdt); 174 mpcore_wdt_stop(wdt);
169 } else { 175 else {
170 dev_printk(KERN_CRIT, wdt->dev, "unexpected close, not stopping watchdog!\n"); 176 dev_printk(KERN_CRIT, wdt->dev,
177 "unexpected close, not stopping watchdog!\n");
171 mpcore_wdt_keepalive(wdt); 178 mpcore_wdt_keepalive(wdt);
172 } 179 }
173 clear_bit(0, &wdt->timer_alive); 180 clear_bit(0, &wdt->timer_alive);
@@ -175,7 +182,8 @@ static int mpcore_wdt_release(struct inode *inode, struct file *file)
175 return 0; 182 return 0;
176} 183}
177 184
178static ssize_t mpcore_wdt_write(struct file *file, const char *data, size_t len, loff_t *ppos) 185static ssize_t mpcore_wdt_write(struct file *file, const char *data,
186 size_t len, loff_t *ppos)
179{ 187{
180 struct mpcore_wdt *wdt = file->private_data; 188 struct mpcore_wdt *wdt = file->private_data;
181 189
@@ -210,8 +218,8 @@ static struct watchdog_info ident = {
210 .identity = "MPcore Watchdog", 218 .identity = "MPcore Watchdog",
211}; 219};
212 220
213static int mpcore_wdt_ioctl(struct inode *inode, struct file *file, 221static long mpcore_wdt_ioctl(struct file *file, unsigned int cmd,
214 unsigned int cmd, unsigned long arg) 222 unsigned long arg)
215{ 223{
216 struct mpcore_wdt *wdt = file->private_data; 224 struct mpcore_wdt *wdt = file->private_data;
217 int ret; 225 int ret;
@@ -235,6 +243,12 @@ static int mpcore_wdt_ioctl(struct inode *inode, struct file *file,
235 ret = 0; 243 ret = 0;
236 break; 244 break;
237 245
246 case WDIOC_GETSTATUS:
247 case WDIOC_GETBOOTSTATUS:
248 uarg.i = 0;
249 ret = 0;
250 break;
251
238 case WDIOC_SETOPTIONS: 252 case WDIOC_SETOPTIONS:
239 ret = -EINVAL; 253 ret = -EINVAL;
240 if (uarg.i & WDIOS_DISABLECARD) { 254 if (uarg.i & WDIOS_DISABLECARD) {
@@ -247,12 +261,6 @@ static int mpcore_wdt_ioctl(struct inode *inode, struct file *file,
247 } 261 }
248 break; 262 break;
249 263
250 case WDIOC_GETSTATUS:
251 case WDIOC_GETBOOTSTATUS:
252 uarg.i = 0;
253 ret = 0;
254 break;
255
256 case WDIOC_KEEPALIVE: 264 case WDIOC_KEEPALIVE:
257 mpcore_wdt_keepalive(wdt); 265 mpcore_wdt_keepalive(wdt);
258 ret = 0; 266 ret = 0;
@@ -301,7 +309,7 @@ static const struct file_operations mpcore_wdt_fops = {
301 .owner = THIS_MODULE, 309 .owner = THIS_MODULE,
302 .llseek = no_llseek, 310 .llseek = no_llseek,
303 .write = mpcore_wdt_write, 311 .write = mpcore_wdt_write,
304 .ioctl = mpcore_wdt_ioctl, 312 .unlocked_ioctl = mpcore_wdt_ioctl,
305 .open = mpcore_wdt_open, 313 .open = mpcore_wdt_open,
306 .release = mpcore_wdt_release, 314 .release = mpcore_wdt_release,
307}; 315};
@@ -349,14 +357,17 @@ static int __devinit mpcore_wdt_probe(struct platform_device *dev)
349 mpcore_wdt_miscdev.parent = &dev->dev; 357 mpcore_wdt_miscdev.parent = &dev->dev;
350 ret = misc_register(&mpcore_wdt_miscdev); 358 ret = misc_register(&mpcore_wdt_miscdev);
351 if (ret) { 359 if (ret) {
352 dev_printk(KERN_ERR, _dev, "cannot register miscdev on minor=%d (err=%d)\n", 360 dev_printk(KERN_ERR, _dev,
353 WATCHDOG_MINOR, ret); 361 "cannot register miscdev on minor=%d (err=%d)\n",
362 WATCHDOG_MINOR, ret);
354 goto err_misc; 363 goto err_misc;
355 } 364 }
356 365
357 ret = request_irq(wdt->irq, mpcore_wdt_fire, IRQF_DISABLED, "mpcore_wdt", wdt); 366 ret = request_irq(wdt->irq, mpcore_wdt_fire, IRQF_DISABLED,
367 "mpcore_wdt", wdt);
358 if (ret) { 368 if (ret) {
359 dev_printk(KERN_ERR, _dev, "cannot register IRQ%d for watchdog\n", wdt->irq); 369 dev_printk(KERN_ERR, _dev,
370 "cannot register IRQ%d for watchdog\n", wdt->irq);
360 goto err_irq; 371 goto err_irq;
361 } 372 }
362 373
@@ -366,13 +377,13 @@ static int __devinit mpcore_wdt_probe(struct platform_device *dev)
366 377
367 return 0; 378 return 0;
368 379
369 err_irq: 380err_irq:
370 misc_deregister(&mpcore_wdt_miscdev); 381 misc_deregister(&mpcore_wdt_miscdev);
371 err_misc: 382err_misc:
372 iounmap(wdt->base); 383 iounmap(wdt->base);
373 err_free: 384err_free:
374 kfree(wdt); 385 kfree(wdt);
375 err_out: 386err_out:
376 return ret; 387 return ret;
377} 388}
378 389
@@ -415,7 +426,7 @@ static int __init mpcore_wdt_init(void)
415 */ 426 */
416 if (mpcore_wdt_set_heartbeat(mpcore_margin)) { 427 if (mpcore_wdt_set_heartbeat(mpcore_margin)) {
417 mpcore_wdt_set_heartbeat(TIMER_MARGIN); 428 mpcore_wdt_set_heartbeat(TIMER_MARGIN);
418 printk(KERN_INFO "mpcore_margin value must be 0<mpcore_margin<65536, using %d\n", 429 printk(KERN_INFO "mpcore_margin value must be 0 < mpcore_margin < 65536, using %d\n",
419 TIMER_MARGIN); 430 TIMER_MARGIN);
420 } 431 }
421 432
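mpcore_wdt_release() above distinguishes an expected close (expect_close == 42) from an unexpected one, but the write handler that sets that value lies outside the quoted hunks. The conventional magic-close implementation, also visible in the mv64x60_wdt hunk further down, looks roughly like this sketch with placeholder names:

#include <linux/fs.h>
#include <linux/uaccess.h>

/* Real drivers may keep this per device, as mpcore_wdt does. */
static int expect_close;

static ssize_t foo_wdt_write(struct file *file, const char __user *data,
                             size_t len, loff_t *ppos)
{
        if (len) {
                size_t i;

                expect_close = 0;
                for (i = 0; i != len; i++) {
                        char c;

                        if (get_user(c, data + i))
                                return -EFAULT;
                        if (c == 'V')
                                expect_close = 42;      /* permit clean stop */
                }
                /* any write also counts as a keepalive ping */
        }
        return len;
}

A userspace client that wants the watchdog stopped on exit writes the single character 'V' immediately before close().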
diff --git a/drivers/watchdog/mtx-1_wdt.c b/drivers/watchdog/mtx-1_wdt.c
index a8e67383784e..b4b7b0a4c119 100644
--- a/drivers/watchdog/mtx-1_wdt.c
+++ b/drivers/watchdog/mtx-1_wdt.c
@@ -1,7 +1,8 @@
1/* 1/*
2 * Driver for the MTX-1 Watchdog. 2 * Driver for the MTX-1 Watchdog.
3 * 3 *
4 * (C) Copyright 2005 4G Systems <info@4g-systems.biz>, All Rights Reserved. 4 * (C) Copyright 2005 4G Systems <info@4g-systems.biz>,
5 * All Rights Reserved.
5 * http://www.4g-systems.biz 6 * http://www.4g-systems.biz
6 * 7 *
7 * (C) Copyright 2007 OpenWrt.org, Florian Fainelli <florian@openwrt.org> 8 * (C) Copyright 2007 OpenWrt.org, Florian Fainelli <florian@openwrt.org>
@@ -46,12 +47,11 @@
46#include <linux/jiffies.h> 47#include <linux/jiffies.h>
47#include <linux/watchdog.h> 48#include <linux/watchdog.h>
48#include <linux/platform_device.h> 49#include <linux/platform_device.h>
49 50#include <linux/io.h>
50#include <asm/io.h> 51#include <linux/uaccess.h>
51#include <asm/uaccess.h> 52#include <linux/gpio.h>
52 53
53#include <asm/mach-au1x00/au1000.h> 54#include <asm/mach-au1x00/au1000.h>
54#include <asm/gpio.h>
55 55
56#define MTX1_WDT_INTERVAL (5 * HZ) 56#define MTX1_WDT_INTERVAL (5 * HZ)
57 57
@@ -59,6 +59,7 @@ static int ticks = 100 * HZ;
59 59
60static struct { 60static struct {
61 struct completion stop; 61 struct completion stop;
62 spinlock_t lock;
62 int running; 63 int running;
63 struct timer_list timer; 64 struct timer_list timer;
64 int queue; 65 int queue;
@@ -71,6 +72,7 @@ static void mtx1_wdt_trigger(unsigned long unused)
71{ 72{
72 u32 tmp; 73 u32 tmp;
73 74
75 spin_lock(&mtx1_wdt_device.lock);
74 if (mtx1_wdt_device.running) 76 if (mtx1_wdt_device.running)
75 ticks--; 77 ticks--;
76 /* 78 /*
@@ -79,13 +81,13 @@ static void mtx1_wdt_trigger(unsigned long unused)
79 tmp = au_readl(GPIO2_DIR); 81 tmp = au_readl(GPIO2_DIR);
80 tmp = (tmp & ~(1 << mtx1_wdt_device.gpio)) | 82 tmp = (tmp & ~(1 << mtx1_wdt_device.gpio)) |
81 ((~tmp) & (1 << mtx1_wdt_device.gpio)); 83 ((~tmp) & (1 << mtx1_wdt_device.gpio));
82 au_writel (tmp, GPIO2_DIR); 84 au_writel(tmp, GPIO2_DIR);
83 85
84 if (mtx1_wdt_device.queue && ticks) 86 if (mtx1_wdt_device.queue && ticks)
85 mod_timer(&mtx1_wdt_device.timer, jiffies + MTX1_WDT_INTERVAL); 87 mod_timer(&mtx1_wdt_device.timer, jiffies + MTX1_WDT_INTERVAL);
86 else { 88 else
87 complete(&mtx1_wdt_device.stop); 89 complete(&mtx1_wdt_device.stop);
88 } 90 spin_unlock(&mtx1_wdt_device.lock);
89} 91}
90 92
91static void mtx1_wdt_reset(void) 93static void mtx1_wdt_reset(void)
@@ -96,23 +98,25 @@ static void mtx1_wdt_reset(void)
96 98
97static void mtx1_wdt_start(void) 99static void mtx1_wdt_start(void)
98{ 100{
101 spin_lock_irqsave(&mtx1_wdt_device.lock, flags);
99 if (!mtx1_wdt_device.queue) { 102 if (!mtx1_wdt_device.queue) {
100 mtx1_wdt_device.queue = 1; 103 mtx1_wdt_device.queue = 1;
101 gpio_set_value(mtx1_wdt_device.gpio, 1); 104 gpio_set_value(mtx1_wdt_device.gpio, 1);
102 mod_timer(&mtx1_wdt_device.timer, jiffies + MTX1_WDT_INTERVAL); 105 mod_timer(&mtx1_wdt_device.timer, jiffies + MTX1_WDT_INTERVAL);
103 } 106 }
104 mtx1_wdt_device.running++; 107 mtx1_wdt_device.running++;
108 spin_unlock_irqrestore(&mtx1_wdt_device.lock, flags);
105} 109}
106 110
107static int mtx1_wdt_stop(void) 111static int mtx1_wdt_stop(void)
108{ 112{
113 spin_lock_irqsave(&mtx1_wdt_device.lock, flags);
109 if (mtx1_wdt_device.queue) { 114 if (mtx1_wdt_device.queue) {
110 mtx1_wdt_device.queue = 0; 115 mtx1_wdt_device.queue = 0;
111 gpio_set_value(mtx1_wdt_device.gpio, 0); 116 gpio_set_value(mtx1_wdt_device.gpio, 0);
112 } 117 }
113
114 ticks = mtx1_wdt_device.default_ticks; 118 ticks = mtx1_wdt_device.default_ticks;
115 119 spin_unlock_irqrestore(&mtx1_wdt_device.lock, flags);
116 return 0; 120 return 0;
117} 121}
118 122
@@ -122,7 +126,6 @@ static int mtx1_wdt_open(struct inode *inode, struct file *file)
122{ 126{
123 if (test_and_set_bit(0, &mtx1_wdt_device.inuse)) 127 if (test_and_set_bit(0, &mtx1_wdt_device.inuse))
124 return -EBUSY; 128 return -EBUSY;
125
126 return nonseekable_open(inode, file); 129 return nonseekable_open(inode, file);
127} 130}
128 131
@@ -133,54 +136,51 @@ static int mtx1_wdt_release(struct inode *inode, struct file *file)
133 return 0; 136 return 0;
134} 137}
135 138
136static int mtx1_wdt_ioctl(struct inode *inode, struct file *file, unsigned int cmd, unsigned long arg) 139static long mtx1_wdt_ioctl(struct file *file, unsigned int cmd,
140 unsigned long arg)
137{ 141{
138 void __user *argp = (void __user *)arg; 142 void __user *argp = (void __user *)arg;
143 int __user *p = (int __user *)argp;
139 unsigned int value; 144 unsigned int value;
140 static struct watchdog_info ident = 145 static const struct watchdog_info ident = {
141 {
142 .options = WDIOF_CARDRESET, 146 .options = WDIOF_CARDRESET,
143 .identity = "MTX-1 WDT", 147 .identity = "MTX-1 WDT",
144 }; 148 };
145 149
146 switch(cmd) { 150 switch (cmd) {
147 case WDIOC_KEEPALIVE: 151 case WDIOC_GETSUPPORT:
148 mtx1_wdt_reset(); 152 if (copy_to_user(argp, &ident, sizeof(ident)))
149 break; 153 return -EFAULT;
150 case WDIOC_GETSTATUS: 154 break;
151 case WDIOC_GETBOOTSTATUS: 155 case WDIOC_GETSTATUS:
152 if ( copy_to_user(argp, &value, sizeof(int)) ) 156 case WDIOC_GETBOOTSTATUS:
153 return -EFAULT; 157 put_user(0, p);
154 break; 158 break;
155 case WDIOC_GETSUPPORT: 159 case WDIOC_SETOPTIONS:
156 if ( copy_to_user(argp, &ident, sizeof(ident)) ) 160 if (get_user(value, p))
157 return -EFAULT; 161 return -EFAULT;
158 break; 162 if (value & WDIOS_ENABLECARD)
159 case WDIOC_SETOPTIONS: 163 mtx1_wdt_start();
160 if ( copy_from_user(&value, argp, sizeof(int)) ) 164 else if (value & WDIOS_DISABLECARD)
161 return -EFAULT; 165 mtx1_wdt_stop();
162 switch(value) { 166 else
163 case WDIOS_ENABLECARD: 167 return -EINVAL;
164 mtx1_wdt_start(); 168 return 0;
165 break; 169 case WDIOC_KEEPALIVE:
166 case WDIOS_DISABLECARD: 170 mtx1_wdt_reset();
167 return mtx1_wdt_stop(); 171 break;
168 default: 172 default:
169 return -EINVAL; 173 return -ENOTTY;
170 }
171 break;
172 default:
173 return -ENOTTY;
174 } 174 }
175 return 0; 175 return 0;
176} 176}
177 177
178 178
179static ssize_t mtx1_wdt_write(struct file *file, const char *buf, size_t count, loff_t *ppos) 179static ssize_t mtx1_wdt_write(struct file *file, const char *buf,
180 size_t count, loff_t *ppos)
180{ 181{
181 if (!count) 182 if (!count)
182 return -EIO; 183 return -EIO;
183
184 mtx1_wdt_reset(); 184 mtx1_wdt_reset();
185 return count; 185 return count;
186} 186}
@@ -188,17 +188,17 @@ static ssize_t mtx1_wdt_write(struct file *file, const char *buf, size_t count,
188static const struct file_operations mtx1_wdt_fops = { 188static const struct file_operations mtx1_wdt_fops = {
189 .owner = THIS_MODULE, 189 .owner = THIS_MODULE,
190 .llseek = no_llseek, 190 .llseek = no_llseek,
191 .ioctl = mtx1_wdt_ioctl, 191 .unlocked_ioctl = mtx1_wdt_ioctl,
192 .open = mtx1_wdt_open, 192 .open = mtx1_wdt_open,
193 .write = mtx1_wdt_write, 193 .write = mtx1_wdt_write,
194 .release = mtx1_wdt_release 194 .release = mtx1_wdt_release,
195}; 195};
196 196
197 197
198static struct miscdevice mtx1_wdt_misc = { 198static struct miscdevice mtx1_wdt_misc = {
199 .minor = WATCHDOG_MINOR, 199 .minor = WATCHDOG_MINOR,
200 .name = "watchdog", 200 .name = "watchdog",
201 .fops = &mtx1_wdt_fops 201 .fops = &mtx1_wdt_fops,
202}; 202};
203 203
204 204
@@ -208,29 +208,26 @@ static int mtx1_wdt_probe(struct platform_device *pdev)
208 208
209 mtx1_wdt_device.gpio = pdev->resource[0].start; 209 mtx1_wdt_device.gpio = pdev->resource[0].start;
210 210
211 if ((ret = misc_register(&mtx1_wdt_misc)) < 0) { 211 spin_lock_init(&mtx1_wdt_device.lock);
212 printk(KERN_ERR " mtx-1_wdt : failed to register\n");
213 return ret;
214 }
215
216 init_completion(&mtx1_wdt_device.stop); 212 init_completion(&mtx1_wdt_device.stop);
217 mtx1_wdt_device.queue = 0; 213 mtx1_wdt_device.queue = 0;
218
219 clear_bit(0, &mtx1_wdt_device.inuse); 214 clear_bit(0, &mtx1_wdt_device.inuse);
220
221 setup_timer(&mtx1_wdt_device.timer, mtx1_wdt_trigger, 0L); 215 setup_timer(&mtx1_wdt_device.timer, mtx1_wdt_trigger, 0L);
222
223 mtx1_wdt_device.default_ticks = ticks; 216 mtx1_wdt_device.default_ticks = ticks;
224 217
218 ret = misc_register(&mtx1_wdt_misc);
219 if (ret < 0) {
220 printk(KERN_ERR " mtx-1_wdt : failed to register\n");
221 return ret;
222 }
225 mtx1_wdt_start(); 223 mtx1_wdt_start();
226
227 printk(KERN_INFO "MTX-1 Watchdog driver\n"); 224 printk(KERN_INFO "MTX-1 Watchdog driver\n");
228
229 return 0; 225 return 0;
230} 226}
231 227
232static int mtx1_wdt_remove(struct platform_device *pdev) 228static int mtx1_wdt_remove(struct platform_device *pdev)
233{ 229{
230 /* FIXME: do we need to lock this test ? */
234 if (mtx1_wdt_device.queue) { 231 if (mtx1_wdt_device.queue) {
235 mtx1_wdt_device.queue = 0; 232 mtx1_wdt_device.queue = 0;
236 wait_for_completion(&mtx1_wdt_device.stop); 233 wait_for_completion(&mtx1_wdt_device.stop);
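The mtx-1 hunks call spin_lock_irqsave(&mtx1_wdt_device.lock, flags) in the start and stop paths, but the local flags declaration falls outside the quoted context. The usual shape, shown here with placeholder names, is:

#include <linux/spinlock.h>

static DEFINE_SPINLOCK(example_lock);

static void example_start(void)
{
        unsigned long flags;

        spin_lock_irqsave(&example_lock, flags);
        /* start/stop bookkeeping that may race with the timer callback */
        spin_unlock_irqrestore(&example_lock, flags);
}

The irqsave variant matters here because the same lock is also taken from the timer callback in mtx1_wdt_trigger(), which runs in softirq context.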
diff --git a/drivers/watchdog/mv64x60_wdt.c b/drivers/watchdog/mv64x60_wdt.c
index b59ca3273967..acf589dc057c 100644
--- a/drivers/watchdog/mv64x60_wdt.c
+++ b/drivers/watchdog/mv64x60_wdt.c
@@ -8,7 +8,7 @@
8 * and services the watchdog. 8 * and services the watchdog.
9 * 9 *
10 * Derived from mpc8xx_wdt.c, with the following copyright. 10 * Derived from mpc8xx_wdt.c, with the following copyright.
11 * 11 *
12 * 2002 (c) Florian Schirmer <jolt@tuxbox.org> This file is licensed under 12 * 2002 (c) Florian Schirmer <jolt@tuxbox.org> This file is licensed under
13 * the terms of the GNU General Public License version 2. This program 13 * the terms of the GNU General Public License version 2. This program
14 * is licensed "as is" without any warranty of any kind, whether express 14 * is licensed "as is" without any warranty of any kind, whether express
@@ -22,10 +22,9 @@
22#include <linux/module.h> 22#include <linux/module.h>
23#include <linux/watchdog.h> 23#include <linux/watchdog.h>
24#include <linux/platform_device.h> 24#include <linux/platform_device.h>
25
26#include <linux/mv643xx.h> 25#include <linux/mv643xx.h>
27#include <asm/uaccess.h> 26#include <linux/uaccess.h>
28#include <asm/io.h> 27#include <linux/io.h>
29 28
30#define MV64x60_WDT_WDC_OFFSET 0 29#define MV64x60_WDT_WDC_OFFSET 0
31 30
@@ -61,7 +60,9 @@ static DEFINE_SPINLOCK(mv64x60_wdt_spinlock);
61 60
62static int nowayout = WATCHDOG_NOWAYOUT; 61static int nowayout = WATCHDOG_NOWAYOUT;
63module_param(nowayout, int, 0); 62module_param(nowayout, int, 0);
64MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped once started (default=" __MODULE_STRING(WATCHDOG_NOWAYOUT) ")"); 63MODULE_PARM_DESC(nowayout,
64 "Watchdog cannot be stopped once started (default="
65 __MODULE_STRING(WATCHDOG_NOWAYOUT) ")");
65 66
66static int mv64x60_wdt_toggle_wdc(int enabled_predicate, int field_shift) 67static int mv64x60_wdt_toggle_wdc(int enabled_predicate, int field_shift)
67{ 68{
@@ -150,7 +151,7 @@ static int mv64x60_wdt_release(struct inode *inode, struct file *file)
150} 151}
151 152
152static ssize_t mv64x60_wdt_write(struct file *file, const char __user *data, 153static ssize_t mv64x60_wdt_write(struct file *file, const char __user *data,
153 size_t len, loff_t * ppos) 154 size_t len, loff_t *ppos)
154{ 155{
155 if (len) { 156 if (len) {
156 if (!nowayout) { 157 if (!nowayout) {
@@ -160,7 +161,7 @@ static ssize_t mv64x60_wdt_write(struct file *file, const char __user *data,
160 161
161 for (i = 0; i != len; i++) { 162 for (i = 0; i != len; i++) {
162 char c; 163 char c;
163 if(get_user(c, data + i)) 164 if (get_user(c, data + i))
164 return -EFAULT; 165 return -EFAULT;
165 if (c == 'V') 166 if (c == 'V')
166 expect_close = 42; 167 expect_close = 42;
@@ -172,8 +173,8 @@ static ssize_t mv64x60_wdt_write(struct file *file, const char __user *data,
172 return len; 173 return len;
173} 174}
174 175
175static int mv64x60_wdt_ioctl(struct inode *inode, struct file *file, 176static long mv64x60_wdt_ioctl(struct file *file,
176 unsigned int cmd, unsigned long arg) 177 unsigned int cmd, unsigned long arg)
177{ 178{
178 int timeout; 179 int timeout;
179 int options; 180 int options;
@@ -240,7 +241,7 @@ static const struct file_operations mv64x60_wdt_fops = {
240 .owner = THIS_MODULE, 241 .owner = THIS_MODULE,
241 .llseek = no_llseek, 242 .llseek = no_llseek,
242 .write = mv64x60_wdt_write, 243 .write = mv64x60_wdt_write,
243 .ioctl = mv64x60_wdt_ioctl, 244 .unlocked_ioctl = mv64x60_wdt_ioctl,
244 .open = mv64x60_wdt_open, 245 .open = mv64x60_wdt_open,
245 .release = mv64x60_wdt_release, 246 .release = mv64x60_wdt_release,
246}; 247};
diff --git a/drivers/watchdog/omap_wdt.c b/drivers/watchdog/omap_wdt.c
index 74bc39aa1ce8..3a11dadfd8e7 100644
--- a/drivers/watchdog/omap_wdt.c
+++ b/drivers/watchdog/omap_wdt.c
@@ -40,12 +40,10 @@
40#include <linux/moduleparam.h> 40#include <linux/moduleparam.h>
41#include <linux/clk.h> 41#include <linux/clk.h>
42#include <linux/bitops.h> 42#include <linux/bitops.h>
43 43#include <linux/io.h>
44#include <asm/io.h> 44#include <linux/uaccess.h>
45#include <asm/uaccess.h> 45#include <mach/hardware.h>
46#include <asm/hardware.h> 46#include <mach/prcm.h>
47
48#include <asm/arch/prcm.h>
49 47
50#include "omap_wdt.h" 48#include "omap_wdt.h"
51 49
@@ -54,11 +52,12 @@ module_param(timer_margin, uint, 0);
54MODULE_PARM_DESC(timer_margin, "initial watchdog timeout (in seconds)"); 52MODULE_PARM_DESC(timer_margin, "initial watchdog timeout (in seconds)");
55 53
56static int omap_wdt_users; 54static int omap_wdt_users;
57static struct clk *armwdt_ck = NULL; 55static struct clk *armwdt_ck;
58static struct clk *mpu_wdt_ick = NULL; 56static struct clk *mpu_wdt_ick;
59static struct clk *mpu_wdt_fck = NULL; 57static struct clk *mpu_wdt_fck;
60 58
61static unsigned int wdt_trgr_pattern = 0x1234; 59static unsigned int wdt_trgr_pattern = 0x1234;
60static spinlock_t wdt_lock;
62 61
63static void omap_wdt_ping(void) 62static void omap_wdt_ping(void)
64{ 63{
@@ -174,30 +173,29 @@ static int omap_wdt_release(struct inode *inode, struct file *file)
174 return 0; 173 return 0;
175} 174}
176 175
177static ssize_t 176static ssize_t omap_wdt_write(struct file *file, const char __user *data,
178omap_wdt_write(struct file *file, const char __user *data,
179 size_t len, loff_t *ppos) 177 size_t len, loff_t *ppos)
180{ 178{
181 /* Refresh LOAD_TIME. */ 179 /* Refresh LOAD_TIME. */
182 if (len) 180 if (len) {
181 spin_lock(&wdt_lock);
183 omap_wdt_ping(); 182 omap_wdt_ping();
183 spin_unlock(&wdt_lock);
184 }
184 return len; 185 return len;
185} 186}
186 187
187static int 188static long omap_wdt_ioctl(struct file *file, unsigned int cmd,
188omap_wdt_ioctl(struct inode *inode, struct file *file, 189 unsigned long arg)
189 unsigned int cmd, unsigned long arg)
190{ 190{
191 int new_margin; 191 int new_margin;
192 static struct watchdog_info ident = { 192 static const struct watchdog_info ident = {
193 .identity = "OMAP Watchdog", 193 .identity = "OMAP Watchdog",
194 .options = WDIOF_SETTIMEOUT, 194 .options = WDIOF_SETTIMEOUT,
195 .firmware_version = 0, 195 .firmware_version = 0,
196 }; 196 };
197 197
198 switch (cmd) { 198 switch (cmd) {
199 default:
200 return -ENOTTY;
201 case WDIOC_GETSUPPORT: 199 case WDIOC_GETSUPPORT:
202 return copy_to_user((struct watchdog_info __user *)arg, &ident, 200 return copy_to_user((struct watchdog_info __user *)arg, &ident,
203 sizeof(ident)); 201 sizeof(ident));
@@ -211,28 +209,34 @@ omap_wdt_ioctl(struct inode *inode, struct file *file,
211 return put_user(omap_prcm_get_reset_sources(), 209 return put_user(omap_prcm_get_reset_sources(),
212 (int __user *)arg); 210 (int __user *)arg);
213 case WDIOC_KEEPALIVE: 211 case WDIOC_KEEPALIVE:
212 spin_lock(&wdt_lock);
214 omap_wdt_ping(); 213 omap_wdt_ping();
214 spin_unlock(&wdt_lock);
215 return 0; 215 return 0;
216 case WDIOC_SETTIMEOUT: 216 case WDIOC_SETTIMEOUT:
217 if (get_user(new_margin, (int __user *)arg)) 217 if (get_user(new_margin, (int __user *)arg))
218 return -EFAULT; 218 return -EFAULT;
219 omap_wdt_adjust_timeout(new_margin); 219 omap_wdt_adjust_timeout(new_margin);
220 220
221 spin_lock(&wdt_lock);
221 omap_wdt_disable(); 222 omap_wdt_disable();
222 omap_wdt_set_timeout(); 223 omap_wdt_set_timeout();
223 omap_wdt_enable(); 224 omap_wdt_enable();
224 225
225 omap_wdt_ping(); 226 omap_wdt_ping();
227 spin_unlock(&wdt_lock);
226 /* Fall */ 228 /* Fall */
227 case WDIOC_GETTIMEOUT: 229 case WDIOC_GETTIMEOUT:
228 return put_user(timer_margin, (int __user *)arg); 230 return put_user(timer_margin, (int __user *)arg);
231 default:
232 return -ENOTTY;
229 } 233 }
230} 234}
231 235
232static const struct file_operations omap_wdt_fops = { 236static const struct file_operations omap_wdt_fops = {
233 .owner = THIS_MODULE, 237 .owner = THIS_MODULE,
234 .write = omap_wdt_write, 238 .write = omap_wdt_write,
235 .ioctl = omap_wdt_ioctl, 239 .unlocked_ioctl = omap_wdt_ioctl,
236 .open = omap_wdt_open, 240 .open = omap_wdt_open,
237 .release = omap_wdt_release, 241 .release = omap_wdt_release,
238}; 242};
@@ -240,7 +244,7 @@ static const struct file_operations omap_wdt_fops = {
240static struct miscdevice omap_wdt_miscdev = { 244static struct miscdevice omap_wdt_miscdev = {
241 .minor = WATCHDOG_MINOR, 245 .minor = WATCHDOG_MINOR,
242 .name = "watchdog", 246 .name = "watchdog",
243 .fops = &omap_wdt_fops 247 .fops = &omap_wdt_fops,
244}; 248};
245 249
246static int __init omap_wdt_probe(struct platform_device *pdev) 250static int __init omap_wdt_probe(struct platform_device *pdev)
@@ -373,6 +377,7 @@ static struct platform_driver omap_wdt_driver = {
373 377
374static int __init omap_wdt_init(void) 378static int __init omap_wdt_init(void)
375{ 379{
380 spin_lock_init(&wdt_lock);
376 return platform_driver_register(&omap_wdt_driver); 381 return platform_driver_register(&omap_wdt_driver);
377} 382}
378 383
diff --git a/drivers/watchdog/pc87413_wdt.c b/drivers/watchdog/pc87413_wdt.c
index 15e4f8887a9e..e91ada72da1d 100644
--- a/drivers/watchdog/pc87413_wdt.c
+++ b/drivers/watchdog/pc87413_wdt.c
@@ -31,14 +31,14 @@
31#include <linux/spinlock.h> 31#include <linux/spinlock.h>
32#include <linux/moduleparam.h> 32#include <linux/moduleparam.h>
33#include <linux/version.h> 33#include <linux/version.h>
34#include <linux/io.h>
35#include <linux/uaccess.h>
34 36
35#include <asm/io.h>
36#include <asm/uaccess.h>
37#include <asm/system.h> 37#include <asm/system.h>
38 38
39/* #define DEBUG 1 */ 39/* #define DEBUG 1 */
40 40
41#define DEFAULT_TIMEOUT 1 /* 1 minute */ 41#define DEFAULT_TIMEOUT 1 /* 1 minute */
42#define MAX_TIMEOUT 255 42#define MAX_TIMEOUT 255
43 43
44#define VERSION "1.1" 44#define VERSION "1.1"
@@ -46,22 +46,22 @@
46#define PFX MODNAME ": " 46#define PFX MODNAME ": "
47#define DPFX MODNAME " - DEBUG: " 47#define DPFX MODNAME " - DEBUG: "
48 48
49#define WDT_INDEX_IO_PORT (io+0) /* I/O port base (index register) */ 49#define WDT_INDEX_IO_PORT (io+0) /* I/O port base (index register) */
50#define WDT_DATA_IO_PORT (WDT_INDEX_IO_PORT+1) 50#define WDT_DATA_IO_PORT (WDT_INDEX_IO_PORT+1)
51#define SWC_LDN 0x04 51#define SWC_LDN 0x04
52#define SIOCFG2 0x22 /* Serial IO register */ 52#define SIOCFG2 0x22 /* Serial IO register */
53#define WDCTL 0x10 /* Watchdog-Timer-Controll-Register */ 53#define WDCTL 0x10 /* Watchdog-Timer-Controll-Register */
54#define WDTO 0x11 /* Watchdog timeout register */ 54#define WDTO 0x11 /* Watchdog timeout register */
55#define WDCFG 0x12 /* Watchdog config register */ 55#define WDCFG 0x12 /* Watchdog config register */
56 56
57static int io = 0x2E; /* Address used on Portwell Boards */ 57static int io = 0x2E; /* Address used on Portwell Boards */
58 58
59static int timeout = DEFAULT_TIMEOUT; /* timeout value */ 59static int timeout = DEFAULT_TIMEOUT; /* timeout value */
60static unsigned long timer_enabled = 0; /* is the timer enabled? */ 60static unsigned long timer_enabled; /* is the timer enabled? */
61 61
62static char expect_close; /* is the close expected? */ 62static char expect_close; /* is the close expected? */
63 63
64static DEFINE_SPINLOCK(io_lock);/* to guard the watchdog from io races */ 64static DEFINE_SPINLOCK(io_lock); /* to guard us from io races */
65 65
66static int nowayout = WATCHDOG_NOWAYOUT; 66static int nowayout = WATCHDOG_NOWAYOUT;
67 67
@@ -69,7 +69,7 @@ static int nowayout = WATCHDOG_NOWAYOUT;
69 69
70/* Select pins for Watchdog output */ 70/* Select pins for Watchdog output */
71 71
72static inline void pc87413_select_wdt_out (void) 72static inline void pc87413_select_wdt_out(void)
73{ 73{
74 unsigned int cr_data = 0; 74 unsigned int cr_data = 0;
75 75
@@ -77,7 +77,7 @@ static inline void pc87413_select_wdt_out (void)
77 77
78 outb_p(SIOCFG2, WDT_INDEX_IO_PORT); 78 outb_p(SIOCFG2, WDT_INDEX_IO_PORT);
79 79
80 cr_data = inb (WDT_DATA_IO_PORT); 80 cr_data = inb(WDT_DATA_IO_PORT);
81 81
82 cr_data |= 0x80; /* Set Bit7 to 1*/ 82 cr_data |= 0x80; /* Set Bit7 to 1*/
83 outb_p(SIOCFG2, WDT_INDEX_IO_PORT); 83 outb_p(SIOCFG2, WDT_INDEX_IO_PORT);
@@ -85,8 +85,9 @@ static inline void pc87413_select_wdt_out (void)
85 outb_p(cr_data, WDT_DATA_IO_PORT); 85 outb_p(cr_data, WDT_DATA_IO_PORT);
86 86
87#ifdef DEBUG 87#ifdef DEBUG
88 printk(KERN_INFO DPFX "Select multiple pin,pin55,as WDT output:" 88 printk(KERN_INFO DPFX
89 " Bit7 to 1: %d\n", cr_data); 89 "Select multiple pin,pin55,as WDT output: Bit7 to 1: %d\n",
90 cr_data);
90#endif 91#endif
91} 92}
92 93
@@ -94,18 +95,18 @@ static inline void pc87413_select_wdt_out (void)
94 95
95static inline void pc87413_enable_swc(void) 96static inline void pc87413_enable_swc(void)
96{ 97{
97 unsigned int cr_data=0; 98 unsigned int cr_data = 0;
98 99
99 /* Step 2: Enable SWC functions */ 100 /* Step 2: Enable SWC functions */
100 101
101 outb_p(0x07, WDT_INDEX_IO_PORT); /* Point SWC_LDN (LDN=4) */ 102 outb_p(0x07, WDT_INDEX_IO_PORT); /* Point SWC_LDN (LDN=4) */
102 outb_p(SWC_LDN, WDT_DATA_IO_PORT); 103 outb_p(SWC_LDN, WDT_DATA_IO_PORT);
103 104
104 outb_p(0x30, WDT_INDEX_IO_PORT); /* Read Index 0x30 First */ 105 outb_p(0x30, WDT_INDEX_IO_PORT); /* Read Index 0x30 First */
105 cr_data = inb(WDT_DATA_IO_PORT); 106 cr_data = inb(WDT_DATA_IO_PORT);
106 cr_data |= 0x01; /* Set Bit0 to 1 */ 107 cr_data |= 0x01; /* Set Bit0 to 1 */
107 outb_p(0x30, WDT_INDEX_IO_PORT); 108 outb_p(0x30, WDT_INDEX_IO_PORT);
108 outb_p(cr_data, WDT_DATA_IO_PORT); /* Index0x30_bit0P1 */ 109 outb_p(cr_data, WDT_DATA_IO_PORT); /* Index0x30_bit0P1 */
109 110
110#ifdef DEBUG 111#ifdef DEBUG
111 printk(KERN_INFO DPFX "pc87413 - Enable SWC functions\n"); 112 printk(KERN_INFO DPFX "pc87413 - Enable SWC functions\n");
@@ -121,20 +122,19 @@ static inline unsigned int pc87413_get_swc_base(void)
121 122
122 /* Step 3: Read SWC I/O Base Address */ 123 /* Step 3: Read SWC I/O Base Address */
123 124
124 outb_p(0x60, WDT_INDEX_IO_PORT); /* Read Index 0x60 */ 125 outb_p(0x60, WDT_INDEX_IO_PORT); /* Read Index 0x60 */
125 addr_h = inb(WDT_DATA_IO_PORT); 126 addr_h = inb(WDT_DATA_IO_PORT);
126 127
127 outb_p(0x61, WDT_INDEX_IO_PORT); /* Read Index 0x61 */ 128 outb_p(0x61, WDT_INDEX_IO_PORT); /* Read Index 0x61 */
128 129
129 addr_l = inb(WDT_DATA_IO_PORT); 130 addr_l = inb(WDT_DATA_IO_PORT);
130 131
131 swc_base_addr = (addr_h << 8) + addr_l; 132 swc_base_addr = (addr_h << 8) + addr_l;
132
133#ifdef DEBUG 133#ifdef DEBUG
134 printk(KERN_INFO DPFX "Read SWC I/O Base Address: low %d, high %d," 134 printk(KERN_INFO DPFX
135 " res %d\n", addr_l, addr_h, swc_base_addr); 135 "Read SWC I/O Base Address: low %d, high %d, res %d\n",
136 addr_l, addr_h, swc_base_addr);
136#endif 137#endif
137
138 return swc_base_addr; 138 return swc_base_addr;
139} 139}
140 140
@@ -143,9 +143,7 @@ static inline unsigned int pc87413_get_swc_base(void)
143static inline void pc87413_swc_bank3(unsigned int swc_base_addr) 143static inline void pc87413_swc_bank3(unsigned int swc_base_addr)
144{ 144{
145 /* Step 4: Select Bank3 of SWC */ 145 /* Step 4: Select Bank3 of SWC */
146
147 outb_p(inb(swc_base_addr + 0x0f) | 0x03, swc_base_addr + 0x0f); 146 outb_p(inb(swc_base_addr + 0x0f) | 0x03, swc_base_addr + 0x0f);
148
149#ifdef DEBUG 147#ifdef DEBUG
150 printk(KERN_INFO DPFX "Select Bank3 of SWC\n"); 148 printk(KERN_INFO DPFX "Select Bank3 of SWC\n");
151#endif 149#endif
@@ -157,9 +155,7 @@ static inline void pc87413_programm_wdto(unsigned int swc_base_addr,
157 char pc87413_time) 155 char pc87413_time)
158{ 156{
159 /* Step 5: Programm WDTO, Twd. */ 157 /* Step 5: Programm WDTO, Twd. */
160
161 outb_p(pc87413_time, swc_base_addr + WDTO); 158 outb_p(pc87413_time, swc_base_addr + WDTO);
162
163#ifdef DEBUG 159#ifdef DEBUG
164 printk(KERN_INFO DPFX "Set WDTO to %d minutes\n", pc87413_time); 160 printk(KERN_INFO DPFX "Set WDTO to %d minutes\n", pc87413_time);
165#endif 161#endif
@@ -170,9 +166,7 @@ static inline void pc87413_programm_wdto(unsigned int swc_base_addr,
170static inline void pc87413_enable_wden(unsigned int swc_base_addr) 166static inline void pc87413_enable_wden(unsigned int swc_base_addr)
171{ 167{
172 /* Step 6: Enable WDEN */ 168 /* Step 6: Enable WDEN */
173 169 outb_p(inb(swc_base_addr + WDCTL) | 0x01, swc_base_addr + WDCTL);
174 outb_p(inb (swc_base_addr + WDCTL) | 0x01, swc_base_addr + WDCTL);
175
176#ifdef DEBUG 170#ifdef DEBUG
177 printk(KERN_INFO DPFX "Enable WDEN\n"); 171 printk(KERN_INFO DPFX "Enable WDEN\n");
178#endif 172#endif
@@ -182,9 +176,7 @@ static inline void pc87413_enable_wden(unsigned int swc_base_addr)
182static inline void pc87413_enable_sw_wd_tren(unsigned int swc_base_addr) 176static inline void pc87413_enable_sw_wd_tren(unsigned int swc_base_addr)
183{ 177{
184 /* Enable SW_WD_TREN */ 178 /* Enable SW_WD_TREN */
185 179 outb_p(inb(swc_base_addr + WDCFG) | 0x80, swc_base_addr + WDCFG);
186 outb_p(inb (swc_base_addr + WDCFG) | 0x80, swc_base_addr + WDCFG);
187
188#ifdef DEBUG 180#ifdef DEBUG
189 printk(KERN_INFO DPFX "Enable SW_WD_TREN\n"); 181 printk(KERN_INFO DPFX "Enable SW_WD_TREN\n");
190#endif 182#endif
@@ -195,9 +187,7 @@ static inline void pc87413_enable_sw_wd_tren(unsigned int swc_base_addr)
195static inline void pc87413_disable_sw_wd_tren(unsigned int swc_base_addr) 187static inline void pc87413_disable_sw_wd_tren(unsigned int swc_base_addr)
196{ 188{
197 /* Disable SW_WD_TREN */ 189 /* Disable SW_WD_TREN */
198 190 outb_p(inb(swc_base_addr + WDCFG) & 0x7f, swc_base_addr + WDCFG);
199 outb_p(inb (swc_base_addr + WDCFG) & 0x7f, swc_base_addr + WDCFG);
200
201#ifdef DEBUG 191#ifdef DEBUG
202 printk(KERN_INFO DPFX "pc87413 - Disable SW_WD_TREN\n"); 192 printk(KERN_INFO DPFX "pc87413 - Disable SW_WD_TREN\n");
203#endif 193#endif
@@ -208,9 +198,7 @@ static inline void pc87413_disable_sw_wd_tren(unsigned int swc_base_addr)
208static inline void pc87413_enable_sw_wd_trg(unsigned int swc_base_addr) 198static inline void pc87413_enable_sw_wd_trg(unsigned int swc_base_addr)
209{ 199{
210 /* Enable SW_WD_TRG */ 200 /* Enable SW_WD_TRG */
211 201 outb_p(inb(swc_base_addr + WDCTL) | 0x80, swc_base_addr + WDCTL);
212 outb_p(inb (swc_base_addr + WDCTL) | 0x80, swc_base_addr + WDCTL);
213
214#ifdef DEBUG 202#ifdef DEBUG
215 printk(KERN_INFO DPFX "pc87413 - Enable SW_WD_TRG\n"); 203 printk(KERN_INFO DPFX "pc87413 - Enable SW_WD_TRG\n");
216#endif 204#endif
@@ -221,9 +209,7 @@ static inline void pc87413_enable_sw_wd_trg(unsigned int swc_base_addr)
221static inline void pc87413_disable_sw_wd_trg(unsigned int swc_base_addr) 209static inline void pc87413_disable_sw_wd_trg(unsigned int swc_base_addr)
222{ 210{
223 /* Disable SW_WD_TRG */ 211 /* Disable SW_WD_TRG */
224 212 outb_p(inb(swc_base_addr + WDCTL) & 0x7f, swc_base_addr + WDCTL);
225 outb_p(inb (swc_base_addr + WDCTL) & 0x7f, swc_base_addr + WDCTL);
226
227#ifdef DEBUG 213#ifdef DEBUG
228 printk(KERN_INFO DPFX "Disable SW_WD_TRG\n"); 214 printk(KERN_INFO DPFX "Disable SW_WD_TRG\n");
229#endif 215#endif
@@ -314,8 +300,8 @@ static int pc87413_open(struct inode *inode, struct file *file)
314 /* Reload and activate timer */ 300 /* Reload and activate timer */
315 pc87413_refresh(); 301 pc87413_refresh();
316 302
317 printk(KERN_INFO MODNAME "Watchdog enabled. Timeout set to" 303 printk(KERN_INFO MODNAME
318 " %d minute(s).\n", timeout); 304 "Watchdog enabled. Timeout set to %d minute(s).\n", timeout);
319 305
320 return nonseekable_open(inode, file); 306 return nonseekable_open(inode, file);
321} 307}
@@ -338,17 +324,15 @@ static int pc87413_release(struct inode *inode, struct file *file)
338 324
339 if (expect_close == 42) { 325 if (expect_close == 42) {
340 pc87413_disable(); 326 pc87413_disable();
341 printk(KERN_INFO MODNAME "Watchdog disabled," 327 printk(KERN_INFO MODNAME
342 " sleeping again...\n"); 328 "Watchdog disabled, sleeping again...\n");
343 } else { 329 } else {
344 printk(KERN_CRIT MODNAME "Unexpected close, not stopping" 330 printk(KERN_CRIT MODNAME
345 " watchdog!\n"); 331 "Unexpected close, not stopping watchdog!\n");
346 pc87413_refresh(); 332 pc87413_refresh();
347 } 333 }
348
349 clear_bit(0, &timer_enabled); 334 clear_bit(0, &timer_enabled);
350 expect_close = 0; 335 expect_close = 0;
351
352 return 0; 336 return 0;
353} 337}
354 338
@@ -386,10 +370,11 @@ static ssize_t pc87413_write(struct file *file, const char __user *data,
386 /* reset expect flag */ 370 /* reset expect flag */
387 expect_close = 0; 371 expect_close = 0;
388 372
389 /* scan to see whether or not we got the magic character */ 373 /* scan to see whether or not we got the
374 magic character */
390 for (i = 0; i != len; i++) { 375 for (i = 0; i != len; i++) {
391 char c; 376 char c;
392 if (get_user(c, data+i)) 377 if (get_user(c, data + i))
393 return -EFAULT; 378 return -EFAULT;
394 if (c == 'V') 379 if (c == 'V')
395 expect_close = 42; 380 expect_close = 42;
@@ -404,7 +389,6 @@ static ssize_t pc87413_write(struct file *file, const char __user *data,
404 389
405/** 390/**
406 * pc87413_ioctl: 391 * pc87413_ioctl:
407 * @inode: inode of the device
408 * @file: file handle to the device 392 * @file: file handle to the device
409 * @cmd: watchdog command 393 * @cmd: watchdog command
410 * @arg: argument pointer 394 * @arg: argument pointer
@@ -414,8 +398,8 @@ static ssize_t pc87413_write(struct file *file, const char __user *data,
414 * querying capabilities and current status. 398 * querying capabilities and current status.
415 */ 399 */
416 400
417static int pc87413_ioctl(struct inode *inode, struct file *file, 401static long pc87413_ioctl(struct file *file, unsigned int cmd,
418 unsigned int cmd, unsigned long arg) 402 unsigned long arg)
419{ 403{
420 int new_timeout; 404 int new_timeout;
421 405
@@ -426,75 +410,58 @@ static int pc87413_ioctl(struct inode *inode, struct file *file,
426 410
427 static struct watchdog_info ident = { 411 static struct watchdog_info ident = {
428 .options = WDIOF_KEEPALIVEPING | 412 .options = WDIOF_KEEPALIVEPING |
429 WDIOF_SETTIMEOUT | 413 WDIOF_SETTIMEOUT |
430 WDIOF_MAGICCLOSE, 414 WDIOF_MAGICCLOSE,
431 .firmware_version = 1, 415 .firmware_version = 1,
432 .identity = "PC87413(HF/F) watchdog" 416 .identity = "PC87413(HF/F) watchdog",
433 }; 417 };
434 418
435 uarg.i = (int __user *)arg; 419 uarg.i = (int __user *)arg;
436 420
437 switch(cmd) { 421 switch (cmd) {
438 default: 422 case WDIOC_GETSUPPORT:
439 return -ENOTTY; 423 return copy_to_user(uarg.ident, &ident,
440 424 sizeof(ident)) ? -EFAULT : 0;
441 case WDIOC_GETSUPPORT: 425 case WDIOC_GETSTATUS:
442 return copy_to_user(uarg.ident, &ident, 426 return put_user(pc87413_status(), uarg.i);
443 sizeof(ident)) ? -EFAULT : 0; 427 case WDIOC_GETBOOTSTATUS:
444 428 return put_user(0, uarg.i);
445 case WDIOC_GETSTATUS: 429 case WDIOC_SETOPTIONS:
446 return put_user(pc87413_status(), uarg.i); 430 {
447 431 int options, retval = -EINVAL;
448 case WDIOC_GETBOOTSTATUS: 432 if (get_user(options, uarg.i))
449 return put_user(0, uarg.i); 433 return -EFAULT;
450 434 if (options & WDIOS_DISABLECARD) {
451 case WDIOC_KEEPALIVE: 435 pc87413_disable();
452 pc87413_refresh(); 436 retval = 0;
437 }
438 if (options & WDIOS_ENABLECARD) {
439 pc87413_enable();
440 retval = 0;
441 }
442 return retval;
443 }
444 case WDIOC_KEEPALIVE:
445 pc87413_refresh();
453#ifdef DEBUG 446#ifdef DEBUG
454 printk(KERN_INFO DPFX "keepalive\n"); 447 printk(KERN_INFO DPFX "keepalive\n");
455#endif 448#endif
456 return 0; 449 return 0;
457 450 case WDIOC_SETTIMEOUT:
458 case WDIOC_SETTIMEOUT: 451 if (get_user(new_timeout, uarg.i))
459 if (get_user(new_timeout, uarg.i)) 452 return -EFAULT;
460 return -EFAULT; 453 /* the API states this is given in secs */
461 454 new_timeout /= 60;
462 // the API states this is given in secs 455 if (new_timeout < 0 || new_timeout > MAX_TIMEOUT)
463 new_timeout /= 60; 456 return -EINVAL;
464 457 timeout = new_timeout;
465 if (new_timeout < 0 || new_timeout > MAX_TIMEOUT) 458 pc87413_refresh();
466 return -EINVAL; 459 /* fall through and return the new timeout... */
467 460 case WDIOC_GETTIMEOUT:
468 timeout = new_timeout; 461 new_timeout = timeout * 60;
469 pc87413_refresh(); 462 return put_user(new_timeout, uarg.i);
470 463 default:
471 // fall through and return the new timeout... 464 return -ENOTTY;
472
473 case WDIOC_GETTIMEOUT:
474
475 new_timeout = timeout * 60;
476
477 return put_user(new_timeout, uarg.i);
478
479 case WDIOC_SETOPTIONS:
480 {
481 int options, retval = -EINVAL;
482
483 if (get_user(options, uarg.i))
484 return -EFAULT;
485
486 if (options & WDIOS_DISABLECARD) {
487 pc87413_disable();
488 retval = 0;
489 }
490
491 if (options & WDIOS_ENABLECARD) {
492 pc87413_enable();
493 retval = 0;
494 }
495
496 return retval;
497 }
498 } 465 }
499} 466}
500 467
@@ -517,10 +484,8 @@ static int pc87413_notify_sys(struct notifier_block *this,
517 void *unused) 484 void *unused)
518{ 485{
519 if (code == SYS_DOWN || code == SYS_HALT) 486 if (code == SYS_DOWN || code == SYS_HALT)
520 {
521 /* Turn the card off */ 487 /* Turn the card off */
522 pc87413_disable(); 488 pc87413_disable();
523 }
524 return NOTIFY_DONE; 489 return NOTIFY_DONE;
525} 490}
526 491
@@ -530,21 +495,19 @@ static const struct file_operations pc87413_fops = {
530 .owner = THIS_MODULE, 495 .owner = THIS_MODULE,
531 .llseek = no_llseek, 496 .llseek = no_llseek,
532 .write = pc87413_write, 497 .write = pc87413_write,
533 .ioctl = pc87413_ioctl, 498 .unlocked_ioctl = pc87413_ioctl,
534 .open = pc87413_open, 499 .open = pc87413_open,
535 .release = pc87413_release, 500 .release = pc87413_release,
536}; 501};
537 502
538static struct notifier_block pc87413_notifier = 503static struct notifier_block pc87413_notifier = {
539{
540 .notifier_call = pc87413_notify_sys, 504 .notifier_call = pc87413_notify_sys,
541}; 505};
542 506
543static struct miscdevice pc87413_miscdev= 507static struct miscdevice pc87413_miscdev = {
544{
545 .minor = WATCHDOG_MINOR, 508 .minor = WATCHDOG_MINOR,
546 .name = "watchdog", 509 .name = "watchdog",
547 .fops = &pc87413_fops 510 .fops = &pc87413_fops,
548}; 511};
549 512
550/* -- Module init functions -------------------------------------*/ 513/* -- Module init functions -------------------------------------*/
@@ -561,29 +524,26 @@ static int __init pc87413_init(void)
561{ 524{
562 int ret; 525 int ret;
563 526
564 printk(KERN_INFO PFX "Version " VERSION " at io 0x%X\n", WDT_INDEX_IO_PORT); 527 printk(KERN_INFO PFX "Version " VERSION " at io 0x%X\n",
528 WDT_INDEX_IO_PORT);
565 529
566 /* request_region(io, 2, "pc87413"); */ 530 /* request_region(io, 2, "pc87413"); */
567 531
568 ret = register_reboot_notifier(&pc87413_notifier); 532 ret = register_reboot_notifier(&pc87413_notifier);
569 if (ret != 0) { 533 if (ret != 0) {
570 printk(KERN_ERR PFX "cannot register reboot notifier (err=%d)\n", 534 printk(KERN_ERR PFX
571 ret); 535 "cannot register reboot notifier (err=%d)\n", ret);
572 } 536 }
573 537
574 ret = misc_register(&pc87413_miscdev); 538 ret = misc_register(&pc87413_miscdev);
575
576 if (ret != 0) { 539 if (ret != 0) {
577 printk(KERN_ERR PFX "cannot register miscdev on minor=%d (err=%d)\n", 540 printk(KERN_ERR PFX "cannot register miscdev on minor=%d (err=%d)\n",
578 WATCHDOG_MINOR, ret); 541 WATCHDOG_MINOR, ret);
579 unregister_reboot_notifier(&pc87413_notifier); 542 unregister_reboot_notifier(&pc87413_notifier);
580 return ret; 543 return ret;
581 } 544 }
582
583 printk(KERN_INFO PFX "initialized. timeout=%d min \n", timeout); 545 printk(KERN_INFO PFX "initialized. timeout=%d min \n", timeout);
584
585 pc87413_enable(); 546 pc87413_enable();
586
587 return 0; 547 return 0;
588} 548}
589 549
@@ -600,17 +560,16 @@ static int __init pc87413_init(void)
600static void __exit pc87413_exit(void) 560static void __exit pc87413_exit(void)
601{ 561{
602 /* Stop the timer before we leave */ 562 /* Stop the timer before we leave */
603 if (!nowayout) 563 if (!nowayout) {
604 {
605 pc87413_disable(); 564 pc87413_disable();
606 printk(KERN_INFO MODNAME "Watchdog disabled.\n"); 565 printk(KERN_INFO MODNAME "Watchdog disabled.\n");
607 } 566 }
608 567
609 misc_deregister(&pc87413_miscdev); 568 misc_deregister(&pc87413_miscdev);
610 unregister_reboot_notifier(&pc87413_notifier); 569 unregister_reboot_notifier(&pc87413_notifier);
611 /* release_region(io,2); */ 570 /* release_region(io, 2); */
612 571
613 printk(MODNAME " watchdog component driver removed.\n"); 572 printk(KERN_INFO MODNAME " watchdog component driver removed.\n");
614} 573}
615 574
616module_init(pc87413_init); 575module_init(pc87413_init);
@@ -626,8 +585,12 @@ module_param(io, int, 0);
626MODULE_PARM_DESC(io, MODNAME " I/O port (default: " __MODULE_STRING(io) ")."); 585MODULE_PARM_DESC(io, MODNAME " I/O port (default: " __MODULE_STRING(io) ").");
627 586
628module_param(timeout, int, 0); 587module_param(timeout, int, 0);
629MODULE_PARM_DESC(timeout, "Watchdog timeout in minutes (default=" __MODULE_STRING(timeout) ")."); 588MODULE_PARM_DESC(timeout,
589 "Watchdog timeout in minutes (default="
590 __MODULE_STRING(timeout) ").");
630 591
631module_param(nowayout, int, 0); 592module_param(nowayout, int, 0);
632MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped once started (default=" __MODULE_STRING(WATCHDOG_NOWAYOUT) ")"); 593MODULE_PARM_DESC(nowayout,
594 "Watchdog cannot be stopped once started (default="
595 __MODULE_STRING(WATCHDOG_NOWAYOUT) ")");
633 596
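
The pc87413 hunks above convert the driver from the old inode-taking .ioctl file operation to .unlocked_ioctl and tidy the WDIOC_* cases, but the watchdog character-device interface seen from user space is unchanged. The following is a hypothetical user-space sketch (not part of the patch) that exercises those ioctls, assuming the driver's misc device shows up as /dev/watchdog:

/* Hypothetical sketch: drives the WDIOC_* ioctls reworked in the hunks
 * above.  Build with:  cc -o wdt-demo wdt-demo.c  */
#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/watchdog.h>

int main(void)
{
        struct watchdog_info ident;
        int timeout = 120;              /* seconds, per the watchdog API */
        int fd = open("/dev/watchdog", O_WRONLY);

        if (fd < 0) {
                perror("open /dev/watchdog");
                return 1;
        }

        if (ioctl(fd, WDIOC_GETSUPPORT, &ident) == 0)
                printf("driver: %s, options: 0x%x\n",
                       (char *)ident.identity, ident.options);

        /* The API takes seconds; pc87413 rounds this down to whole minutes
         * and hands the programmed value back via the GETTIMEOUT fall-through. */
        if (ioctl(fd, WDIOC_SETTIMEOUT, &timeout) == 0)
                printf("timeout now %d s\n", timeout);

        ioctl(fd, WDIOC_KEEPALIVE);     /* ping the timer once */

        /* Magic close: without the 'V' the driver keeps the timer running. */
        if (write(fd, "V", 1) != 1)
                perror("write");
        close(fd);
        return 0;
}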
diff --git a/drivers/watchdog/pcwd.c b/drivers/watchdog/pcwd.c
index 7b41434fac8c..3b0ddc7fcf3f 100644
--- a/drivers/watchdog/pcwd.c
+++ b/drivers/watchdog/pcwd.c
@@ -40,13 +40,15 @@
40 * fairly useless proc entry. 40 * fairly useless proc entry.
41 * 990610 removed said useless proc code for the merge <alan> 41 * 990610 removed said useless proc code for the merge <alan>
42 * 000403 Removed last traces of proc code. <davej> 42 * 000403 Removed last traces of proc code. <davej>
43 * 011214 Added nowayout module option to override CONFIG_WATCHDOG_NOWAYOUT <Matt_Domsch@dell.com> 43 * 011214 Added nowayout module option to override
44 * CONFIG_WATCHDOG_NOWAYOUT <Matt_Domsch@dell.com>
44 * Added timeout module option to override default 45 * Added timeout module option to override default
45 */ 46 */
46 47
47/* 48/*
48 * A bells and whistles driver is available from http://www.pcwd.de/ 49 * A bells and whistles driver is available from http://www.pcwd.de/
49 * More info available at http://www.berkprod.com/ or http://www.pcwatchdog.com/ 50 * More info available at http://www.berkprod.com/ or
51 * http://www.pcwatchdog.com/
50 */ 52 */
51 53
52#include <linux/module.h> /* For module specific items */ 54#include <linux/module.h> /* For module specific items */
@@ -65,9 +67,8 @@
65#include <linux/isa.h> /* For isa devices */ 67#include <linux/isa.h> /* For isa devices */
66#include <linux/ioport.h> /* For io-port access */ 68#include <linux/ioport.h> /* For io-port access */
67#include <linux/spinlock.h> /* For spin_lock/spin_unlock/... */ 69#include <linux/spinlock.h> /* For spin_lock/spin_unlock/... */
68 70#include <linux/uaccess.h> /* For copy_to_user/put_user/... */
69#include <asm/uaccess.h> /* For copy_to_user/put_user/... */ 71#include <linux/io.h> /* For inb/outb/... */
70#include <asm/io.h> /* For inb/outb/... */
71 72
72/* Module and version information */ 73/* Module and version information */
73#define WATCHDOG_VERSION "1.20" 74#define WATCHDOG_VERSION "1.20"
@@ -111,14 +112,16 @@ static int pcwd_ioports[] = { 0x270, 0x350, 0x370, 0x000 };
111#define WD_REVC_WTRP 0x01 /* Watchdog Trip status */ 112#define WD_REVC_WTRP 0x01 /* Watchdog Trip status */
112#define WD_REVC_HRBT 0x02 /* Watchdog Heartbeat */ 113#define WD_REVC_HRBT 0x02 /* Watchdog Heartbeat */
113#define WD_REVC_TTRP 0x04 /* Temperature Trip status */ 114#define WD_REVC_TTRP 0x04 /* Temperature Trip status */
114#define WD_REVC_RL2A 0x08 /* Relay 2 activated by on-board processor */ 115#define WD_REVC_RL2A 0x08 /* Relay 2 activated by
116 on-board processor */
115#define WD_REVC_RL1A 0x10 /* Relay 1 active */ 117#define WD_REVC_RL1A 0x10 /* Relay 1 active */
116#define WD_REVC_R2DS 0x40 /* Relay 2 disable */ 118#define WD_REVC_R2DS 0x40 /* Relay 2 disable */
117#define WD_REVC_RLY2 0x80 /* Relay 2 activated? */ 119#define WD_REVC_RLY2 0x80 /* Relay 2 activated? */
118/* Port 2 : Control Status #2 */ 120/* Port 2 : Control Status #2 */
119#define WD_WDIS 0x10 /* Watchdog Disabled */ 121#define WD_WDIS 0x10 /* Watchdog Disabled */
120#define WD_ENTP 0x20 /* Watchdog Enable Temperature Trip */ 122#define WD_ENTP 0x20 /* Watchdog Enable Temperature Trip */
121#define WD_SSEL 0x40 /* Watchdog Switch Select (1:SW1 <-> 0:SW2) */ 123#define WD_SSEL 0x40 /* Watchdog Switch Select
124 (1:SW1 <-> 0:SW2) */
122#define WD_WCMD 0x80 /* Watchdog Command Mode */ 125#define WD_WCMD 0x80 /* Watchdog Command Mode */
123 126
124/* max. time we give an ISA watchdog card to process a command */ 127/* max. time we give an ISA watchdog card to process a command */
@@ -142,7 +145,7 @@ static int pcwd_ioports[] = { 0x270, 0x350, 0x370, 0x000 };
142#define CMD_ISA_RESET_RELAYS 0x0D 145#define CMD_ISA_RESET_RELAYS 0x0D
143 146
144/* Watchdog's Dip Switch heartbeat values */ 147/* Watchdog's Dip Switch heartbeat values */
145static const int heartbeat_tbl [] = { 148static const int heartbeat_tbl[] = {
146 20, /* OFF-OFF-OFF = 20 Sec */ 149 20, /* OFF-OFF-OFF = 20 Sec */
147 40, /* OFF-OFF-ON = 40 Sec */ 150 40, /* OFF-OFF-ON = 40 Sec */
148 60, /* OFF-ON-OFF = 1 Min */ 151 60, /* OFF-ON-OFF = 1 Min */
@@ -168,11 +171,15 @@ static int cards_found;
168static atomic_t open_allowed = ATOMIC_INIT(1); 171static atomic_t open_allowed = ATOMIC_INIT(1);
169static char expect_close; 172static char expect_close;
170static int temp_panic; 173static int temp_panic;
171static struct { /* this is private data for each ISA-PC watchdog card */ 174
175/* this is private data for each ISA-PC watchdog card */
176static struct {
172 char fw_ver_str[6]; /* The cards firmware version */ 177 char fw_ver_str[6]; /* The cards firmware version */
173 int revision; /* The card's revision */ 178 int revision; /* The card's revision */
174 int supports_temp; /* Wether or not the card has a temperature device */ 179 int supports_temp; /* Whether or not the card has
175 int command_mode; /* Wether or not the card is in command mode */ 180 a temperature device */
181 int command_mode; /* Whether or not the card is in
182 command mode */
176 int boot_status; /* The card's boot status */ 183 int boot_status; /* The card's boot status */
177 int io_addr; /* The cards I/O address */ 184 int io_addr; /* The cards I/O address */
178 spinlock_t io_lock; /* the lock for io operations */ 185 spinlock_t io_lock; /* the lock for io operations */
@@ -186,16 +193,20 @@ static struct { /* this is private data for each ISA-PC watchdog card */
186#define DEBUG 2 /* print fancy stuff too */ 193#define DEBUG 2 /* print fancy stuff too */
187static int debug = QUIET; 194static int debug = QUIET;
188module_param(debug, int, 0); 195module_param(debug, int, 0);
189MODULE_PARM_DESC(debug, "Debug level: 0=Quiet, 1=Verbose, 2=Debug (default=0)"); 196MODULE_PARM_DESC(debug,
197 "Debug level: 0=Quiet, 1=Verbose, 2=Debug (default=0)");
190 198
191#define WATCHDOG_HEARTBEAT 0 /* default heartbeat = delay-time from dip-switches */ 199/* default heartbeat = delay-time from dip-switches */
200#define WATCHDOG_HEARTBEAT 0
192static int heartbeat = WATCHDOG_HEARTBEAT; 201static int heartbeat = WATCHDOG_HEARTBEAT;
193module_param(heartbeat, int, 0); 202module_param(heartbeat, int, 0);
194MODULE_PARM_DESC(heartbeat, "Watchdog heartbeat in seconds. (2<=heartbeat<=7200 or 0=delay-time from dip-switches, default=" __MODULE_STRING(WATCHDOG_HEARTBEAT) ")"); 203MODULE_PARM_DESC(heartbeat, "Watchdog heartbeat in seconds. (2 <= heartbeat <= 7200 or 0=delay-time from dip-switches, default=" __MODULE_STRING(WATCHDOG_HEARTBEAT) ")");
195 204
196static int nowayout = WATCHDOG_NOWAYOUT; 205static int nowayout = WATCHDOG_NOWAYOUT;
197module_param(nowayout, int, 0); 206module_param(nowayout, int, 0);
198MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped once started (default=" __MODULE_STRING(WATCHDOG_NOWAYOUT) ")"); 207MODULE_PARM_DESC(nowayout,
208 "Watchdog cannot be stopped once started (default="
209 __MODULE_STRING(WATCHDOG_NOWAYOUT) ")");
199 210
200/* 211/*
201 * Internal functions 212 * Internal functions
@@ -224,7 +235,7 @@ static int send_isa_command(int cmd)
224 if (port0 == last_port0) 235 if (port0 == last_port0)
225 break; /* Data is stable */ 236 break; /* Data is stable */
226 237
227 udelay (250); 238 udelay(250);
228 } 239 }
229 240
230 if (debug >= DEBUG) 241 if (debug >= DEBUG)
@@ -236,7 +247,7 @@ static int send_isa_command(int cmd)
236 247
237static int set_command_mode(void) 248static int set_command_mode(void)
238{ 249{
239 int i, found=0, count=0; 250 int i, found = 0, count = 0;
240 251
241 /* Set the card into command mode */ 252 /* Set the card into command mode */
242 spin_lock(&pcwd_private.io_lock); 253 spin_lock(&pcwd_private.io_lock);
@@ -261,7 +272,7 @@ static int set_command_mode(void)
261 printk(KERN_DEBUG PFX "command_mode=%d\n", 272 printk(KERN_DEBUG PFX "command_mode=%d\n",
262 pcwd_private.command_mode); 273 pcwd_private.command_mode);
263 274
264 return(found); 275 return found;
265} 276}
266 277
267static void unset_command_mode(void) 278static void unset_command_mode(void)
@@ -296,7 +307,8 @@ static inline void pcwd_get_firmware(void)
296 ten = send_isa_command(CMD_ISA_VERSION_TENTH); 307 ten = send_isa_command(CMD_ISA_VERSION_TENTH);
297 hund = send_isa_command(CMD_ISA_VERSION_HUNDRETH); 308 hund = send_isa_command(CMD_ISA_VERSION_HUNDRETH);
298 minor = send_isa_command(CMD_ISA_VERSION_MINOR); 309 minor = send_isa_command(CMD_ISA_VERSION_MINOR);
299 sprintf(pcwd_private.fw_ver_str, "%c.%c%c%c", one, ten, hund, minor); 310 sprintf(pcwd_private.fw_ver_str, "%c.%c%c%c",
311 one, ten, hund, minor);
300 } 312 }
301 unset_command_mode(); 313 unset_command_mode();
302 314
@@ -305,7 +317,7 @@ static inline void pcwd_get_firmware(void)
305 317
306static inline int pcwd_get_option_switches(void) 318static inline int pcwd_get_option_switches(void)
307{ 319{
308 int option_switches=0; 320 int option_switches = 0;
309 321
310 if (set_command_mode()) { 322 if (set_command_mode()) {
311 /* Get switch settings */ 323 /* Get switch settings */
@@ -313,7 +325,7 @@ static inline int pcwd_get_option_switches(void)
313 } 325 }
314 326
315 unset_command_mode(); 327 unset_command_mode();
316 return(option_switches); 328 return option_switches;
317} 329}
318 330
319static void pcwd_show_card_info(void) 331static void pcwd_show_card_info(void)
@@ -322,7 +334,9 @@ static void pcwd_show_card_info(void)
322 334
323 /* Get some extra info from the hardware (in command/debug/diag mode) */ 335 /* Get some extra info from the hardware (in command/debug/diag mode) */
324 if (pcwd_private.revision == PCWD_REVISION_A) 336 if (pcwd_private.revision == PCWD_REVISION_A)
325 printk(KERN_INFO PFX "ISA-PC Watchdog (REV.A) detected at port 0x%04x\n", pcwd_private.io_addr); 337 printk(KERN_INFO PFX
338 "ISA-PC Watchdog (REV.A) detected at port 0x%04x\n",
339 pcwd_private.io_addr);
326 else if (pcwd_private.revision == PCWD_REVISION_C) { 340 else if (pcwd_private.revision == PCWD_REVISION_C) {
327 pcwd_get_firmware(); 341 pcwd_get_firmware();
328 printk(KERN_INFO PFX "ISA-PC Watchdog (REV.C) detected at port 0x%04x (Firmware version: %s)\n", 342 printk(KERN_INFO PFX "ISA-PC Watchdog (REV.C) detected at port 0x%04x (Firmware version: %s)\n",
@@ -347,12 +361,15 @@ static void pcwd_show_card_info(void)
347 printk(KERN_INFO PFX "Previous reboot was caused by the card\n"); 361 printk(KERN_INFO PFX "Previous reboot was caused by the card\n");
348 362
349 if (pcwd_private.boot_status & WDIOF_OVERHEAT) { 363 if (pcwd_private.boot_status & WDIOF_OVERHEAT) {
350 printk(KERN_EMERG PFX "Card senses a CPU Overheat. Panicking!\n"); 364 printk(KERN_EMERG PFX
351 printk(KERN_EMERG PFX "CPU Overheat\n"); 365 "Card senses a CPU Overheat. Panicking!\n");
366 printk(KERN_EMERG PFX
367 "CPU Overheat\n");
352 } 368 }
353 369
354 if (pcwd_private.boot_status == 0) 370 if (pcwd_private.boot_status == 0)
355 printk(KERN_INFO PFX "No previous trip detected - Cold boot or reset\n"); 371 printk(KERN_INFO PFX
372 "No previous trip detected - Cold boot or reset\n");
356} 373}
357 374
358static void pcwd_timer_ping(unsigned long data) 375static void pcwd_timer_ping(unsigned long data)
@@ -361,11 +378,12 @@ static void pcwd_timer_ping(unsigned long data)
361 378
362 /* If we got a heartbeat pulse within the WDT_INTERVAL 379 /* If we got a heartbeat pulse within the WDT_INTERVAL
363 * we agree to ping the WDT */ 380 * we agree to ping the WDT */
364 if(time_before(jiffies, pcwd_private.next_heartbeat)) { 381 if (time_before(jiffies, pcwd_private.next_heartbeat)) {
365 /* Ping the watchdog */ 382 /* Ping the watchdog */
366 spin_lock(&pcwd_private.io_lock); 383 spin_lock(&pcwd_private.io_lock);
367 if (pcwd_private.revision == PCWD_REVISION_A) { 384 if (pcwd_private.revision == PCWD_REVISION_A) {
368 /* Rev A cards are reset by setting the WD_WDRST bit in register 1 */ 385 /* Rev A cards are reset by setting the
386 WD_WDRST bit in register 1 */
369 wdrst_stat = inb_p(pcwd_private.io_addr); 387 wdrst_stat = inb_p(pcwd_private.io_addr);
370 wdrst_stat &= 0x0F; 388 wdrst_stat &= 0x0F;
371 wdrst_stat |= WD_WDRST; 389 wdrst_stat |= WD_WDRST;
@@ -381,7 +399,8 @@ static void pcwd_timer_ping(unsigned long data)
381 399
382 spin_unlock(&pcwd_private.io_lock); 400 spin_unlock(&pcwd_private.io_lock);
383 } else { 401 } else {
384 printk(KERN_WARNING PFX "Heartbeat lost! Will not ping the watchdog\n"); 402 printk(KERN_WARNING PFX
403 "Heartbeat lost! Will not ping the watchdog\n");
385 } 404 }
386} 405}
387 406
@@ -454,7 +473,7 @@ static int pcwd_keepalive(void)
454 473
455static int pcwd_set_heartbeat(int t) 474static int pcwd_set_heartbeat(int t)
456{ 475{
457 if ((t < 2) || (t > 7200)) /* arbitrary upper limit */ 476 if (t < 2 || t > 7200) /* arbitrary upper limit */
458 return -EINVAL; 477 return -EINVAL;
459 478
460 heartbeat = t; 479 heartbeat = t;
@@ -470,7 +489,7 @@ static int pcwd_get_status(int *status)
470{ 489{
471 int control_status; 490 int control_status;
472 491
473 *status=0; 492 *status = 0;
474 spin_lock(&pcwd_private.io_lock); 493 spin_lock(&pcwd_private.io_lock);
475 if (pcwd_private.revision == PCWD_REVISION_A) 494 if (pcwd_private.revision == PCWD_REVISION_A)
476 /* Rev A cards return status information from 495 /* Rev A cards return status information from
@@ -494,9 +513,9 @@ static int pcwd_get_status(int *status)
494 if (control_status & WD_T110) { 513 if (control_status & WD_T110) {
495 *status |= WDIOF_OVERHEAT; 514 *status |= WDIOF_OVERHEAT;
496 if (temp_panic) { 515 if (temp_panic) {
497 printk(KERN_INFO PFX "Temperature overheat trip!\n"); 516 printk(KERN_INFO PFX
517 "Temperature overheat trip!\n");
498 kernel_power_off(); 518 kernel_power_off();
499 /* or should we just do a: panic(PFX "Temperature overheat trip!\n"); */
500 } 519 }
501 } 520 }
502 } else { 521 } else {
@@ -506,9 +525,9 @@ static int pcwd_get_status(int *status)
506 if (control_status & WD_REVC_TTRP) { 525 if (control_status & WD_REVC_TTRP) {
507 *status |= WDIOF_OVERHEAT; 526 *status |= WDIOF_OVERHEAT;
508 if (temp_panic) { 527 if (temp_panic) {
509 printk(KERN_INFO PFX "Temperature overheat trip!\n"); 528 printk(KERN_INFO PFX
529 "Temperature overheat trip!\n");
510 kernel_power_off(); 530 kernel_power_off();
511 /* or should we just do a: panic(PFX "Temperature overheat trip!\n"); */
512 } 531 }
513 } 532 }
514 } 533 }
@@ -524,18 +543,21 @@ static int pcwd_clear_status(void)
524 spin_lock(&pcwd_private.io_lock); 543 spin_lock(&pcwd_private.io_lock);
525 544
526 if (debug >= VERBOSE) 545 if (debug >= VERBOSE)
527 printk(KERN_INFO PFX "clearing watchdog trip status\n"); 546 printk(KERN_INFO PFX
547 "clearing watchdog trip status\n");
528 548
529 control_status = inb_p(pcwd_private.io_addr + 1); 549 control_status = inb_p(pcwd_private.io_addr + 1);
530 550
531 if (debug >= DEBUG) { 551 if (debug >= DEBUG) {
532 printk(KERN_DEBUG PFX "status was: 0x%02x\n", control_status); 552 printk(KERN_DEBUG PFX "status was: 0x%02x\n",
553 control_status);
533 printk(KERN_DEBUG PFX "sending: 0x%02x\n", 554 printk(KERN_DEBUG PFX "sending: 0x%02x\n",
534 (control_status & WD_REVC_R2DS)); 555 (control_status & WD_REVC_R2DS));
535 } 556 }
536 557
537 /* clear reset status & Keep Relay 2 disable state as it is */ 558 /* clear reset status & Keep Relay 2 disable state as it is */
538 outb_p((control_status & WD_REVC_R2DS), pcwd_private.io_addr + 1); 559 outb_p((control_status & WD_REVC_R2DS),
560 pcwd_private.io_addr + 1);
539 561
540 spin_unlock(&pcwd_private.io_lock); 562 spin_unlock(&pcwd_private.io_lock);
541 } 563 }
@@ -572,8 +594,7 @@ static int pcwd_get_temperature(int *temperature)
572 * /dev/watchdog handling 594 * /dev/watchdog handling
573 */ 595 */
574 596
575static int pcwd_ioctl(struct inode *inode, struct file *file, 597static long pcwd_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
576 unsigned int cmd, unsigned long arg)
577{ 598{
578 int rv; 599 int rv;
579 int status; 600 int status;
@@ -590,12 +611,9 @@ static int pcwd_ioctl(struct inode *inode, struct file *file,
590 .identity = "PCWD", 611 .identity = "PCWD",
591 }; 612 };
592 613
593 switch(cmd) { 614 switch (cmd) {
594 default:
595 return -ENOTTY;
596
597 case WDIOC_GETSUPPORT: 615 case WDIOC_GETSUPPORT:
598 if(copy_to_user(argp, &ident, sizeof(ident))) 616 if (copy_to_user(argp, &ident, sizeof(ident)))
599 return -EFAULT; 617 return -EFAULT;
600 return 0; 618 return 0;
601 619
@@ -613,25 +631,22 @@ static int pcwd_ioctl(struct inode *inode, struct file *file,
613 return put_user(temperature, argp); 631 return put_user(temperature, argp);
614 632
615 case WDIOC_SETOPTIONS: 633 case WDIOC_SETOPTIONS:
616 if (pcwd_private.revision == PCWD_REVISION_C) 634 if (pcwd_private.revision == PCWD_REVISION_C) {
617 { 635 if (get_user(rv, argp))
618 if(copy_from_user(&rv, argp, sizeof(int)))
619 return -EFAULT; 636 return -EFAULT;
620 637
621 if (rv & WDIOS_DISABLECARD) 638 if (rv & WDIOS_DISABLECARD) {
622 { 639 status = pcwd_stop();
623 return pcwd_stop(); 640 if (status < 0)
641 return status;
624 } 642 }
625 643 if (rv & WDIOS_ENABLECARD) {
626 if (rv & WDIOS_ENABLECARD) 644 status = pcwd_start();
627 { 645 if (status < 0)
628 return pcwd_start(); 646 return status;
629 } 647 }
630
631 if (rv & WDIOS_TEMPPANIC) 648 if (rv & WDIOS_TEMPPANIC)
632 {
633 temp_panic = 1; 649 temp_panic = 1;
634 }
635 } 650 }
636 return -EINVAL; 651 return -EINVAL;
637 652
@@ -651,6 +666,9 @@ static int pcwd_ioctl(struct inode *inode, struct file *file,
651 666
652 case WDIOC_GETTIMEOUT: 667 case WDIOC_GETTIMEOUT:
653 return put_user(heartbeat, argp); 668 return put_user(heartbeat, argp);
669
670 default:
671 return -ENOTTY;
654 } 672 }
655 673
656 return 0; 674 return 0;
@@ -682,16 +700,10 @@ static ssize_t pcwd_write(struct file *file, const char __user *buf, size_t len,
682 700
683static int pcwd_open(struct inode *inode, struct file *file) 701static int pcwd_open(struct inode *inode, struct file *file)
684{ 702{
685 if (!atomic_dec_and_test(&open_allowed) ) { 703 if (test_and_set_bit(0, &open_allowed))
686 if (debug >= VERBOSE)
687 printk(KERN_ERR PFX "Attempt to open already opened device.\n");
688 atomic_inc( &open_allowed );
689 return -EBUSY; 704 return -EBUSY;
690 }
691
692 if (nowayout) 705 if (nowayout)
693 __module_get(THIS_MODULE); 706 __module_get(THIS_MODULE);
694
695 /* Activate */ 707 /* Activate */
696 pcwd_start(); 708 pcwd_start();
697 pcwd_keepalive(); 709 pcwd_keepalive();
@@ -700,14 +712,15 @@ static int pcwd_open(struct inode *inode, struct file *file)
700 712
701static int pcwd_close(struct inode *inode, struct file *file) 713static int pcwd_close(struct inode *inode, struct file *file)
702{ 714{
703 if (expect_close == 42) { 715 if (expect_close == 42)
704 pcwd_stop(); 716 pcwd_stop();
705 } else { 717 else {
706 printk(KERN_CRIT PFX "Unexpected close, not stopping watchdog!\n"); 718 printk(KERN_CRIT PFX
719 "Unexpected close, not stopping watchdog!\n");
707 pcwd_keepalive(); 720 pcwd_keepalive();
708 } 721 }
709 expect_close = 0; 722 expect_close = 0;
710 atomic_inc( &open_allowed ); 723 clear_bit(0, &open_allowed);
711 return 0; 724 return 0;
712} 725}
713 726
@@ -750,7 +763,7 @@ static const struct file_operations pcwd_fops = {
750 .owner = THIS_MODULE, 763 .owner = THIS_MODULE,
751 .llseek = no_llseek, 764 .llseek = no_llseek,
752 .write = pcwd_write, 765 .write = pcwd_write,
753 .ioctl = pcwd_ioctl, 766 .unlocked_ioctl = pcwd_ioctl,
754 .open = pcwd_open, 767 .open = pcwd_open,
755 .release = pcwd_close, 768 .release = pcwd_close,
756}; 769};
@@ -788,7 +801,7 @@ static inline int get_revision(void)
788 * presumes a floating bus reads as 0xff. */ 801 * presumes a floating bus reads as 0xff. */
789 if ((inb(pcwd_private.io_addr + 2) == 0xFF) || 802 if ((inb(pcwd_private.io_addr + 2) == 0xFF) ||
790 (inb(pcwd_private.io_addr + 3) == 0xFF)) 803 (inb(pcwd_private.io_addr + 3) == 0xFF))
791 r=PCWD_REVISION_A; 804 r = PCWD_REVISION_A;
792 spin_unlock(&pcwd_private.io_lock); 805 spin_unlock(&pcwd_private.io_lock);
793 806
794 return r; 807 return r;
@@ -803,7 +816,7 @@ static inline int get_revision(void)
803 */ 816 */
804static int __devinit pcwd_isa_match(struct device *dev, unsigned int id) 817static int __devinit pcwd_isa_match(struct device *dev, unsigned int id)
805{ 818{
806 int base_addr=pcwd_ioports[id]; 819 int base_addr = pcwd_ioports[id];
807 int port0, last_port0; /* Reg 0, in case it's REV A */ 820 int port0, last_port0; /* Reg 0, in case it's REV A */
808 int port1, last_port1; /* Register 1 for REV C cards */ 821 int port1, last_port1; /* Register 1 for REV C cards */
809 int i; 822 int i;
@@ -813,7 +826,7 @@ static int __devinit pcwd_isa_match(struct device *dev, unsigned int id)
813 printk(KERN_DEBUG PFX "pcwd_isa_match id=%d\n", 826 printk(KERN_DEBUG PFX "pcwd_isa_match id=%d\n",
814 id); 827 id);
815 828
816 if (!request_region (base_addr, 4, "PCWD")) { 829 if (!request_region(base_addr, 4, "PCWD")) {
817 printk(KERN_INFO PFX "Port 0x%04x unavailable\n", base_addr); 830 printk(KERN_INFO PFX "Port 0x%04x unavailable\n", base_addr);
818 return 0; 831 return 0;
819 } 832 }
@@ -842,7 +855,7 @@ static int __devinit pcwd_isa_match(struct device *dev, unsigned int id)
842 } 855 }
843 } 856 }
844 } 857 }
845 release_region (base_addr, 4); 858 release_region(base_addr, 4);
846 859
847 return retval; 860 return retval;
848} 861}
@@ -857,7 +870,8 @@ static int __devinit pcwd_isa_probe(struct device *dev, unsigned int id)
857 870
858 cards_found++; 871 cards_found++;
859 if (cards_found == 1) 872 if (cards_found == 1)
860 printk(KERN_INFO PFX "v%s Ken Hollis (kenji@bitgate.com)\n", WD_VER); 873 printk(KERN_INFO PFX "v%s Ken Hollis (kenji@bitgate.com)\n",
874 WD_VER);
861 875
862 if (cards_found > 1) { 876 if (cards_found > 1) {
863 printk(KERN_ERR PFX "This driver only supports 1 device\n"); 877 printk(KERN_ERR PFX "This driver only supports 1 device\n");
@@ -875,10 +889,11 @@ static int __devinit pcwd_isa_probe(struct device *dev, unsigned int id)
875 /* Check card's revision */ 889 /* Check card's revision */
876 pcwd_private.revision = get_revision(); 890 pcwd_private.revision = get_revision();
877 891
878 if (!request_region(pcwd_private.io_addr, (pcwd_private.revision == PCWD_REVISION_A) ? 2 : 4, "PCWD")) { 892 if (!request_region(pcwd_private.io_addr,
893 (pcwd_private.revision == PCWD_REVISION_A) ? 2 : 4, "PCWD")) {
879 printk(KERN_ERR PFX "I/O address 0x%04x already in use\n", 894 printk(KERN_ERR PFX "I/O address 0x%04x already in use\n",
880 pcwd_private.io_addr); 895 pcwd_private.io_addr);
881 ret=-EIO; 896 ret = -EIO;
882 goto error_request_region; 897 goto error_request_region;
883 } 898 }
884 899
@@ -908,26 +923,30 @@ static int __devinit pcwd_isa_probe(struct device *dev, unsigned int id)
908 if (heartbeat == 0) 923 if (heartbeat == 0)
909 heartbeat = heartbeat_tbl[(pcwd_get_option_switches() & 0x07)]; 924 heartbeat = heartbeat_tbl[(pcwd_get_option_switches() & 0x07)];
910 925
911 /* Check that the heartbeat value is within it's range ; if not reset to the default */ 926 /* Check that the heartbeat value is within it's range;
927 if not reset to the default */
912 if (pcwd_set_heartbeat(heartbeat)) { 928 if (pcwd_set_heartbeat(heartbeat)) {
913 pcwd_set_heartbeat(WATCHDOG_HEARTBEAT); 929 pcwd_set_heartbeat(WATCHDOG_HEARTBEAT);
914 printk(KERN_INFO PFX "heartbeat value must be 2<=heartbeat<=7200, using %d\n", 930 printk(KERN_INFO PFX
915 WATCHDOG_HEARTBEAT); 931 "heartbeat value must be 2 <= heartbeat <= 7200, using %d\n",
932 WATCHDOG_HEARTBEAT);
916 } 933 }
917 934
918 if (pcwd_private.supports_temp) { 935 if (pcwd_private.supports_temp) {
919 ret = misc_register(&temp_miscdev); 936 ret = misc_register(&temp_miscdev);
920 if (ret) { 937 if (ret) {
921 printk(KERN_ERR PFX "cannot register miscdev on minor=%d (err=%d)\n", 938 printk(KERN_ERR PFX
922 TEMP_MINOR, ret); 939 "cannot register miscdev on minor=%d (err=%d)\n",
940 TEMP_MINOR, ret);
923 goto error_misc_register_temp; 941 goto error_misc_register_temp;
924 } 942 }
925 } 943 }
926 944
927 ret = misc_register(&pcwd_miscdev); 945 ret = misc_register(&pcwd_miscdev);
928 if (ret) { 946 if (ret) {
929 printk(KERN_ERR PFX "cannot register miscdev on minor=%d (err=%d)\n", 947 printk(KERN_ERR PFX
930 WATCHDOG_MINOR, ret); 948 "cannot register miscdev on minor=%d (err=%d)\n",
949 WATCHDOG_MINOR, ret);
931 goto error_misc_register_watchdog; 950 goto error_misc_register_watchdog;
932 } 951 }
933 952
@@ -940,7 +959,8 @@ error_misc_register_watchdog:
940 if (pcwd_private.supports_temp) 959 if (pcwd_private.supports_temp)
941 misc_deregister(&temp_miscdev); 960 misc_deregister(&temp_miscdev);
942error_misc_register_temp: 961error_misc_register_temp:
943 release_region(pcwd_private.io_addr, (pcwd_private.revision == PCWD_REVISION_A) ? 2 : 4); 962 release_region(pcwd_private.io_addr,
963 (pcwd_private.revision == PCWD_REVISION_A) ? 2 : 4);
944error_request_region: 964error_request_region:
945 pcwd_private.io_addr = 0x0000; 965 pcwd_private.io_addr = 0x0000;
946 cards_found--; 966 cards_found--;
@@ -964,7 +984,8 @@ static int __devexit pcwd_isa_remove(struct device *dev, unsigned int id)
964 misc_deregister(&pcwd_miscdev); 984 misc_deregister(&pcwd_miscdev);
965 if (pcwd_private.supports_temp) 985 if (pcwd_private.supports_temp)
966 misc_deregister(&temp_miscdev); 986 misc_deregister(&temp_miscdev);
967 release_region(pcwd_private.io_addr, (pcwd_private.revision == PCWD_REVISION_A) ? 2 : 4); 987 release_region(pcwd_private.io_addr,
988 (pcwd_private.revision == PCWD_REVISION_A) ? 2 : 4);
968 pcwd_private.io_addr = 0x0000; 989 pcwd_private.io_addr = 0x0000;
969 cards_found--; 990 cards_found--;
970 991
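
The pcwd.c hunks above likewise move the handler to .unlocked_ioctl, switch the single-open guard to test_and_set_bit(), and keep the magic-close ('V') handling in release(). A hedged user-space sketch of querying the card's trip status and temperature through that interface (hypothetical, again assuming /dev/watchdog; the temperature reading is only meaningful on cards with the temperature option):

/* Hypothetical sketch: reading the boot/trip status and temperature that the
 * pcwd.c ioctl hunk above reports via WDIOC_GETBOOTSTATUS and WDIOC_GETTEMP. */
#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/watchdog.h>

int main(void)
{
        int fd = open("/dev/watchdog", O_RDWR);
        int boot_status = 0, temp = 0;

        if (fd < 0) {
                perror("open /dev/watchdog");
                return 1;
        }

        if (ioctl(fd, WDIOC_GETBOOTSTATUS, &boot_status) == 0) {
                if (boot_status & WDIOF_CARDRESET)
                        puts("previous reboot was caused by the card");
                if (boot_status & WDIOF_OVERHEAT)
                        puts("temperature trip recorded");
        }

        if (ioctl(fd, WDIOC_GETTEMP, &temp) == 0)
                printf("temperature: %d\n", temp);

        write(fd, "V", 1);              /* magic close, as handled in pcwd_close() */
        close(fd);
        return 0;
}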
diff --git a/drivers/watchdog/pcwd_pci.c b/drivers/watchdog/pcwd_pci.c
index 61a89e959642..90eb1d4271d7 100644
--- a/drivers/watchdog/pcwd_pci.c
+++ b/drivers/watchdog/pcwd_pci.c
@@ -46,9 +46,8 @@
46#include <linux/pci.h> /* For pci functions */ 46#include <linux/pci.h> /* For pci functions */
47#include <linux/ioport.h> /* For io-port access */ 47#include <linux/ioport.h> /* For io-port access */
48#include <linux/spinlock.h> /* For spin_lock/spin_unlock/... */ 48#include <linux/spinlock.h> /* For spin_lock/spin_unlock/... */
49 49#include <linux/uaccess.h> /* For copy_to_user/put_user/... */
50#include <asm/uaccess.h> /* For copy_to_user/put_user/... */ 50#include <linux/io.h> /* For inb/outb/... */
51#include <asm/io.h> /* For inb/outb/... */
52 51
53/* Module and version information */ 52/* Module and version information */
54#define WATCHDOG_VERSION "1.03" 53#define WATCHDOG_VERSION "1.03"
@@ -97,7 +96,7 @@
97#define CMD_GET_CLEAR_RESET_COUNT 0x84 96#define CMD_GET_CLEAR_RESET_COUNT 0x84
98 97
99/* Watchdog's Dip Switch heartbeat values */ 98/* Watchdog's Dip Switch heartbeat values */
100static const int heartbeat_tbl [] = { 99static const int heartbeat_tbl[] = {
101 5, /* OFF-OFF-OFF = 5 Sec */ 100 5, /* OFF-OFF-OFF = 5 Sec */
102 10, /* OFF-OFF-ON = 10 Sec */ 101 10, /* OFF-OFF-ON = 10 Sec */
103 30, /* OFF-ON-OFF = 30 Sec */ 102 30, /* OFF-ON-OFF = 30 Sec */
@@ -220,11 +219,10 @@ static void pcipcwd_show_card_info(void)
220 int option_switches; 219 int option_switches;
221 220
222 got_fw_rev = send_command(CMD_GET_FIRMWARE_VERSION, &fw_rev_major, &fw_rev_minor); 221 got_fw_rev = send_command(CMD_GET_FIRMWARE_VERSION, &fw_rev_major, &fw_rev_minor);
223 if (got_fw_rev) { 222 if (got_fw_rev)
224 sprintf(fw_ver_str, "%u.%02u", fw_rev_major, fw_rev_minor); 223 sprintf(fw_ver_str, "%u.%02u", fw_rev_major, fw_rev_minor);
225 } else { 224 else
226 sprintf(fw_ver_str, "<card no answer>"); 225 sprintf(fw_ver_str, "<card no answer>");
227 }
228 226
229 /* Get switch settings */ 227 /* Get switch settings */
230 option_switches = pcipcwd_get_option_switches(); 228 option_switches = pcipcwd_get_option_switches();
@@ -331,7 +329,7 @@ static int pcipcwd_get_status(int *status)
331{ 329{
332 int control_status; 330 int control_status;
333 331
334 *status=0; 332 *status = 0;
335 control_status = inb_p(pcipcwd_private.io_addr + 1); 333 control_status = inb_p(pcipcwd_private.io_addr + 1);
336 if (control_status & WD_PCI_WTRP) 334 if (control_status & WD_PCI_WTRP)
337 *status |= WDIOF_CARDRESET; 335 *status |= WDIOF_CARDRESET;
@@ -369,8 +367,8 @@ static int pcipcwd_clear_status(void)
369 outb_p((control_status & WD_PCI_R2DS) | WD_PCI_WTRP, pcipcwd_private.io_addr + 1); 367 outb_p((control_status & WD_PCI_R2DS) | WD_PCI_WTRP, pcipcwd_private.io_addr + 1);
370 368
371 /* clear reset counter */ 369 /* clear reset counter */
372 msb=0; 370 msb = 0;
373 reset_counter=0xff; 371 reset_counter = 0xff;
374 send_command(CMD_GET_CLEAR_RESET_COUNT, &msb, &reset_counter); 372 send_command(CMD_GET_CLEAR_RESET_COUNT, &msb, &reset_counter);
375 373
376 if (debug >= DEBUG) { 374 if (debug >= DEBUG) {
@@ -442,7 +440,7 @@ static ssize_t pcipcwd_write(struct file *file, const char __user *data,
442 /* scan to see whether or not we got the magic character */ 440 /* scan to see whether or not we got the magic character */
443 for (i = 0; i != len; i++) { 441 for (i = 0; i != len; i++) {
444 char c; 442 char c;
445 if(get_user(c, data+i)) 443 if (get_user(c, data + i))
446 return -EFAULT; 444 return -EFAULT;
447 if (c == 'V') 445 if (c == 'V')
448 expect_release = 42; 446 expect_release = 42;
@@ -455,8 +453,8 @@ static ssize_t pcipcwd_write(struct file *file, const char __user *data,
455 return len; 453 return len;
456} 454}
457 455
458static int pcipcwd_ioctl(struct inode *inode, struct file *file, 456static long pcipcwd_ioctl(struct file *file, unsigned int cmd,
459 unsigned int cmd, unsigned long arg) 457 unsigned long arg)
460{ 458{
461 void __user *argp = (void __user *)arg; 459 void __user *argp = (void __user *)arg;
462 int __user *p = argp; 460 int __user *p = argp;
@@ -471,92 +469,89 @@ static int pcipcwd_ioctl(struct inode *inode, struct file *file,
471 }; 469 };
472 470
473 switch (cmd) { 471 switch (cmd) {
474 case WDIOC_GETSUPPORT: 472 case WDIOC_GETSUPPORT:
475 return copy_to_user(argp, &ident, 473 return copy_to_user(argp, &ident, sizeof(ident)) ? -EFAULT : 0;
476 sizeof (ident)) ? -EFAULT : 0; 474
475 case WDIOC_GETSTATUS:
476 {
477 int status;
478 pcipcwd_get_status(&status);
479 return put_user(status, p);
480 }
477 481
478 case WDIOC_GETSTATUS: 482 case WDIOC_GETBOOTSTATUS:
479 { 483 return put_user(pcipcwd_private.boot_status, p);
480 int status;
481 484
482 pcipcwd_get_status(&status); 485 case WDIOC_GETTEMP:
486 {
487 int temperature;
483 488
484 return put_user(status, p); 489 if (pcipcwd_get_temperature(&temperature))
485 } 490 return -EFAULT;
486 491
487 case WDIOC_GETBOOTSTATUS: 492 return put_user(temperature, p);
488 return put_user(pcipcwd_private.boot_status, p); 493 }
489 494
490 case WDIOC_GETTEMP: 495 case WDIOC_SETOPTIONS:
491 { 496 {
492 int temperature; 497 int new_options, retval = -EINVAL;
493 498
494 if (pcipcwd_get_temperature(&temperature)) 499 if (get_user(new_options, p))
495 return -EFAULT; 500 return -EFAULT;
496 501
497 return put_user(temperature, p); 502 if (new_options & WDIOS_DISABLECARD) {
503 if (pcipcwd_stop())
504 return -EIO;
505 retval = 0;
498 } 506 }
499 507
500 case WDIOC_KEEPALIVE: 508 if (new_options & WDIOS_ENABLECARD) {
501 pcipcwd_keepalive(); 509 if (pcipcwd_start())
502 return 0; 510 return -EIO;
503 511 retval = 0;
504 case WDIOC_SETOPTIONS: 512 }
505 {
506 int new_options, retval = -EINVAL;
507
508 if (get_user (new_options, p))
509 return -EFAULT;
510
511 if (new_options & WDIOS_DISABLECARD) {
512 if (pcipcwd_stop())
513 return -EIO;
514 retval = 0;
515 }
516 513
517 if (new_options & WDIOS_ENABLECARD) { 514 if (new_options & WDIOS_TEMPPANIC) {
518 if (pcipcwd_start()) 515 temp_panic = 1;
519 return -EIO; 516 retval = 0;
520 retval = 0; 517 }
521 }
522 518
523 if (new_options & WDIOS_TEMPPANIC) { 519 return retval;
524 temp_panic = 1; 520 }
525 retval = 0;
526 }
527 521
528 return retval; 522 case WDIOC_KEEPALIVE:
529 } 523 pcipcwd_keepalive();
524 return 0;
530 525
531 case WDIOC_SETTIMEOUT: 526 case WDIOC_SETTIMEOUT:
532 { 527 {
533 int new_heartbeat; 528 int new_heartbeat;
534 529
535 if (get_user(new_heartbeat, p)) 530 if (get_user(new_heartbeat, p))
536 return -EFAULT; 531 return -EFAULT;
537 532
538 if (pcipcwd_set_heartbeat(new_heartbeat)) 533 if (pcipcwd_set_heartbeat(new_heartbeat))
539 return -EINVAL; 534 return -EINVAL;
540 535
541 pcipcwd_keepalive(); 536 pcipcwd_keepalive();
542 /* Fall */ 537 /* Fall */
543 } 538 }
544 539
545 case WDIOC_GETTIMEOUT: 540 case WDIOC_GETTIMEOUT:
546 return put_user(heartbeat, p); 541 return put_user(heartbeat, p);
547 542
548 case WDIOC_GETTIMELEFT: 543 case WDIOC_GETTIMELEFT:
549 { 544 {
550 int time_left; 545 int time_left;
551 546
552 if (pcipcwd_get_timeleft(&time_left)) 547 if (pcipcwd_get_timeleft(&time_left))
553 return -EFAULT; 548 return -EFAULT;
554 549
555 return put_user(time_left, p); 550 return put_user(time_left, p);
556 } 551 }
557 552
558 default: 553 default:
559 return -ENOTTY; 554 return -ENOTTY;
560 } 555 }
561} 556}
562 557
@@ -603,7 +598,7 @@ static ssize_t pcipcwd_temp_read(struct file *file, char __user *data,
603 if (pcipcwd_get_temperature(&temperature)) 598 if (pcipcwd_get_temperature(&temperature))
604 return -EFAULT; 599 return -EFAULT;
605 600
606 if (copy_to_user (data, &temperature, 1)) 601 if (copy_to_user(data, &temperature, 1))
607 return -EFAULT; 602 return -EFAULT;
608 603
609 return 1; 604 return 1;
@@ -628,10 +623,8 @@ static int pcipcwd_temp_release(struct inode *inode, struct file *file)
628 623
629static int pcipcwd_notify_sys(struct notifier_block *this, unsigned long code, void *unused) 624static int pcipcwd_notify_sys(struct notifier_block *this, unsigned long code, void *unused)
630{ 625{
631 if (code==SYS_DOWN || code==SYS_HALT) { 626 if (code == SYS_DOWN || code == SYS_HALT)
632 /* Turn the WDT off */ 627 pcipcwd_stop(); /* Turn the WDT off */
633 pcipcwd_stop();
634 }
635 628
636 return NOTIFY_DONE; 629 return NOTIFY_DONE;
637} 630}
@@ -644,7 +637,7 @@ static const struct file_operations pcipcwd_fops = {
644 .owner = THIS_MODULE, 637 .owner = THIS_MODULE,
645 .llseek = no_llseek, 638 .llseek = no_llseek,
646 .write = pcipcwd_write, 639 .write = pcipcwd_write,
647 .ioctl = pcipcwd_ioctl, 640 .unlocked_ioctl = pcipcwd_ioctl,
648 .open = pcipcwd_open, 641 .open = pcipcwd_open,
649 .release = pcipcwd_release, 642 .release = pcipcwd_release,
650}; 643};
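
The pcwd_pci.c ioctl hunk keeps the WDIOC_SETTIMEOUT fall-through into WDIOC_GETTIMEOUT, so the driver hands back the heartbeat it actually programmed, and it still supports WDIOC_GETTIMELEFT. A hypothetical sketch of a caller relying on both (same assumptions as above about /dev/watchdog):

/* Hypothetical sketch: set the heartbeat, then read back the remaining time,
 * matching the SETTIMEOUT/GETTIMELEFT handling in the hunk above. */
#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/watchdog.h>

int main(void)
{
        int fd = open("/dev/watchdog", O_WRONLY);
        int heartbeat = 45;             /* seconds */
        int time_left = 0;

        if (fd < 0) {
                perror("open /dev/watchdog");
                return 1;
        }

        if (ioctl(fd, WDIOC_SETTIMEOUT, &heartbeat) == 0)
                printf("heartbeat set to %d s\n", heartbeat);

        ioctl(fd, WDIOC_KEEPALIVE);

        sleep(5);                       /* let part of the interval run down */
        if (ioctl(fd, WDIOC_GETTIMELEFT, &time_left) == 0)
                printf("%d s left before the card would reset\n", time_left);

        write(fd, "V", 1);              /* magic close */
        close(fd);
        return 0;
}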
diff --git a/drivers/watchdog/pcwd_usb.c b/drivers/watchdog/pcwd_usb.c
index bf443d077a1e..c1685c942de6 100644
--- a/drivers/watchdog/pcwd_usb.c
+++ b/drivers/watchdog/pcwd_usb.c
@@ -40,8 +40,7 @@
40#include <linux/slab.h> /* For kmalloc, ... */ 40#include <linux/slab.h> /* For kmalloc, ... */
41#include <linux/mutex.h> /* For mutex locking */ 41#include <linux/mutex.h> /* For mutex locking */
42#include <linux/hid.h> /* For HID_REQ_SET_REPORT & HID_DT_REPORT */ 42#include <linux/hid.h> /* For HID_REQ_SET_REPORT & HID_DT_REPORT */
43 43#include <linux/uaccess.h> /* For copy_to_user/put_user/... */
44#include <asm/uaccess.h> /* For copy_to_user/put_user/... */
45 44
46 45
47#ifdef CONFIG_USB_DEBUG 46#ifdef CONFIG_USB_DEBUG
@@ -88,7 +87,7 @@ MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped once started (default=" _
88#define USB_PCWD_PRODUCT_ID 0x1140 87#define USB_PCWD_PRODUCT_ID 0x1140
89 88
90/* table of devices that work with this driver */ 89/* table of devices that work with this driver */
91static struct usb_device_id usb_pcwd_table [] = { 90static struct usb_device_id usb_pcwd_table[] = {
92 { USB_DEVICE(USB_PCWD_VENDOR_ID, USB_PCWD_PRODUCT_ID) }, 91 { USB_DEVICE(USB_PCWD_VENDOR_ID, USB_PCWD_PRODUCT_ID) },
93 { } /* Terminating entry */ 92 { } /* Terminating entry */
94}; 93};
@@ -110,7 +109,7 @@ MODULE_DEVICE_TABLE (usb, usb_pcwd_table);
110#define CMD_DISABLE_WATCHDOG CMD_ENABLE_WATCHDOG 109#define CMD_DISABLE_WATCHDOG CMD_ENABLE_WATCHDOG
111 110
112/* Watchdog's Dip Switch heartbeat values */ 111/* Watchdog's Dip Switch heartbeat values */
113static const int heartbeat_tbl [] = { 112static const int heartbeat_tbl[] = {
114 5, /* OFF-OFF-OFF = 5 Sec */ 113 5, /* OFF-OFF-OFF = 5 Sec */
115 10, /* OFF-OFF-ON = 10 Sec */ 114 10, /* OFF-OFF-ON = 10 Sec */
116 30, /* OFF-ON-OFF = 30 Sec */ 115 30, /* OFF-ON-OFF = 30 Sec */
@@ -130,15 +129,15 @@ static char expect_release;
130 129
131/* Structure to hold all of our device specific stuff */ 130/* Structure to hold all of our device specific stuff */
132struct usb_pcwd_private { 131struct usb_pcwd_private {
133 struct usb_device * udev; /* save off the usb device pointer */ 132 struct usb_device *udev; /* save off the usb device pointer */
134 struct usb_interface * interface; /* the interface for this device */ 133 struct usb_interface *interface; /* the interface for this device */
135 134
136 unsigned int interface_number; /* the interface number used for cmd's */ 135 unsigned int interface_number; /* the interface number used for cmd's */
137 136
138 unsigned char * intr_buffer; /* the buffer to intr data */ 137 unsigned char *intr_buffer; /* the buffer to intr data */
139 dma_addr_t intr_dma; /* the dma address for the intr buffer */ 138 dma_addr_t intr_dma; /* the dma address for the intr buffer */
140 size_t intr_size; /* the size of the intr buffer */ 139 size_t intr_size; /* the size of the intr buffer */
141 struct urb * intr_urb; /* the urb used for the intr pipe */ 140 struct urb *intr_urb; /* the urb used for the intr pipe */
142 141
143 unsigned char cmd_command; /* The command that is reported back */ 142 unsigned char cmd_command; /* The command that is reported back */
144 unsigned char cmd_data_msb; /* The data MSB that is reported back */ 143 unsigned char cmd_data_msb; /* The data MSB that is reported back */
@@ -154,8 +153,8 @@ static struct usb_pcwd_private *usb_pcwd_device;
154static DEFINE_MUTEX(disconnect_mutex); 153static DEFINE_MUTEX(disconnect_mutex);
155 154
156/* local function prototypes */ 155/* local function prototypes */
157static int usb_pcwd_probe (struct usb_interface *interface, const struct usb_device_id *id); 156static int usb_pcwd_probe(struct usb_interface *interface, const struct usb_device_id *id);
158static void usb_pcwd_disconnect (struct usb_interface *interface); 157static void usb_pcwd_disconnect(struct usb_interface *interface);
159 158
160/* usb specific object needed to register this driver with the usb subsystem */ 159/* usb specific object needed to register this driver with the usb subsystem */
161static struct usb_driver usb_pcwd_driver = { 160static struct usb_driver usb_pcwd_driver = {
@@ -195,10 +194,10 @@ static void usb_pcwd_intr_done(struct urb *urb)
195 usb_pcwd->cmd_data_lsb = data[2]; 194 usb_pcwd->cmd_data_lsb = data[2];
196 195
197 /* notify anyone waiting that the cmd has finished */ 196 /* notify anyone waiting that the cmd has finished */
198 atomic_set (&usb_pcwd->cmd_received, 1); 197 atomic_set(&usb_pcwd->cmd_received, 1);
199 198
200resubmit: 199resubmit:
201 retval = usb_submit_urb (urb, GFP_ATOMIC); 200 retval = usb_submit_urb(urb, GFP_ATOMIC);
202 if (retval) 201 if (retval)
203 printk(KERN_ERR PFX "can't resubmit intr, usb_submit_urb failed with result %d\n", 202 printk(KERN_ERR PFX "can't resubmit intr, usb_submit_urb failed with result %d\n",
204 retval); 203 retval);
@@ -224,7 +223,7 @@ static int usb_pcwd_send_command(struct usb_pcwd_private *usb_pcwd, unsigned cha
224 dbg("sending following data cmd=0x%02x msb=0x%02x lsb=0x%02x", 223 dbg("sending following data cmd=0x%02x msb=0x%02x lsb=0x%02x",
225 buf[0], buf[1], buf[2]); 224 buf[0], buf[1], buf[2]);
226 225
227 atomic_set (&usb_pcwd->cmd_received, 0); 226 atomic_set(&usb_pcwd->cmd_received, 0);
228 227
229 if (usb_control_msg(usb_pcwd->udev, usb_sndctrlpipe(usb_pcwd->udev, 0), 228 if (usb_control_msg(usb_pcwd->udev, usb_sndctrlpipe(usb_pcwd->udev, 0),
230 HID_REQ_SET_REPORT, HID_DT_REPORT, 229 HID_REQ_SET_REPORT, HID_DT_REPORT,
@@ -237,7 +236,7 @@ static int usb_pcwd_send_command(struct usb_pcwd_private *usb_pcwd, unsigned cha
237 got_response = 0; 236 got_response = 0;
238 for (count = 0; (count < USB_COMMAND_TIMEOUT) && (!got_response); count++) { 237 for (count = 0; (count < USB_COMMAND_TIMEOUT) && (!got_response); count++) {
239 mdelay(1); 238 mdelay(1);
240 if (atomic_read (&usb_pcwd->cmd_received)) 239 if (atomic_read(&usb_pcwd->cmd_received))
241 got_response = 1; 240 got_response = 1;
242 } 241 }
243 242
@@ -356,7 +355,7 @@ static ssize_t usb_pcwd_write(struct file *file, const char __user *data,
356 /* scan to see whether or not we got the magic character */ 355 /* scan to see whether or not we got the magic character */
357 for (i = 0; i != len; i++) { 356 for (i = 0; i != len; i++) {
358 char c; 357 char c;
359 if(get_user(c, data+i)) 358 if (get_user(c, data + i))
360 return -EFAULT; 359 return -EFAULT;
361 if (c == 'V') 360 if (c == 'V')
362 expect_release = 42; 361 expect_release = 42;
@@ -369,8 +368,8 @@ static ssize_t usb_pcwd_write(struct file *file, const char __user *data,
369 return len; 368 return len;
370} 369}
371 370
372static int usb_pcwd_ioctl(struct inode *inode, struct file *file, 371static long usb_pcwd_ioctl(struct file *file, unsigned int cmd,
373 unsigned int cmd, unsigned long arg) 372 unsigned long arg)
374{ 373{
375 void __user *argp = (void __user *)arg; 374 void __user *argp = (void __user *)arg;
376 int __user *p = argp; 375 int __user *p = argp;
@@ -383,77 +382,76 @@ static int usb_pcwd_ioctl(struct inode *inode, struct file *file,
383 }; 382 };
384 383
385 switch (cmd) { 384 switch (cmd) {
386 case WDIOC_GETSUPPORT: 385 case WDIOC_GETSUPPORT:
387 return copy_to_user(argp, &ident, 386 return copy_to_user(argp, &ident, sizeof(ident)) ? -EFAULT : 0;
388 sizeof (ident)) ? -EFAULT : 0;
389 387
390 case WDIOC_GETSTATUS: 388 case WDIOC_GETSTATUS:
391 case WDIOC_GETBOOTSTATUS: 389 case WDIOC_GETBOOTSTATUS:
392 return put_user(0, p); 390 return put_user(0, p);
393 391
394 case WDIOC_GETTEMP: 392 case WDIOC_GETTEMP:
395 { 393 {
396 int temperature; 394 int temperature;
397 395
398 if (usb_pcwd_get_temperature(usb_pcwd_device, &temperature)) 396 if (usb_pcwd_get_temperature(usb_pcwd_device, &temperature))
399 return -EFAULT; 397 return -EFAULT;
400 398
401 return put_user(temperature, p); 399 return put_user(temperature, p);
402 } 400 }
403 401
404 case WDIOC_KEEPALIVE: 402 case WDIOC_SETOPTIONS:
405 usb_pcwd_keepalive(usb_pcwd_device); 403 {
406 return 0; 404 int new_options, retval = -EINVAL;
407 405
408 case WDIOC_SETOPTIONS: 406 if (get_user(new_options, p))
409 { 407 return -EFAULT;
410 int new_options, retval = -EINVAL;
411 408
412 if (get_user (new_options, p)) 409 if (new_options & WDIOS_DISABLECARD) {
413 return -EFAULT; 410 usb_pcwd_stop(usb_pcwd_device);
411 retval = 0;
412 }
414 413
415 if (new_options & WDIOS_DISABLECARD) { 414 if (new_options & WDIOS_ENABLECARD) {
416 usb_pcwd_stop(usb_pcwd_device); 415 usb_pcwd_start(usb_pcwd_device);
417 retval = 0; 416 retval = 0;
418 } 417 }
419 418
420 if (new_options & WDIOS_ENABLECARD) { 419 return retval;
421 usb_pcwd_start(usb_pcwd_device); 420 }
422 retval = 0;
423 }
424 421
425 return retval; 422 case WDIOC_KEEPALIVE:
426 } 423 usb_pcwd_keepalive(usb_pcwd_device);
424 return 0;
427 425
428 case WDIOC_SETTIMEOUT: 426 case WDIOC_SETTIMEOUT:
429 { 427 {
430 int new_heartbeat; 428 int new_heartbeat;
431 429
432 if (get_user(new_heartbeat, p)) 430 if (get_user(new_heartbeat, p))
433 return -EFAULT; 431 return -EFAULT;
434 432
435 if (usb_pcwd_set_heartbeat(usb_pcwd_device, new_heartbeat)) 433 if (usb_pcwd_set_heartbeat(usb_pcwd_device, new_heartbeat))
436 return -EINVAL; 434 return -EINVAL;
437 435
438 usb_pcwd_keepalive(usb_pcwd_device); 436 usb_pcwd_keepalive(usb_pcwd_device);
439 /* Fall */ 437 /* Fall */
440 } 438 }
441 439
442 case WDIOC_GETTIMEOUT: 440 case WDIOC_GETTIMEOUT:
443 return put_user(heartbeat, p); 441 return put_user(heartbeat, p);
444 442
445 case WDIOC_GETTIMELEFT: 443 case WDIOC_GETTIMELEFT:
446 { 444 {
447 int time_left; 445 int time_left;
448 446
449 if (usb_pcwd_get_timeleft(usb_pcwd_device, &time_left)) 447 if (usb_pcwd_get_timeleft(usb_pcwd_device, &time_left))
450 return -EFAULT; 448 return -EFAULT;
451 449
452 return put_user(time_left, p); 450 return put_user(time_left, p);
453 } 451 }
454 452
455 default: 453 default:
456 return -ENOTTY; 454 return -ENOTTY;
457 } 455 }
458} 456}
459 457
@@ -519,10 +517,8 @@ static int usb_pcwd_temperature_release(struct inode *inode, struct file *file)
519 517
520static int usb_pcwd_notify_sys(struct notifier_block *this, unsigned long code, void *unused) 518static int usb_pcwd_notify_sys(struct notifier_block *this, unsigned long code, void *unused)
521{ 519{
522 if (code==SYS_DOWN || code==SYS_HALT) { 520 if (code == SYS_DOWN || code == SYS_HALT)
523 /* Turn the WDT off */ 521 usb_pcwd_stop(usb_pcwd_device); /* Turn the WDT off */
524 usb_pcwd_stop(usb_pcwd_device);
525 }
526 522
527 return NOTIFY_DONE; 523 return NOTIFY_DONE;
528} 524}
@@ -535,7 +531,7 @@ static const struct file_operations usb_pcwd_fops = {
535 .owner = THIS_MODULE, 531 .owner = THIS_MODULE,
536 .llseek = no_llseek, 532 .llseek = no_llseek,
537 .write = usb_pcwd_write, 533 .write = usb_pcwd_write,
538 .ioctl = usb_pcwd_ioctl, 534 .unlocked_ioctl = usb_pcwd_ioctl,
539 .open = usb_pcwd_open, 535 .open = usb_pcwd_open,
540 .release = usb_pcwd_release, 536 .release = usb_pcwd_release,
541}; 537};
@@ -567,13 +563,13 @@ static struct notifier_block usb_pcwd_notifier = {
567/** 563/**
568 * usb_pcwd_delete 564 * usb_pcwd_delete
569 */ 565 */
570static inline void usb_pcwd_delete (struct usb_pcwd_private *usb_pcwd) 566static inline void usb_pcwd_delete(struct usb_pcwd_private *usb_pcwd)
571{ 567{
572 usb_free_urb(usb_pcwd->intr_urb); 568 usb_free_urb(usb_pcwd->intr_urb);
573 if (usb_pcwd->intr_buffer != NULL) 569 if (usb_pcwd->intr_buffer != NULL)
574 usb_buffer_free(usb_pcwd->udev, usb_pcwd->intr_size, 570 usb_buffer_free(usb_pcwd->udev, usb_pcwd->intr_size,
575 usb_pcwd->intr_buffer, usb_pcwd->intr_dma); 571 usb_pcwd->intr_buffer, usb_pcwd->intr_dma);
576 kfree (usb_pcwd); 572 kfree(usb_pcwd);
577} 573}
578 574
579/** 575/**
@@ -626,7 +622,7 @@ static int usb_pcwd_probe(struct usb_interface *interface, const struct usb_devi
626 maxp = usb_maxpacket(udev, pipe, usb_pipeout(pipe)); 622 maxp = usb_maxpacket(udev, pipe, usb_pipeout(pipe));
627 623
628 /* allocate memory for our device and initialize it */ 624 /* allocate memory for our device and initialize it */
629 usb_pcwd = kzalloc (sizeof(struct usb_pcwd_private), GFP_KERNEL); 625 usb_pcwd = kzalloc(sizeof(struct usb_pcwd_private), GFP_KERNEL);
630 if (usb_pcwd == NULL) { 626 if (usb_pcwd == NULL) {
631 printk(KERN_ERR PFX "Out of memory\n"); 627 printk(KERN_ERR PFX "Out of memory\n");
632 goto error; 628 goto error;
@@ -641,7 +637,8 @@ static int usb_pcwd_probe(struct usb_interface *interface, const struct usb_devi
641 usb_pcwd->intr_size = (le16_to_cpu(endpoint->wMaxPacketSize) > 8 ? le16_to_cpu(endpoint->wMaxPacketSize) : 8); 637 usb_pcwd->intr_size = (le16_to_cpu(endpoint->wMaxPacketSize) > 8 ? le16_to_cpu(endpoint->wMaxPacketSize) : 8);
642 638
643 /* set up the memory buffer's */ 639 /* set up the memory buffer's */
644 if (!(usb_pcwd->intr_buffer = usb_buffer_alloc(udev, usb_pcwd->intr_size, GFP_ATOMIC, &usb_pcwd->intr_dma))) { 640 usb_pcwd->intr_buffer = usb_buffer_alloc(udev, usb_pcwd->intr_size, GFP_ATOMIC, &usb_pcwd->intr_dma);
641 if (!usb_pcwd->intr_buffer) {
645 printk(KERN_ERR PFX "Out of memory\n"); 642 printk(KERN_ERR PFX "Out of memory\n");
646 goto error; 643 goto error;
647 } 644 }
@@ -675,11 +672,10 @@ static int usb_pcwd_probe(struct usb_interface *interface, const struct usb_devi
675 672
676 /* Get the Firmware Version */ 673 /* Get the Firmware Version */
677 got_fw_rev = usb_pcwd_send_command(usb_pcwd, CMD_GET_FIRMWARE_VERSION, &fw_rev_major, &fw_rev_minor); 674 got_fw_rev = usb_pcwd_send_command(usb_pcwd, CMD_GET_FIRMWARE_VERSION, &fw_rev_major, &fw_rev_minor);
678 if (got_fw_rev) { 675 if (got_fw_rev)
679 sprintf(fw_ver_str, "%u.%02u", fw_rev_major, fw_rev_minor); 676 sprintf(fw_ver_str, "%u.%02u", fw_rev_major, fw_rev_minor);
680 } else { 677 else
681 sprintf(fw_ver_str, "<card no answer>"); 678 sprintf(fw_ver_str, "<card no answer>");
682 }
683 679
684 printk(KERN_INFO PFX "Found card (Firmware: %s) with temp option\n", 680 printk(KERN_INFO PFX "Found card (Firmware: %s) with temp option\n",
685 fw_ver_str); 681 fw_ver_str);
@@ -725,7 +721,7 @@ static int usb_pcwd_probe(struct usb_interface *interface, const struct usb_devi
725 } 721 }
726 722
727 /* we can register the device now, as it is ready */ 723 /* we can register the device now, as it is ready */
728 usb_set_intfdata (interface, usb_pcwd); 724 usb_set_intfdata(interface, usb_pcwd);
729 725
730 printk(KERN_INFO PFX "initialized. heartbeat=%d sec (nowayout=%d)\n", 726 printk(KERN_INFO PFX "initialized. heartbeat=%d sec (nowayout=%d)\n",
731 heartbeat, nowayout); 727 heartbeat, nowayout);
@@ -759,8 +755,8 @@ static void usb_pcwd_disconnect(struct usb_interface *interface)
759 /* prevent races with open() */ 755 /* prevent races with open() */
760 mutex_lock(&disconnect_mutex); 756 mutex_lock(&disconnect_mutex);
761 757
762 usb_pcwd = usb_get_intfdata (interface); 758 usb_pcwd = usb_get_intfdata(interface);
763 usb_set_intfdata (interface, NULL); 759 usb_set_intfdata(interface, NULL);
764 760
765 mutex_lock(&usb_pcwd->mtx); 761 mutex_lock(&usb_pcwd->mtx);
766 762
@@ -820,5 +816,5 @@ static void __exit usb_pcwd_exit(void)
820} 816}
821 817
822 818
823module_init (usb_pcwd_init); 819module_init(usb_pcwd_init);
824module_exit (usb_pcwd_exit); 820module_exit(usb_pcwd_exit);
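
The pcwd_usb.c hunks above show the conversion that repeats through the rest of this series: the handler drops its struct inode argument, returns long instead of int, is registered through .unlocked_ioctl rather than .ioctl, and the WDIOC cases are reordered so WDIOC_SETOPTIONS and WDIOC_KEEPALIVE sit ahead of the timeout commands with default last. A minimal sketch of that handler shape, using hypothetical example_wdt_* names rather than any code from this patch:

#include <linux/module.h>
#include <linux/fs.h>
#include <linux/uaccess.h>
#include <linux/watchdog.h>

/* Illustrative stand-ins; a real driver would touch hardware here. */
static void example_wdt_keepalive(void)
{
	/* reload the hardware counter */
}

static const struct watchdog_info example_wdt_ident = {
	.options  = WDIOF_KEEPALIVEPING | WDIOF_MAGICCLOSE,
	.identity = "Example Watchdog",
};

static long example_wdt_ioctl(struct file *file, unsigned int cmd,
			      unsigned long arg)
{
	void __user *argp = (void __user *)arg;
	int __user *p = argp;

	switch (cmd) {
	case WDIOC_GETSUPPORT:
		return copy_to_user(argp, &example_wdt_ident,
				    sizeof(example_wdt_ident)) ? -EFAULT : 0;
	case WDIOC_GETSTATUS:
	case WDIOC_GETBOOTSTATUS:
		return put_user(0, p);
	case WDIOC_KEEPALIVE:
		example_wdt_keepalive();
		return 0;
	default:
		return -ENOTTY;
	}
}

static const struct file_operations example_wdt_fops = {
	.owner		= THIS_MODULE,
	.llseek		= no_llseek,
	.unlocked_ioctl	= example_wdt_ioctl,
};

Unlike the old .ioctl path, .unlocked_ioctl is entered without the big kernel lock, which is why the later hunks also add their own spinlocks or bit locks around the hardware accesses.
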
diff --git a/drivers/watchdog/pnx4008_wdt.c b/drivers/watchdog/pnx4008_wdt.c
index 6b8483d3c783..0ed84162437b 100644
--- a/drivers/watchdog/pnx4008_wdt.c
+++ b/drivers/watchdog/pnx4008_wdt.c
@@ -28,10 +28,9 @@
28#include <linux/platform_device.h> 28#include <linux/platform_device.h>
29#include <linux/clk.h> 29#include <linux/clk.h>
30#include <linux/spinlock.h> 30#include <linux/spinlock.h>
31 31#include <linux/uaccess.h>
32#include <asm/hardware.h> 32#include <linux/io.h>
33#include <asm/uaccess.h> 33#include <mach/hardware.h>
34#include <asm/io.h>
35 34
36#define MODULE_NAME "PNX4008-WDT: " 35#define MODULE_NAME "PNX4008-WDT: "
37 36
@@ -144,9 +143,8 @@ static int pnx4008_wdt_open(struct inode *inode, struct file *file)
144 return nonseekable_open(inode, file); 143 return nonseekable_open(inode, file);
145} 144}
146 145
147static ssize_t 146static ssize_t pnx4008_wdt_write(struct file *file, const char *data,
148pnx4008_wdt_write(struct file *file, const char *data, size_t len, 147 size_t len, loff_t *ppos)
149 loff_t * ppos)
150{ 148{
151 if (len) { 149 if (len) {
152 if (!nowayout) { 150 if (!nowayout) {
@@ -169,15 +167,14 @@ pnx4008_wdt_write(struct file *file, const char *data, size_t len,
169 return len; 167 return len;
170} 168}
171 169
172static struct watchdog_info ident = { 170static const struct watchdog_info ident = {
173 .options = WDIOF_CARDRESET | WDIOF_MAGICCLOSE | 171 .options = WDIOF_CARDRESET | WDIOF_MAGICCLOSE |
174 WDIOF_SETTIMEOUT | WDIOF_KEEPALIVEPING, 172 WDIOF_SETTIMEOUT | WDIOF_KEEPALIVEPING,
175 .identity = "PNX4008 Watchdog", 173 .identity = "PNX4008 Watchdog",
176}; 174};
177 175
178static int 176static long pnx4008_wdt_ioctl(struct inode *inode, struct file *file,
179pnx4008_wdt_ioctl(struct inode *inode, struct file *file, unsigned int cmd, 177 unsigned int cmd, unsigned long arg)
180 unsigned long arg)
181{ 178{
182 int ret = -ENOTTY; 179 int ret = -ENOTTY;
183 int time; 180 int time;
@@ -196,6 +193,11 @@ pnx4008_wdt_ioctl(struct inode *inode, struct file *file, unsigned int cmd,
196 ret = put_user(boot_status, (int *)arg); 193 ret = put_user(boot_status, (int *)arg);
197 break; 194 break;
198 195
196 case WDIOC_KEEPALIVE:
197 wdt_enable();
198 ret = 0;
199 break;
200
199 case WDIOC_SETTIMEOUT: 201 case WDIOC_SETTIMEOUT:
200 ret = get_user(time, (int *)arg); 202 ret = get_user(time, (int *)arg);
201 if (ret) 203 if (ret)
@@ -213,11 +215,6 @@ pnx4008_wdt_ioctl(struct inode *inode, struct file *file, unsigned int cmd,
213 case WDIOC_GETTIMEOUT: 215 case WDIOC_GETTIMEOUT:
214 ret = put_user(heartbeat, (int *)arg); 216 ret = put_user(heartbeat, (int *)arg);
215 break; 217 break;
216
217 case WDIOC_KEEPALIVE:
218 wdt_enable();
219 ret = 0;
220 break;
221 } 218 }
222 return ret; 219 return ret;
223} 220}
@@ -238,7 +235,7 @@ static const struct file_operations pnx4008_wdt_fops = {
238 .owner = THIS_MODULE, 235 .owner = THIS_MODULE,
239 .llseek = no_llseek, 236 .llseek = no_llseek,
240 .write = pnx4008_wdt_write, 237 .write = pnx4008_wdt_write,
241 .ioctl = pnx4008_wdt_ioctl, 238 .unlocked_ioctl = pnx4008_wdt_ioctl,
242 .open = pnx4008_wdt_open, 239 .open = pnx4008_wdt_open,
243 .release = pnx4008_wdt_release, 240 .release = pnx4008_wdt_release,
244}; 241};
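
For reference, the WDIOC commands being shuffled in these handlers are what userspace issues against /dev/watchdog. A small stand-alone client (not part of the kernel tree; the device path and the 30-second value are only examples, and error checking is kept minimal) that exercises the same interface:

/* Hedged example: userspace watchdog client. */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/watchdog.h>

int main(void)
{
	int timeout = 30;
	int fd = open("/dev/watchdog", O_WRONLY);	/* opening usually starts the timer */

	if (fd < 0) {
		perror("open /dev/watchdog");
		return 1;
	}

	ioctl(fd, WDIOC_SETTIMEOUT, &timeout);		/* ask for a 30 s window */
	ioctl(fd, WDIOC_GETTIMEOUT, &timeout);		/* read back what was granted */
	printf("timeout: %d seconds\n", timeout);

	ioctl(fd, WDIOC_KEEPALIVE, 0);			/* one explicit ping */

	write(fd, "V", 1);				/* magic close, if the driver supports it */
	close(fd);
	return 0;
}
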
diff --git a/drivers/watchdog/rm9k_wdt.c b/drivers/watchdog/rm9k_wdt.c
index 5c921e471564..f1ae3729a19e 100644
--- a/drivers/watchdog/rm9k_wdt.c
+++ b/drivers/watchdog/rm9k_wdt.c
@@ -29,10 +29,10 @@
29#include <linux/notifier.h> 29#include <linux/notifier.h>
30#include <linux/miscdevice.h> 30#include <linux/miscdevice.h>
31#include <linux/watchdog.h> 31#include <linux/watchdog.h>
32#include <asm/io.h> 32#include <linux/io.h>
33#include <linux/uaccess.h>
33#include <asm/atomic.h> 34#include <asm/atomic.h>
34#include <asm/processor.h> 35#include <asm/processor.h>
35#include <asm/uaccess.h>
36#include <asm/system.h> 36#include <asm/system.h>
37#include <asm/rm9k-ocd.h> 37#include <asm/rm9k-ocd.h>
38 38
@@ -53,10 +53,12 @@ static void wdt_gpi_stop(void);
53static void wdt_gpi_set_timeout(unsigned int); 53static void wdt_gpi_set_timeout(unsigned int);
54static int wdt_gpi_open(struct inode *, struct file *); 54static int wdt_gpi_open(struct inode *, struct file *);
55static int wdt_gpi_release(struct inode *, struct file *); 55static int wdt_gpi_release(struct inode *, struct file *);
56static ssize_t wdt_gpi_write(struct file *, const char __user *, size_t, loff_t *); 56static ssize_t wdt_gpi_write(struct file *, const char __user *, size_t,
57 loff_t *);
57static long wdt_gpi_ioctl(struct file *, unsigned int, unsigned long); 58static long wdt_gpi_ioctl(struct file *, unsigned int, unsigned long);
58static int wdt_gpi_notify(struct notifier_block *, unsigned long, void *); 59static int wdt_gpi_notify(struct notifier_block *, unsigned long, void *);
59static const struct resource *wdt_gpi_get_resource(struct platform_device *, const char *, unsigned int); 60static const struct resource *wdt_gpi_get_resource(struct platform_device *,
61 const char *, unsigned int);
60static int __init wdt_gpi_probe(struct device *); 62static int __init wdt_gpi_probe(struct device *);
61static int __exit wdt_gpi_remove(struct device *); 63static int __exit wdt_gpi_remove(struct device *);
62 64
@@ -68,7 +70,7 @@ static int locked;
68 70
69 71
70/* These are set from device resources */ 72/* These are set from device resources */
71static void __iomem * wd_regs; 73static void __iomem *wd_regs;
72static unsigned int wd_irq, wd_ctr; 74static unsigned int wd_irq, wd_ctr;
73 75
74 76
@@ -216,7 +218,8 @@ static int wdt_gpi_release(struct inode *inode, struct file *file)
216 if (expect_close) { 218 if (expect_close) {
217 wdt_gpi_stop(); 219 wdt_gpi_stop();
218 free_irq(wd_irq, &miscdev); 220 free_irq(wd_irq, &miscdev);
219 printk(KERN_INFO "%s: watchdog stopped\n", wdt_gpi_name); 221 printk(KERN_INFO "%s: watchdog stopped\n",
222 wdt_gpi_name);
220 } else { 223 } else {
221 printk(KERN_CRIT "%s: unexpected close() -" 224 printk(KERN_CRIT "%s: unexpected close() -"
222 " watchdog left running\n", 225 " watchdog left running\n",
@@ -231,8 +234,8 @@ static int wdt_gpi_release(struct inode *inode, struct file *file)
231 return 0; 234 return 0;
232} 235}
233 236
234static ssize_t 237static ssize_t wdt_gpi_write(struct file *f, const char __user *d, size_t s,
235wdt_gpi_write(struct file *f, const char __user *d, size_t s, loff_t *o) 238 loff_t *o)
236{ 239{
237 char val; 240 char val;
238 241
@@ -241,8 +244,7 @@ wdt_gpi_write(struct file *f, const char __user *d, size_t s, loff_t *o)
241 return s ? 1 : 0; 244 return s ? 1 : 0;
242} 245}
243 246
244static long 247static long wdt_gpi_ioctl(struct file *f, unsigned int cmd, unsigned long arg)
245wdt_gpi_ioctl(struct file *f, unsigned int cmd, unsigned long arg)
246{ 248{
247 long res = -ENOTTY; 249 long res = -ENOTTY;
248 const long size = _IOC_SIZE(cmd); 250 const long size = _IOC_SIZE(cmd);
@@ -271,7 +273,8 @@ wdt_gpi_ioctl(struct file *f, unsigned int cmd, unsigned long arg)
271 case WDIOC_GETSUPPORT: 273 case WDIOC_GETSUPPORT:
272 wdinfo.options = nowayout ? 274 wdinfo.options = nowayout ?
273 WDIOF_SETTIMEOUT | WDIOF_KEEPALIVEPING : 275 WDIOF_SETTIMEOUT | WDIOF_KEEPALIVEPING :
274 WDIOF_SETTIMEOUT | WDIOF_KEEPALIVEPING | WDIOF_MAGICCLOSE; 276 WDIOF_SETTIMEOUT | WDIOF_KEEPALIVEPING |
277 WDIOF_MAGICCLOSE;
275 res = __copy_to_user(argp, &wdinfo, size) ? -EFAULT : size; 278 res = __copy_to_user(argp, &wdinfo, size) ? -EFAULT : size;
276 break; 279 break;
277 280
@@ -322,8 +325,8 @@ wdt_gpi_ioctl(struct file *f, unsigned int cmd, unsigned long arg)
322 325
323 326
324/* Shutdown notifier */ 327/* Shutdown notifier */
325static int 328static int wdt_gpi_notify(struct notifier_block *this, unsigned long code,
326wdt_gpi_notify(struct notifier_block *this, unsigned long code, void *unused) 329 void *unused)
327{ 330{
328 if (code == SYS_DOWN || code == SYS_HALT) 331 if (code == SYS_DOWN || code == SYS_HALT)
329 wdt_gpi_stop(); 332 wdt_gpi_stop();
@@ -333,9 +336,8 @@ wdt_gpi_notify(struct notifier_block *this, unsigned long code, void *unused)
333 336
334 337
335/* Init & exit procedures */ 338/* Init & exit procedures */
336static const struct resource * 339static const struct resource *wdt_gpi_get_resource(struct platform_device *pdv,
337wdt_gpi_get_resource(struct platform_device *pdv, const char *name, 340 const char *name, unsigned int type)
338 unsigned int type)
339{ 341{
340 char buf[80]; 342 char buf[80];
341 if (snprintf(buf, sizeof buf, "%s_0", name) >= sizeof buf) 343 if (snprintf(buf, sizeof buf, "%s_0", name) >= sizeof buf)
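
The rm9k release hunk above relies on the usual magic-close convention, which several of the following drivers also reshuffle: write() scans the incoming buffer for 'V' and arms an expect_close flag, and only an armed release actually stops the hardware; otherwise the timer is left running and merely pinged. Sketched below with hypothetical example_wdt_stop()/example_wdt_keepalive() helpers and without the nowayout handling:

#include <linux/fs.h>
#include <linux/uaccess.h>

static int example_expect_close;

static void example_wdt_stop(void)      { /* disable the hardware */ }
static void example_wdt_keepalive(void) { /* reload the counter */ }

static ssize_t example_wdt_write(struct file *file, const char __user *data,
				 size_t len, loff_t *ppos)
{
	size_t i;

	if (!len)
		return 0;

	example_expect_close = 0;
	for (i = 0; i != len; i++) {
		char c;

		if (get_user(c, data + i))
			return -EFAULT;
		if (c == 'V')
			example_expect_close = 42;	/* arm the magic close */
	}
	example_wdt_keepalive();
	return len;
}

static int example_wdt_release(struct inode *inode, struct file *file)
{
	if (example_expect_close == 42)
		example_wdt_stop();		/* clean, expected shutdown */
	else
		example_wdt_keepalive();	/* unexpected close: keep running */
	example_expect_close = 0;
	return 0;
}
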
diff --git a/drivers/watchdog/s3c2410_wdt.c b/drivers/watchdog/s3c2410_wdt.c
index 98532c0e0689..3da2b90d2fe6 100644
--- a/drivers/watchdog/s3c2410_wdt.c
+++ b/drivers/watchdog/s3c2410_wdt.c
@@ -46,11 +46,10 @@
46#include <linux/platform_device.h> 46#include <linux/platform_device.h>
47#include <linux/interrupt.h> 47#include <linux/interrupt.h>
48#include <linux/clk.h> 48#include <linux/clk.h>
49#include <linux/uaccess.h>
50#include <linux/io.h>
49 51
50#include <asm/uaccess.h> 52#include <mach/map.h>
51#include <asm/io.h>
52
53#include <asm/arch/map.h>
54 53
55#undef S3C_VA_WATCHDOG 54#undef S3C_VA_WATCHDOG
56#define S3C_VA_WATCHDOG (0) 55#define S3C_VA_WATCHDOG (0)
@@ -65,8 +64,8 @@
65static int nowayout = WATCHDOG_NOWAYOUT; 64static int nowayout = WATCHDOG_NOWAYOUT;
66static int tmr_margin = CONFIG_S3C2410_WATCHDOG_DEFAULT_TIME; 65static int tmr_margin = CONFIG_S3C2410_WATCHDOG_DEFAULT_TIME;
67static int tmr_atboot = CONFIG_S3C2410_WATCHDOG_ATBOOT; 66static int tmr_atboot = CONFIG_S3C2410_WATCHDOG_ATBOOT;
68static int soft_noboot = 0; 67static int soft_noboot;
69static int debug = 0; 68static int debug;
70 69
71module_param(tmr_margin, int, 0); 70module_param(tmr_margin, int, 0);
72module_param(tmr_atboot, int, 0); 71module_param(tmr_atboot, int, 0);
@@ -74,24 +73,23 @@ module_param(nowayout, int, 0);
74module_param(soft_noboot, int, 0); 73module_param(soft_noboot, int, 0);
75module_param(debug, int, 0); 74module_param(debug, int, 0);
76 75
77MODULE_PARM_DESC(tmr_margin, "Watchdog tmr_margin in seconds. default=" __MODULE_STRING(CONFIG_S3C2410_WATCHDOG_DEFAULT_TIME) ")"); 76MODULE_PARM_DESC(tmr_margin, "Watchdog tmr_margin in seconds. default="
78 77 __MODULE_STRING(CONFIG_S3C2410_WATCHDOG_DEFAULT_TIME) ")");
79MODULE_PARM_DESC(tmr_atboot, "Watchdog is started at boot time if set to 1, default=" __MODULE_STRING(CONFIG_S3C2410_WATCHDOG_ATBOOT)); 78MODULE_PARM_DESC(tmr_atboot,
80 79 "Watchdog is started at boot time if set to 1, default="
81MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped once started (default=" __MODULE_STRING(WATCHDOG_NOWAYOUT) ")"); 80 __MODULE_STRING(CONFIG_S3C2410_WATCHDOG_ATBOOT));
82 81MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped once started (default="
82 __MODULE_STRING(WATCHDOG_NOWAYOUT) ")");
83MODULE_PARM_DESC(soft_noboot, "Watchdog action, set to 1 to ignore reboots, 0 to reboot (default depends on ONLY_TESTING)"); 83MODULE_PARM_DESC(soft_noboot, "Watchdog action, set to 1 to ignore reboots, 0 to reboot (default depends on ONLY_TESTING)");
84
85MODULE_PARM_DESC(debug, "Watchdog debug, set to >1 for debug, (default 0)"); 84MODULE_PARM_DESC(debug, "Watchdog debug, set to >1 for debug, (default 0)");
86 85
87 86
88typedef enum close_state { 87typedef enum close_state {
89 CLOSE_STATE_NOT, 88 CLOSE_STATE_NOT,
90 CLOSE_STATE_ALLOW=0x4021 89 CLOSE_STATE_ALLOW = 0x4021
91} close_state_t; 90} close_state_t;
92 91
93static DECLARE_MUTEX(open_lock); 92static unsigned long open_lock;
94
95static struct device *wdt_dev; /* platform device attached to */ 93static struct device *wdt_dev; /* platform device attached to */
96static struct resource *wdt_mem; 94static struct resource *wdt_mem;
97static struct resource *wdt_irq; 95static struct resource *wdt_irq;
@@ -99,38 +97,47 @@ static struct clk *wdt_clock;
99static void __iomem *wdt_base; 97static void __iomem *wdt_base;
100static unsigned int wdt_count; 98static unsigned int wdt_count;
101static close_state_t allow_close; 99static close_state_t allow_close;
100static DEFINE_SPINLOCK(wdt_lock);
102 101
103/* watchdog control routines */ 102/* watchdog control routines */
104 103
105#define DBG(msg...) do { \ 104#define DBG(msg...) do { \
106 if (debug) \ 105 if (debug) \
107 printk(KERN_INFO msg); \ 106 printk(KERN_INFO msg); \
108 } while(0) 107 } while (0)
109 108
110/* functions */ 109/* functions */
111 110
112static int s3c2410wdt_keepalive(void) 111static void s3c2410wdt_keepalive(void)
113{ 112{
113 spin_lock(&wdt_lock);
114 writel(wdt_count, wdt_base + S3C2410_WTCNT); 114 writel(wdt_count, wdt_base + S3C2410_WTCNT);
115 return 0; 115 spin_unlock(&wdt_lock);
116} 116}
117 117
118static int s3c2410wdt_stop(void) 118static void __s3c2410wdt_stop(void)
119{ 119{
120 unsigned long wtcon; 120 unsigned long wtcon;
121 121
122 wtcon = readl(wdt_base + S3C2410_WTCON); 122 wtcon = readl(wdt_base + S3C2410_WTCON);
123 wtcon &= ~(S3C2410_WTCON_ENABLE | S3C2410_WTCON_RSTEN); 123 wtcon &= ~(S3C2410_WTCON_ENABLE | S3C2410_WTCON_RSTEN);
124 writel(wtcon, wdt_base + S3C2410_WTCON); 124 writel(wtcon, wdt_base + S3C2410_WTCON);
125}
125 126
126 return 0; 127static void s3c2410wdt_stop(void)
128{
129 spin_lock(&wdt_lock);
130 __s3c2410wdt_stop();
131 spin_unlock(&wdt_lock);
127} 132}
128 133
129static int s3c2410wdt_start(void) 134static void s3c2410wdt_start(void)
130{ 135{
131 unsigned long wtcon; 136 unsigned long wtcon;
132 137
133 s3c2410wdt_stop(); 138 spin_lock(&wdt_lock);
139
140 __s3c2410wdt_stop();
134 141
135 wtcon = readl(wdt_base + S3C2410_WTCON); 142 wtcon = readl(wdt_base + S3C2410_WTCON);
136 wtcon |= S3C2410_WTCON_ENABLE | S3C2410_WTCON_DIV128; 143 wtcon |= S3C2410_WTCON_ENABLE | S3C2410_WTCON_DIV128;
@@ -149,6 +156,7 @@ static int s3c2410wdt_start(void)
149 writel(wdt_count, wdt_base + S3C2410_WTDAT); 156 writel(wdt_count, wdt_base + S3C2410_WTDAT);
150 writel(wdt_count, wdt_base + S3C2410_WTCNT); 157 writel(wdt_count, wdt_base + S3C2410_WTCNT);
151 writel(wtcon, wdt_base + S3C2410_WTCON); 158 writel(wtcon, wdt_base + S3C2410_WTCON);
159 spin_unlock(&wdt_lock);
152 160
153 return 0; 161 return 0;
154} 162}
@@ -211,7 +219,7 @@ static int s3c2410wdt_set_heartbeat(int timeout)
211 219
212static int s3c2410wdt_open(struct inode *inode, struct file *file) 220static int s3c2410wdt_open(struct inode *inode, struct file *file)
213{ 221{
214 if(down_trylock(&open_lock)) 222 if (test_and_set_bit(0, &open_lock))
215 return -EBUSY; 223 return -EBUSY;
216 224
217 if (nowayout) 225 if (nowayout)
@@ -231,15 +239,14 @@ static int s3c2410wdt_release(struct inode *inode, struct file *file)
231 * Lock it in if it's a module and we set nowayout 239 * Lock it in if it's a module and we set nowayout
232 */ 240 */
233 241
234 if (allow_close == CLOSE_STATE_ALLOW) { 242 if (allow_close == CLOSE_STATE_ALLOW)
235 s3c2410wdt_stop(); 243 s3c2410wdt_stop();
236 } else { 244 else {
237 dev_err(wdt_dev, "Unexpected close, not stopping watchdog\n"); 245 dev_err(wdt_dev, "Unexpected close, not stopping watchdog\n");
238 s3c2410wdt_keepalive(); 246 s3c2410wdt_keepalive();
239 } 247 }
240
241 allow_close = CLOSE_STATE_NOT; 248 allow_close = CLOSE_STATE_NOT;
242 up(&open_lock); 249 clear_bit(0, &open_lock);
243 return 0; 250 return 0;
244} 251}
245 252
@@ -249,7 +256,7 @@ static ssize_t s3c2410wdt_write(struct file *file, const char __user *data,
249 /* 256 /*
250 * Refresh the timer. 257 * Refresh the timer.
251 */ 258 */
252 if(len) { 259 if (len) {
253 if (!nowayout) { 260 if (!nowayout) {
254 size_t i; 261 size_t i;
255 262
@@ -265,7 +272,6 @@ static ssize_t s3c2410wdt_write(struct file *file, const char __user *data,
265 allow_close = CLOSE_STATE_ALLOW; 272 allow_close = CLOSE_STATE_ALLOW;
266 } 273 }
267 } 274 }
268
269 s3c2410wdt_keepalive(); 275 s3c2410wdt_keepalive();
270 } 276 }
271 return len; 277 return len;
@@ -273,48 +279,41 @@ static ssize_t s3c2410wdt_write(struct file *file, const char __user *data,
273 279
274#define OPTIONS WDIOF_SETTIMEOUT | WDIOF_KEEPALIVEPING | WDIOF_MAGICCLOSE 280#define OPTIONS WDIOF_SETTIMEOUT | WDIOF_KEEPALIVEPING | WDIOF_MAGICCLOSE
275 281
276static struct watchdog_info s3c2410_wdt_ident = { 282static const struct watchdog_info s3c2410_wdt_ident = {
277 .options = OPTIONS, 283 .options = OPTIONS,
278 .firmware_version = 0, 284 .firmware_version = 0,
279 .identity = "S3C2410 Watchdog", 285 .identity = "S3C2410 Watchdog",
280}; 286};
281 287
282 288
283static int s3c2410wdt_ioctl(struct inode *inode, struct file *file, 289static long s3c2410wdt_ioctl(struct file *file, unsigned int cmd,
284 unsigned int cmd, unsigned long arg) 290 unsigned long arg)
285{ 291{
286 void __user *argp = (void __user *)arg; 292 void __user *argp = (void __user *)arg;
287 int __user *p = argp; 293 int __user *p = argp;
288 int new_margin; 294 int new_margin;
289 295
290 switch (cmd) { 296 switch (cmd) {
291 default: 297 case WDIOC_GETSUPPORT:
292 return -ENOTTY; 298 return copy_to_user(argp, &s3c2410_wdt_ident,
293 299 sizeof(s3c2410_wdt_ident)) ? -EFAULT : 0;
294 case WDIOC_GETSUPPORT: 300 case WDIOC_GETSTATUS:
295 return copy_to_user(argp, &s3c2410_wdt_ident, 301 case WDIOC_GETBOOTSTATUS:
296 sizeof(s3c2410_wdt_ident)) ? -EFAULT : 0; 302 return put_user(0, p);
297 303 case WDIOC_KEEPALIVE:
298 case WDIOC_GETSTATUS: 304 s3c2410wdt_keepalive();
299 case WDIOC_GETBOOTSTATUS: 305 return 0;
300 return put_user(0, p); 306 case WDIOC_SETTIMEOUT:
301 307 if (get_user(new_margin, p))
302 case WDIOC_KEEPALIVE: 308 return -EFAULT;
303 s3c2410wdt_keepalive(); 309 if (s3c2410wdt_set_heartbeat(new_margin))
304 return 0; 310 return -EINVAL;
305 311 s3c2410wdt_keepalive();
306 case WDIOC_SETTIMEOUT: 312 return put_user(tmr_margin, p);
307 if (get_user(new_margin, p)) 313 case WDIOC_GETTIMEOUT:
308 return -EFAULT; 314 return put_user(tmr_margin, p);
309 315 default:
310 if (s3c2410wdt_set_heartbeat(new_margin)) 316 return -ENOTTY;
311 return -EINVAL;
312
313 s3c2410wdt_keepalive();
314 return put_user(tmr_margin, p);
315
316 case WDIOC_GETTIMEOUT:
317 return put_user(tmr_margin, p);
318 } 317 }
319} 318}
320 319
@@ -324,7 +323,7 @@ static const struct file_operations s3c2410wdt_fops = {
324 .owner = THIS_MODULE, 323 .owner = THIS_MODULE,
325 .llseek = no_llseek, 324 .llseek = no_llseek,
326 .write = s3c2410wdt_write, 325 .write = s3c2410wdt_write,
327 .ioctl = s3c2410wdt_ioctl, 326 .unlocked_ioctl = s3c2410wdt_ioctl,
328 .open = s3c2410wdt_open, 327 .open = s3c2410wdt_open,
329 .release = s3c2410wdt_release, 328 .release = s3c2410wdt_release,
330}; 329};
@@ -411,14 +410,15 @@ static int s3c2410wdt_probe(struct platform_device *pdev)
411 * not, try the default value */ 410 * not, try the default value */
412 411
413 if (s3c2410wdt_set_heartbeat(tmr_margin)) { 412 if (s3c2410wdt_set_heartbeat(tmr_margin)) {
414 started = s3c2410wdt_set_heartbeat(CONFIG_S3C2410_WATCHDOG_DEFAULT_TIME); 413 started = s3c2410wdt_set_heartbeat(
414 CONFIG_S3C2410_WATCHDOG_DEFAULT_TIME);
415 415
416 if (started == 0) { 416 if (started == 0)
417 dev_info(dev,"tmr_margin value out of range, default %d used\n", 417 dev_info(dev,
418 "tmr_margin value out of range, default %d used\n",
418 CONFIG_S3C2410_WATCHDOG_DEFAULT_TIME); 419 CONFIG_S3C2410_WATCHDOG_DEFAULT_TIME);
419 } else { 420 else
420 dev_info(dev, "default timer value is out of range, cannot start\n"); 421 dev_info(dev, "default timer value is out of range, cannot start\n");
421 }
422 } 422 }
423 423
424 ret = misc_register(&s3c2410wdt_miscdev); 424 ret = misc_register(&s3c2410wdt_miscdev);
@@ -447,7 +447,7 @@ static int s3c2410wdt_probe(struct platform_device *pdev)
447 (wtcon & S3C2410_WTCON_ENABLE) ? "" : "in", 447 (wtcon & S3C2410_WTCON_ENABLE) ? "" : "in",
448 (wtcon & S3C2410_WTCON_RSTEN) ? "" : "dis", 448 (wtcon & S3C2410_WTCON_RSTEN) ? "" : "dis",
449 (wtcon & S3C2410_WTCON_INTEN) ? "" : "en"); 449 (wtcon & S3C2410_WTCON_INTEN) ? "" : "en");
450 450
451 return 0; 451 return 0;
452 452
453 err_clk: 453 err_clk:
@@ -487,7 +487,7 @@ static int s3c2410wdt_remove(struct platform_device *dev)
487 487
488static void s3c2410wdt_shutdown(struct platform_device *dev) 488static void s3c2410wdt_shutdown(struct platform_device *dev)
489{ 489{
490 s3c2410wdt_stop(); 490 s3c2410wdt_stop();
491} 491}
492 492
493#ifdef CONFIG_PM 493#ifdef CONFIG_PM
@@ -540,7 +540,8 @@ static struct platform_driver s3c2410wdt_driver = {
540}; 540};
541 541
542 542
543static char banner[] __initdata = KERN_INFO "S3C2410 Watchdog Timer, (c) 2004 Simtec Electronics\n"; 543static char banner[] __initdata =
544 KERN_INFO "S3C2410 Watchdog Timer, (c) 2004 Simtec Electronics\n";
544 545
545static int __init watchdog_init(void) 546static int __init watchdog_init(void)
546{ 547{
diff --git a/drivers/watchdog/sa1100_wdt.c b/drivers/watchdog/sa1100_wdt.c
index 34a2b3b81800..31a48437dc3d 100644
--- a/drivers/watchdog/sa1100_wdt.c
+++ b/drivers/watchdog/sa1100_wdt.c
@@ -26,13 +26,14 @@
26#include <linux/watchdog.h> 26#include <linux/watchdog.h>
27#include <linux/init.h> 27#include <linux/init.h>
28#include <linux/bitops.h> 28#include <linux/bitops.h>
29#include <linux/uaccess.h>
29 30
30#ifdef CONFIG_ARCH_PXA 31#ifdef CONFIG_ARCH_PXA
31#include <asm/arch/pxa-regs.h> 32#include <mach/pxa-regs.h>
32#endif 33#endif
33 34
34#include <asm/hardware.h> 35#include <mach/reset.h>
35#include <asm/uaccess.h> 36#include <mach/hardware.h>
36 37
37#define OSCR_FREQ CLOCK_TICK_RATE 38#define OSCR_FREQ CLOCK_TICK_RATE
38 39
@@ -45,7 +46,7 @@ static int boot_status;
45 */ 46 */
46static int sa1100dog_open(struct inode *inode, struct file *file) 47static int sa1100dog_open(struct inode *inode, struct file *file)
47{ 48{
48 if (test_and_set_bit(1,&sa1100wdt_users)) 49 if (test_and_set_bit(1, &sa1100wdt_users))
49 return -EBUSY; 50 return -EBUSY;
50 51
51 /* Activate SA1100 Watchdog timer */ 52 /* Activate SA1100 Watchdog timer */
@@ -66,28 +67,27 @@ static int sa1100dog_open(struct inode *inode, struct file *file)
66static int sa1100dog_release(struct inode *inode, struct file *file) 67static int sa1100dog_release(struct inode *inode, struct file *file)
67{ 68{
68 printk(KERN_CRIT "WATCHDOG: Device closed - timer will not stop\n"); 69 printk(KERN_CRIT "WATCHDOG: Device closed - timer will not stop\n");
69
70 clear_bit(1, &sa1100wdt_users); 70 clear_bit(1, &sa1100wdt_users);
71
72 return 0; 71 return 0;
73} 72}
74 73
75static ssize_t sa1100dog_write(struct file *file, const char __user *data, size_t len, loff_t *ppos) 74static ssize_t sa1100dog_write(struct file *file, const char __user *data,
75 size_t len, loff_t *ppos)
76{ 76{
77 if (len) 77 if (len)
78 /* Refresh OSMR3 timer. */ 78 /* Refresh OSMR3 timer. */
79 OSMR3 = OSCR + pre_margin; 79 OSMR3 = OSCR + pre_margin;
80
81 return len; 80 return len;
82} 81}
83 82
84static struct watchdog_info ident = { 83static const struct watchdog_info ident = {
85 .options = WDIOF_CARDRESET | WDIOF_SETTIMEOUT | WDIOF_KEEPALIVEPING, 84 .options = WDIOF_CARDRESET | WDIOF_SETTIMEOUT
85 | WDIOF_KEEPALIVEPING,
86 .identity = "SA1100/PXA255 Watchdog", 86 .identity = "SA1100/PXA255 Watchdog",
87}; 87};
88 88
89static int sa1100dog_ioctl(struct inode *inode, struct file *file, 89static long sa1100dog_ioctl(struct file *file, unsigned int cmd,
90 unsigned int cmd, unsigned long arg) 90 unsigned long arg)
91{ 91{
92 int ret = -ENOTTY; 92 int ret = -ENOTTY;
93 int time; 93 int time;
@@ -108,6 +108,11 @@ static int sa1100dog_ioctl(struct inode *inode, struct file *file,
108 ret = put_user(boot_status, p); 108 ret = put_user(boot_status, p);
109 break; 109 break;
110 110
111 case WDIOC_KEEPALIVE:
112 OSMR3 = OSCR + pre_margin;
113 ret = 0;
114 break;
115
111 case WDIOC_SETTIMEOUT: 116 case WDIOC_SETTIMEOUT:
112 ret = get_user(time, p); 117 ret = get_user(time, p);
113 if (ret) 118 if (ret)
@@ -125,27 +130,20 @@ static int sa1100dog_ioctl(struct inode *inode, struct file *file,
125 case WDIOC_GETTIMEOUT: 130 case WDIOC_GETTIMEOUT:
126 ret = put_user(pre_margin / OSCR_FREQ, p); 131 ret = put_user(pre_margin / OSCR_FREQ, p);
127 break; 132 break;
128
129 case WDIOC_KEEPALIVE:
130 OSMR3 = OSCR + pre_margin;
131 ret = 0;
132 break;
133 } 133 }
134 return ret; 134 return ret;
135} 135}
136 136
137static const struct file_operations sa1100dog_fops = 137static const struct file_operations sa1100dog_fops = {
138{
139 .owner = THIS_MODULE, 138 .owner = THIS_MODULE,
140 .llseek = no_llseek, 139 .llseek = no_llseek,
141 .write = sa1100dog_write, 140 .write = sa1100dog_write,
142 .ioctl = sa1100dog_ioctl, 141 .unlocked_ioctl = sa1100dog_ioctl,
143 .open = sa1100dog_open, 142 .open = sa1100dog_open,
144 .release = sa1100dog_release, 143 .release = sa1100dog_release,
145}; 144};
146 145
147static struct miscdevice sa1100dog_miscdev = 146static struct miscdevice sa1100dog_miscdev = {
148{
149 .minor = WATCHDOG_MINOR, 147 .minor = WATCHDOG_MINOR,
150 .name = "watchdog", 148 .name = "watchdog",
151 .fops = &sa1100dog_fops, 149 .fops = &sa1100dog_fops,
@@ -162,13 +160,15 @@ static int __init sa1100dog_init(void)
162 * we suspend, RCSR will be cleared, and the watchdog 160 * we suspend, RCSR will be cleared, and the watchdog
163 * reset reason will be lost. 161 * reset reason will be lost.
164 */ 162 */
165 boot_status = (RCSR & RCSR_WDR) ? WDIOF_CARDRESET : 0; 163 boot_status = (reset_status & RESET_STATUS_WATCHDOG) ?
164 WDIOF_CARDRESET : 0;
166 pre_margin = OSCR_FREQ * margin; 165 pre_margin = OSCR_FREQ * margin;
167 166
168 ret = misc_register(&sa1100dog_miscdev); 167 ret = misc_register(&sa1100dog_miscdev);
169 if (ret == 0) 168 if (ret == 0)
170 printk("SA1100/PXA2xx Watchdog Timer: timer margin %d sec\n", 169 printk(KERN_INFO
171 margin); 170 "SA1100/PXA2xx Watchdog Timer: timer margin %d sec\n",
171 margin);
172 return ret; 172 return ret;
173} 173}
174 174
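
The sa1100 handler keeps its timeout in hardware ticks (pre_margin = OSCR_FREQ * seconds) and converts back to seconds for WDIOC_GETTIMEOUT. The same seconds-to-ticks bookkeeping, sketched with an invented clock rate and an arbitrary upper bound rather than this driver's values:

#define EXAMPLE_TICKS_PER_SEC 3686400UL		/* invented clock rate for the sketch */

static unsigned long example_pre_margin;	/* timeout stored in ticks */

/* WDIOC_SETTIMEOUT path: validate the request, store it in ticks. */
static int example_set_timeout(int seconds)
{
	if (seconds <= 0 || seconds > 255)	/* arbitrary limit for illustration */
		return -EINVAL;
	example_pre_margin = EXAMPLE_TICKS_PER_SEC * seconds;
	return 0;
}

/* WDIOC_GETTIMEOUT path: report it back in seconds. */
static int example_get_timeout(void)
{
	return example_pre_margin / EXAMPLE_TICKS_PER_SEC;
}
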
diff --git a/drivers/watchdog/sb_wdog.c b/drivers/watchdog/sb_wdog.c
index b94431433695..27e526a07c9a 100644
--- a/drivers/watchdog/sb_wdog.c
+++ b/drivers/watchdog/sb_wdog.c
@@ -57,6 +57,7 @@
57#include <asm/sibyte/sb1250_int.h> 57#include <asm/sibyte/sb1250_int.h>
58#include <asm/sibyte/sb1250_scd.h> 58#include <asm/sibyte/sb1250_scd.h>
59 59
60static DEFINE_SPINLOCK(sbwd_lock);
60 61
61/* 62/*
62 * set the initial count value of a timer 63 * set the initial count value of a timer
@@ -65,8 +66,10 @@
65 */ 66 */
66void sbwdog_set(char __iomem *wdog, unsigned long t) 67void sbwdog_set(char __iomem *wdog, unsigned long t)
67{ 68{
69 spin_lock(&sbwd_lock);
68 __raw_writeb(0, wdog - 0x10); 70 __raw_writeb(0, wdog - 0x10);
69 __raw_writeq(t & 0x7fffffUL, wdog); 71 __raw_writeq(t & 0x7fffffUL, wdog);
72 spin_unlock(&sbwd_lock);
70} 73}
71 74
72/* 75/*
@@ -77,7 +80,9 @@ void sbwdog_set(char __iomem *wdog, unsigned long t)
77 */ 80 */
78void sbwdog_pet(char __iomem *wdog) 81void sbwdog_pet(char __iomem *wdog)
79{ 82{
83 spin_lock(&sbwd_lock);
80 __raw_writeb(__raw_readb(wdog) | 1, wdog); 84 __raw_writeb(__raw_readb(wdog) | 1, wdog);
85 spin_unlock(&sbwd_lock);
81} 86}
82 87
83static unsigned long sbwdog_gate; /* keeps it to one thread only */ 88static unsigned long sbwdog_gate; /* keeps it to one thread only */
@@ -86,8 +91,9 @@ static char __iomem *user_dog = (char __iomem *)(IO_BASE + (A_SCD_WDOG_CFG_1));
86static unsigned long timeout = 0x7fffffUL; /* useconds: 8.3ish secs. */ 91static unsigned long timeout = 0x7fffffUL; /* useconds: 8.3ish secs. */
87static int expect_close; 92static int expect_close;
88 93
89static struct watchdog_info ident = { 94static const struct watchdog_info ident = {
90 .options = WDIOF_CARDRESET | WDIOF_SETTIMEOUT | WDIOF_KEEPALIVEPING, 95 .options = WDIOF_CARDRESET | WDIOF_SETTIMEOUT |
96 WDIOF_KEEPALIVEPING,
91 .identity = "SiByte Watchdog", 97 .identity = "SiByte Watchdog",
92}; 98};
93 99
@@ -97,9 +103,8 @@ static struct watchdog_info ident = {
97static int sbwdog_open(struct inode *inode, struct file *file) 103static int sbwdog_open(struct inode *inode, struct file *file)
98{ 104{
99 nonseekable_open(inode, file); 105 nonseekable_open(inode, file);
100 if (test_and_set_bit(0, &sbwdog_gate)) { 106 if (test_and_set_bit(0, &sbwdog_gate))
101 return -EBUSY; 107 return -EBUSY;
102 }
103 __module_get(THIS_MODULE); 108 __module_get(THIS_MODULE);
104 109
105 /* 110 /*
@@ -120,8 +125,9 @@ static int sbwdog_release(struct inode *inode, struct file *file)
120 __raw_writeb(0, user_dog); 125 __raw_writeb(0, user_dog);
121 module_put(THIS_MODULE); 126 module_put(THIS_MODULE);
122 } else { 127 } else {
123 printk(KERN_CRIT "%s: Unexpected close, not stopping watchdog!\n", 128 printk(KERN_CRIT
124 ident.identity); 129 "%s: Unexpected close, not stopping watchdog!\n",
130 ident.identity);
125 sbwdog_pet(user_dog); 131 sbwdog_pet(user_dog);
126 } 132 }
127 clear_bit(0, &sbwdog_gate); 133 clear_bit(0, &sbwdog_gate);
@@ -147,12 +153,10 @@ static ssize_t sbwdog_write(struct file *file, const char __user *data,
147 for (i = 0; i != len; i++) { 153 for (i = 0; i != len; i++) {
148 char c; 154 char c;
149 155
150 if (get_user(c, data + i)) { 156 if (get_user(c, data + i))
151 return -EFAULT; 157 return -EFAULT;
152 } 158 if (c == 'V')
153 if (c == 'V') {
154 expect_close = 42; 159 expect_close = 42;
155 }
156 } 160 }
157 sbwdog_pet(user_dog); 161 sbwdog_pet(user_dog);
158 } 162 }
@@ -160,8 +164,8 @@ static ssize_t sbwdog_write(struct file *file, const char __user *data,
160 return len; 164 return len;
161} 165}
162 166
163static int sbwdog_ioctl(struct inode *inode, struct file *file, 167static long sbwdog_ioctl(struct file *file, unsigned int cmd,
164 unsigned int cmd, unsigned long arg) 168 unsigned long arg)
165{ 169{
166 int ret = -ENOTTY; 170 int ret = -ENOTTY;
167 unsigned long time; 171 unsigned long time;
@@ -178,11 +182,15 @@ static int sbwdog_ioctl(struct inode *inode, struct file *file,
178 ret = put_user(0, p); 182 ret = put_user(0, p);
179 break; 183 break;
180 184
185 case WDIOC_KEEPALIVE:
186 sbwdog_pet(user_dog);
187 ret = 0;
188 break;
189
181 case WDIOC_SETTIMEOUT: 190 case WDIOC_SETTIMEOUT:
182 ret = get_user(time, p); 191 ret = get_user(time, p);
183 if (ret) { 192 if (ret)
184 break; 193 break;
185 }
186 194
187 time *= 1000000; 195 time *= 1000000;
188 if (time > 0x7fffffUL) { 196 if (time > 0x7fffffUL) {
@@ -200,11 +208,6 @@ static int sbwdog_ioctl(struct inode *inode, struct file *file,
200 */ 208 */
201 ret = put_user(__raw_readq(user_dog - 8) / 1000000, p); 209 ret = put_user(__raw_readq(user_dog - 8) / 1000000, p);
202 break; 210 break;
203
204 case WDIOC_KEEPALIVE:
205 sbwdog_pet(user_dog);
206 ret = 0;
207 break;
208 } 211 }
209 return ret; 212 return ret;
210} 213}
@@ -212,8 +215,8 @@ static int sbwdog_ioctl(struct inode *inode, struct file *file,
212/* 215/*
213 * Notifier for system down 216 * Notifier for system down
214 */ 217 */
215static int 218static int sbwdog_notify_sys(struct notifier_block *this, unsigned long code,
216sbwdog_notify_sys(struct notifier_block *this, unsigned long code, void *erf) 219 void *erf)
217{ 220{
218 if (code == SYS_DOWN || code == SYS_HALT) { 221 if (code == SYS_DOWN || code == SYS_HALT) {
219 /* 222 /*
@@ -226,18 +229,16 @@ sbwdog_notify_sys(struct notifier_block *this, unsigned long code, void *erf)
226 return NOTIFY_DONE; 229 return NOTIFY_DONE;
227} 230}
228 231
229static const struct file_operations sbwdog_fops = 232static const struct file_operations sbwdog_fops = {
230{
231 .owner = THIS_MODULE, 233 .owner = THIS_MODULE,
232 .llseek = no_llseek, 234 .llseek = no_llseek,
233 .write = sbwdog_write, 235 .write = sbwdog_write,
234 .ioctl = sbwdog_ioctl, 236 .unlocked_ioctl = sbwdog_ioctl,
235 .open = sbwdog_open, 237 .open = sbwdog_open,
236 .release = sbwdog_release, 238 .release = sbwdog_release,
237}; 239};
238 240
239static struct miscdevice sbwdog_miscdev = 241static struct miscdevice sbwdog_miscdev = {
240{
241 .minor = WATCHDOG_MINOR, 242 .minor = WATCHDOG_MINOR,
242 .name = "watchdog", 243 .name = "watchdog",
243 .fops = &sbwdog_fops, 244 .fops = &sbwdog_fops,
@@ -267,13 +268,12 @@ irqreturn_t sbwdog_interrupt(int irq, void *addr)
267 /* 268 /*
268 * if it's the second watchdog timer, it's for those users 269 * if it's the second watchdog timer, it's for those users
269 */ 270 */
270 if (wd_cfg_reg == user_dog) { 271 if (wd_cfg_reg == user_dog)
271 printk(KERN_CRIT 272 printk(KERN_CRIT
272 "%s in danger of initiating system reset in %ld.%01ld seconds\n", 273 "%s in danger of initiating system reset in %ld.%01ld seconds\n",
273 ident.identity, wd_init / 1000000, (wd_init / 100000) % 10); 274 ident.identity, wd_init / 1000000, (wd_init / 100000) % 10);
274 } else { 275 else
275 cfg |= 1; 276 cfg |= 1;
276 }
277 277
278 __raw_writeb(cfg, wd_cfg_reg); 278 __raw_writeb(cfg, wd_cfg_reg);
279 279
@@ -289,28 +289,31 @@ static int __init sbwdog_init(void)
289 */ 289 */
290 ret = register_reboot_notifier(&sbwdog_notifier); 290 ret = register_reboot_notifier(&sbwdog_notifier);
291 if (ret) { 291 if (ret) {
292 printk (KERN_ERR "%s: cannot register reboot notifier (err=%d)\n", 292 printk(KERN_ERR
293 ident.identity, ret); 293 "%s: cannot register reboot notifier (err=%d)\n",
294 ident.identity, ret);
294 return ret; 295 return ret;
295 } 296 }
296 297
297 /* 298 /*
298 * get the resources 299 * get the resources
299 */ 300 */
300 ret = misc_register(&sbwdog_miscdev);
301 if (ret == 0) {
302 printk(KERN_INFO "%s: timeout is %ld.%ld secs\n", ident.identity,
303 timeout / 1000000, (timeout / 100000) % 10);
304 }
305 301
306 ret = request_irq(1, sbwdog_interrupt, IRQF_DISABLED | IRQF_SHARED, 302 ret = request_irq(1, sbwdog_interrupt, IRQF_DISABLED | IRQF_SHARED,
307 ident.identity, (void *)user_dog); 303 ident.identity, (void *)user_dog);
308 if (ret) { 304 if (ret) {
309 printk(KERN_ERR "%s: failed to request irq 1 - %d\n", ident.identity, 305 printk(KERN_ERR "%s: failed to request irq 1 - %d\n",
310 ret); 306 ident.identity, ret);
311 misc_deregister(&sbwdog_miscdev); 307 return ret;
312 } 308 }
313 309
310 ret = misc_register(&sbwdog_miscdev);
311 if (ret == 0) {
312 printk(KERN_INFO "%s: timeout is %ld.%ld secs\n",
313 ident.identity,
314 timeout / 1000000, (timeout / 100000) % 10);
315 } else
316 free_irq(1, (void *)user_dog);
314 return ret; 317 return ret;
315} 318}
316 319
@@ -327,7 +330,7 @@ MODULE_DESCRIPTION("SiByte Watchdog");
327 330
328module_param(timeout, ulong, 0); 331module_param(timeout, ulong, 0);
329MODULE_PARM_DESC(timeout, 332MODULE_PARM_DESC(timeout,
330 "Watchdog timeout in microseconds (max/default 8388607 or 8.3ish secs)"); 333 "Watchdog timeout in microseconds (max/default 8388607 or 8.3ish secs)");
331 334
332MODULE_LICENSE("GPL"); 335MODULE_LICENSE("GPL");
333MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR); 336MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR);
@@ -336,16 +339,15 @@ MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR);
336 * example code that can be put in a platform code area to utilize the 339 * example code that can be put in a platform code area to utilize the
337 * first watchdog timer for the kernels own purpose. 340 * first watchdog timer for the kernels own purpose.
338 341
339 void 342void platform_wd_setup(void)
340platform_wd_setup(void)
341{ 343{
342 int ret; 344 int ret;
343 345
344 ret = request_irq(0, sbwdog_interrupt, IRQF_DISABLED | IRQF_SHARED, 346 ret = request_irq(1, sbwdog_interrupt, IRQF_DISABLED | IRQF_SHARED,
345 "Kernel Watchdog", IOADDR(A_SCD_WDOG_CFG_0)); 347 "Kernel Watchdog", IOADDR(A_SCD_WDOG_CFG_0));
346 if (ret) { 348 if (ret) {
347 printk(KERN_CRIT "Watchdog IRQ zero(0) failed to be requested - %d\n", 349 printk(KERN_CRIT
348 ret); 350 "Watchdog IRQ zero(0) failed to be requested - %d\n", ret);
349 } 351 }
350} 352}
351 353
diff --git a/drivers/watchdog/sbc60xxwdt.c b/drivers/watchdog/sbc60xxwdt.c
index ef76f01625e7..3266daaaecf8 100644
--- a/drivers/watchdog/sbc60xxwdt.c
+++ b/drivers/watchdog/sbc60xxwdt.c
@@ -16,19 +16,23 @@
16 * 16 *
17 * 12/4 - 2000 [Initial revision] 17 * 12/4 - 2000 [Initial revision]
18 * 25/4 - 2000 Added /dev/watchdog support 18 * 25/4 - 2000 Added /dev/watchdog support
19 * 09/5 - 2001 [smj@oro.net] fixed fop_write to "return 1" on success 19 * 09/5 - 2001 [smj@oro.net] fixed fop_write to "return 1"
20 * on success
20 * 12/4 - 2002 [rob@osinvestor.com] eliminate fop_read 21 * 12/4 - 2002 [rob@osinvestor.com] eliminate fop_read
21 * fix possible wdt_is_open race 22 * fix possible wdt_is_open race
22 * add CONFIG_WATCHDOG_NOWAYOUT support 23 * add CONFIG_WATCHDOG_NOWAYOUT support
23 * remove lock_kernel/unlock_kernel pairs 24 * remove lock_kernel/unlock_kernel pairs
24 * added KERN_* to printk's 25 * added KERN_* to printk's
25 * got rid of extraneous comments 26 * got rid of extraneous comments
26 * changed watchdog_info to correctly reflect what the driver offers 27 * changed watchdog_info to correctly reflect what
27 * added WDIOC_GETSTATUS, WDIOC_GETBOOTSTATUS, WDIOC_SETTIMEOUT, 28 * the driver offers
28 * WDIOC_GETTIMEOUT, and WDIOC_SETOPTIONS ioctls 29 * added WDIOC_GETSTATUS, WDIOC_GETBOOTSTATUS,
30 * WDIOC_SETTIMEOUT, WDIOC_GETTIMEOUT, and
31 * WDIOC_SETOPTIONS ioctls
29 * 09/8 - 2003 [wim@iguana.be] cleanup of trailing spaces 32 * 09/8 - 2003 [wim@iguana.be] cleanup of trailing spaces
30 * use module_param 33 * use module_param
31 * made timeout (the emulated heartbeat) a module_param 34 * made timeout (the emulated heartbeat) a
35 * module_param
32 * made the keepalive ping an internal subroutine 36 * made the keepalive ping an internal subroutine
33 * made wdt_stop and wdt_start module params 37 * made wdt_stop and wdt_start module params
34 * added extra printk's for startup problems 38 * added extra printk's for startup problems
@@ -56,9 +60,9 @@
56#include <linux/notifier.h> 60#include <linux/notifier.h>
57#include <linux/reboot.h> 61#include <linux/reboot.h>
58#include <linux/init.h> 62#include <linux/init.h>
63#include <linux/io.h>
64#include <linux/uaccess.h>
59 65
60#include <asm/io.h>
61#include <asm/uaccess.h>
62#include <asm/system.h> 66#include <asm/system.h>
63 67
64#define OUR_NAME "sbc60xxwdt" 68#define OUR_NAME "sbc60xxwdt"
@@ -94,13 +98,18 @@ MODULE_PARM_DESC(wdt_start, "SBC60xx WDT 'start' io port (default 0x443)");
94 */ 98 */
95 99
96#define WATCHDOG_TIMEOUT 30 /* 30 sec default timeout */ 100#define WATCHDOG_TIMEOUT 30 /* 30 sec default timeout */
97static int timeout = WATCHDOG_TIMEOUT; /* in seconds, will be multiplied by HZ to get seconds to wait for a ping */ 101static int timeout = WATCHDOG_TIMEOUT; /* in seconds, multiplied by HZ to
102 get seconds to wait for a ping */
98module_param(timeout, int, 0); 103module_param(timeout, int, 0);
99MODULE_PARM_DESC(timeout, "Watchdog timeout in seconds. (1<=timeout<=3600, default=" __MODULE_STRING(WATCHDOG_TIMEOUT) ")"); 104MODULE_PARM_DESC(timeout,
105 "Watchdog timeout in seconds. (1<=timeout<=3600, default="
106 __MODULE_STRING(WATCHDOG_TIMEOUT) ")");
100 107
101static int nowayout = WATCHDOG_NOWAYOUT; 108static int nowayout = WATCHDOG_NOWAYOUT;
102module_param(nowayout, int, 0); 109module_param(nowayout, int, 0);
103MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped once started (default=" __MODULE_STRING(WATCHDOG_NOWAYOUT) ")"); 110MODULE_PARM_DESC(nowayout,
111 "Watchdog cannot be stopped once started (default="
112 __MODULE_STRING(WATCHDOG_NOWAYOUT) ")");
104 113
105static void wdt_timer_ping(unsigned long); 114static void wdt_timer_ping(unsigned long);
106static DEFINE_TIMER(timer, wdt_timer_ping, 0, 0); 115static DEFINE_TIMER(timer, wdt_timer_ping, 0, 0);
@@ -117,15 +126,14 @@ static void wdt_timer_ping(unsigned long data)
117 /* If we got a heartbeat pulse within the WDT_US_INTERVAL 126 /* If we got a heartbeat pulse within the WDT_US_INTERVAL
118 * we agree to ping the WDT 127 * we agree to ping the WDT
119 */ 128 */
120 if(time_before(jiffies, next_heartbeat)) 129 if (time_before(jiffies, next_heartbeat)) {
121 {
122 /* Ping the WDT by reading from wdt_start */ 130 /* Ping the WDT by reading from wdt_start */
123 inb_p(wdt_start); 131 inb_p(wdt_start);
124 /* Re-set the timer interval */ 132 /* Re-set the timer interval */
125 mod_timer(&timer, jiffies + WDT_INTERVAL); 133 mod_timer(&timer, jiffies + WDT_INTERVAL);
126 } else { 134 } else
127 printk(KERN_WARNING PFX "Heartbeat lost! Will not ping the watchdog\n"); 135 printk(KERN_WARNING PFX
128 } 136 "Heartbeat lost! Will not ping the watchdog\n");
129} 137}
130 138
131/* 139/*
@@ -159,40 +167,40 @@ static void wdt_keepalive(void)
159 * /dev/watchdog handling 167 * /dev/watchdog handling
160 */ 168 */
161 169
162static ssize_t fop_write(struct file * file, const char __user * buf, size_t count, loff_t * ppos) 170static ssize_t fop_write(struct file *file, const char __user *buf,
171 size_t count, loff_t *ppos)
163{ 172{
164 /* See if we got the magic character 'V' and reload the timer */ 173 /* See if we got the magic character 'V' and reload the timer */
165 if(count) 174 if (count) {
166 { 175 if (!nowayout) {
167 if (!nowayout)
168 {
169 size_t ofs; 176 size_t ofs;
170 177
171 /* note: just in case someone wrote the magic character 178 /* note: just in case someone wrote the
172 * five months ago... */ 179 magic character five months ago... */
173 wdt_expect_close = 0; 180 wdt_expect_close = 0;
174 181
175 /* scan to see whether or not we got the magic character */ 182 /* scan to see whether or not we got the
176 for(ofs = 0; ofs != count; ofs++) 183 magic character */
177 { 184 for (ofs = 0; ofs != count; ofs++) {
178 char c; 185 char c;
179 if(get_user(c, buf+ofs)) 186 if (get_user(c, buf + ofs))
180 return -EFAULT; 187 return -EFAULT;
181 if(c == 'V') 188 if (c == 'V')
182 wdt_expect_close = 42; 189 wdt_expect_close = 42;
183 } 190 }
184 } 191 }
185 192
186 /* Well, anyhow someone wrote to us, we should return that favour */ 193 /* Well, anyhow someone wrote to us, we should
194 return that favour */
187 wdt_keepalive(); 195 wdt_keepalive();
188 } 196 }
189 return count; 197 return count;
190} 198}
191 199
192static int fop_open(struct inode * inode, struct file * file) 200static int fop_open(struct inode *inode, struct file *file)
193{ 201{
194 /* Just in case we're already talking to someone... */ 202 /* Just in case we're already talking to someone... */
195 if(test_and_set_bit(0, &wdt_is_open)) 203 if (test_and_set_bit(0, &wdt_is_open))
196 return -EBUSY; 204 return -EBUSY;
197 205
198 if (nowayout) 206 if (nowayout)
@@ -203,78 +211,72 @@ static int fop_open(struct inode * inode, struct file * file)
203 return nonseekable_open(inode, file); 211 return nonseekable_open(inode, file);
204} 212}
205 213
206static int fop_close(struct inode * inode, struct file * file) 214static int fop_close(struct inode *inode, struct file *file)
207{ 215{
208 if(wdt_expect_close == 42) 216 if (wdt_expect_close == 42)
209 wdt_turnoff(); 217 wdt_turnoff();
210 else { 218 else {
211 del_timer(&timer); 219 del_timer(&timer);
212 printk(KERN_CRIT PFX "device file closed unexpectedly. Will not stop the WDT!\n"); 220 printk(KERN_CRIT PFX
221 "device file closed unexpectedly. Will not stop the WDT!\n");
213 } 222 }
214 clear_bit(0, &wdt_is_open); 223 clear_bit(0, &wdt_is_open);
215 wdt_expect_close = 0; 224 wdt_expect_close = 0;
216 return 0; 225 return 0;
217} 226}
218 227
219static int fop_ioctl(struct inode *inode, struct file *file, unsigned int cmd, 228static long fop_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
220 unsigned long arg)
221{ 229{
222 void __user *argp = (void __user *)arg; 230 void __user *argp = (void __user *)arg;
223 int __user *p = argp; 231 int __user *p = argp;
224 static struct watchdog_info ident= 232 static const struct watchdog_info ident = {
225 { 233 .options = WDIOF_KEEPALIVEPING | WDIOF_SETTIMEOUT |
226 .options = WDIOF_KEEPALIVEPING | WDIOF_SETTIMEOUT | WDIOF_MAGICCLOSE, 234 WDIOF_MAGICCLOSE,
227 .firmware_version = 1, 235 .firmware_version = 1,
228 .identity = "SBC60xx", 236 .identity = "SBC60xx",
229 }; 237 };
230 238
231 switch(cmd) 239 switch (cmd) {
240 case WDIOC_GETSUPPORT:
241 return copy_to_user(argp, &ident, sizeof(ident)) ? -EFAULT : 0;
242 case WDIOC_GETSTATUS:
243 case WDIOC_GETBOOTSTATUS:
244 return put_user(0, p);
245 case WDIOC_SETOPTIONS:
232 { 246 {
233 default: 247 int new_options, retval = -EINVAL;
234 return -ENOTTY; 248 if (get_user(new_options, p))
235 case WDIOC_GETSUPPORT: 249 return -EFAULT;
236 return copy_to_user(argp, &ident, sizeof(ident))?-EFAULT:0; 250 if (new_options & WDIOS_DISABLECARD) {
237 case WDIOC_GETSTATUS: 251 wdt_turnoff();
238 case WDIOC_GETBOOTSTATUS: 252 retval = 0;
239 return put_user(0, p);
240 case WDIOC_KEEPALIVE:
241 wdt_keepalive();
242 return 0;
243 case WDIOC_SETOPTIONS:
244 {
245 int new_options, retval = -EINVAL;
246
247 if(get_user(new_options, p))
248 return -EFAULT;
249
250 if(new_options & WDIOS_DISABLECARD) {
251 wdt_turnoff();
252 retval = 0;
253 }
254
255 if(new_options & WDIOS_ENABLECARD) {
256 wdt_startup();
257 retval = 0;
258 }
259
260 return retval;
261 } 253 }
262 case WDIOC_SETTIMEOUT: 254 if (new_options & WDIOS_ENABLECARD) {
263 { 255 wdt_startup();
264 int new_timeout; 256 retval = 0;
265
266 if(get_user(new_timeout, p))
267 return -EFAULT;
268
269 if(new_timeout < 1 || new_timeout > 3600) /* arbitrary upper limit */
270 return -EINVAL;
271
272 timeout = new_timeout;
273 wdt_keepalive();
274 /* Fall through */
275 } 257 }
276 case WDIOC_GETTIMEOUT: 258 return retval;
277 return put_user(timeout, p); 259 }
260 case WDIOC_KEEPALIVE:
261 wdt_keepalive();
262 return 0;
263 case WDIOC_SETTIMEOUT:
264 {
265 int new_timeout;
266 if (get_user(new_timeout, p))
267 return -EFAULT;
268 /* arbitrary upper limit */
269 if (new_timeout < 1 || new_timeout > 3600)
270 return -EINVAL;
271
272 timeout = new_timeout;
273 wdt_keepalive();
274 /* Fall through */
275 }
276 case WDIOC_GETTIMEOUT:
277 return put_user(timeout, p);
278 default:
279 return -ENOTTY;
278 } 280 }
279} 281}
280 282
@@ -284,7 +286,7 @@ static const struct file_operations wdt_fops = {
284 .write = fop_write, 286 .write = fop_write,
285 .open = fop_open, 287 .open = fop_open,
286 .release = fop_close, 288 .release = fop_close,
287 .ioctl = fop_ioctl, 289 .unlocked_ioctl = fop_ioctl,
288}; 290};
289 291
290static struct miscdevice wdt_miscdev = { 292static struct miscdevice wdt_miscdev = {
@@ -300,7 +302,7 @@ static struct miscdevice wdt_miscdev = {
300static int wdt_notify_sys(struct notifier_block *this, unsigned long code, 302static int wdt_notify_sys(struct notifier_block *this, unsigned long code,
301 void *unused) 303 void *unused)
302{ 304{
303 if(code==SYS_DOWN || code==SYS_HALT) 305 if (code == SYS_DOWN || code == SYS_HALT)
304 wdt_turnoff(); 306 wdt_turnoff();
305 return NOTIFY_DONE; 307 return NOTIFY_DONE;
306} 308}
@@ -310,8 +312,7 @@ static int wdt_notify_sys(struct notifier_block *this, unsigned long code,
310 * turn the timebomb registers off. 312 * turn the timebomb registers off.
311 */ 313 */
312 314
313static struct notifier_block wdt_notifier= 315static struct notifier_block wdt_notifier = {
314{
315 .notifier_call = wdt_notify_sys, 316 .notifier_call = wdt_notify_sys,
316}; 317};
317 318
@@ -324,23 +325,22 @@ static void __exit sbc60xxwdt_unload(void)
324 325
325 unregister_reboot_notifier(&wdt_notifier); 326 unregister_reboot_notifier(&wdt_notifier);
326 if ((wdt_stop != 0x45) && (wdt_stop != wdt_start)) 327 if ((wdt_stop != 0x45) && (wdt_stop != wdt_start))
327 release_region(wdt_stop,1); 328 release_region(wdt_stop, 1);
328 release_region(wdt_start,1); 329 release_region(wdt_start, 1);
329} 330}
330 331
331static int __init sbc60xxwdt_init(void) 332static int __init sbc60xxwdt_init(void)
332{ 333{
333 int rc = -EBUSY; 334 int rc = -EBUSY;
334 335
335 if(timeout < 1 || timeout > 3600) /* arbitrary upper limit */ 336 if (timeout < 1 || timeout > 3600) { /* arbitrary upper limit */
336 {
337 timeout = WATCHDOG_TIMEOUT; 337 timeout = WATCHDOG_TIMEOUT;
338 printk(KERN_INFO PFX "timeout value must be 1<=x<=3600, using %d\n", 338 printk(KERN_INFO PFX
339 timeout); 339 "timeout value must be 1 <= x <= 3600, using %d\n",
340 } 340 timeout);
341 }
341 342
342 if (!request_region(wdt_start, 1, "SBC 60XX WDT")) 343 if (!request_region(wdt_start, 1, "SBC 60XX WDT")) {
343 {
344 printk(KERN_ERR PFX "I/O address 0x%04x already in use\n", 344 printk(KERN_ERR PFX "I/O address 0x%04x already in use\n",
345 wdt_start); 345 wdt_start);
346 rc = -EIO; 346 rc = -EIO;
@@ -348,33 +348,30 @@ static int __init sbc60xxwdt_init(void)
348 } 348 }
349 349
350 /* We cannot reserve 0x45 - the kernel already has! */ 350 /* We cannot reserve 0x45 - the kernel already has! */
351 if ((wdt_stop != 0x45) && (wdt_stop != wdt_start)) 351 if (wdt_stop != 0x45 && wdt_stop != wdt_start) {
352 { 352 if (!request_region(wdt_stop, 1, "SBC 60XX WDT")) {
353 if (!request_region(wdt_stop, 1, "SBC 60XX WDT")) 353 printk(KERN_ERR PFX
354 { 354 "I/O address 0x%04x already in use\n",
355 printk(KERN_ERR PFX "I/O address 0x%04x already in use\n", 355 wdt_stop);
356 wdt_stop);
357 rc = -EIO; 356 rc = -EIO;
358 goto err_out_region1; 357 goto err_out_region1;
359 } 358 }
360 } 359 }
361 360
362 rc = register_reboot_notifier(&wdt_notifier); 361 rc = register_reboot_notifier(&wdt_notifier);
363 if (rc) 362 if (rc) {
364 { 363 printk(KERN_ERR PFX
365 printk(KERN_ERR PFX "cannot register reboot notifier (err=%d)\n", 364 "cannot register reboot notifier (err=%d)\n", rc);
366 rc);
367 goto err_out_region2; 365 goto err_out_region2;
368 } 366 }
369 367
370 rc = misc_register(&wdt_miscdev); 368 rc = misc_register(&wdt_miscdev);
371 if (rc) 369 if (rc) {
372 { 370 printk(KERN_ERR PFX
373 printk(KERN_ERR PFX "cannot register miscdev on minor=%d (err=%d)\n", 371 "cannot register miscdev on minor=%d (err=%d)\n",
374 wdt_miscdev.minor, rc); 372 wdt_miscdev.minor, rc);
375 goto err_out_reboot; 373 goto err_out_reboot;
376 } 374 }
377
378 printk(KERN_INFO PFX "WDT driver for 60XX single board computer initialised. timeout=%d sec (nowayout=%d)\n", 375 printk(KERN_INFO PFX "WDT driver for 60XX single board computer initialised. timeout=%d sec (nowayout=%d)\n",
379 timeout, nowayout); 376 timeout, nowayout);
380 377
@@ -383,10 +380,10 @@ static int __init sbc60xxwdt_init(void)
383err_out_reboot: 380err_out_reboot:
384 unregister_reboot_notifier(&wdt_notifier); 381 unregister_reboot_notifier(&wdt_notifier);
385err_out_region2: 382err_out_region2:
386 if ((wdt_stop != 0x45) && (wdt_stop != wdt_start)) 383 if (wdt_stop != 0x45 && wdt_stop != wdt_start)
387 release_region(wdt_stop,1); 384 release_region(wdt_stop, 1);
388err_out_region1: 385err_out_region1:
389 release_region(wdt_start,1); 386 release_region(wdt_start, 1);
390err_out: 387err_out:
391 return rc; 388 return rc;
392} 389}
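
The sbc60xxwdt_init() hunks above keep the driver's staged goto unwinding while reflowing the printk() calls. That error-path shape recurs in every driver touched by this series: each acquired resource is released by exactly one label, in reverse acquisition order. A minimal sketch of the pattern, reusing the driver's wdt_start, wdt_notifier and wdt_miscdev names for illustration only:

#include <linux/init.h>
#include <linux/ioport.h>
#include <linux/miscdevice.h>
#include <linux/reboot.h>

static int __init example_wdt_init(void)
{
	int rc = -EBUSY;

	if (!request_region(wdt_start, 1, "example WDT"))
		goto err_out;			/* nothing acquired yet */

	rc = register_reboot_notifier(&wdt_notifier);
	if (rc)
		goto err_out_region;		/* undo request_region() only */

	rc = misc_register(&wdt_miscdev);
	if (rc)
		goto err_out_reboot;		/* undo notifier, then region */

	return 0;

err_out_reboot:
	unregister_reboot_notifier(&wdt_notifier);
err_out_region:
	release_region(wdt_start, 1);
err_out:
	return rc;
}

Because the labels mirror the acquisition order, adding a new resource only means adding one request and one label, which is why the reformatted hunks leave the control flow itself untouched.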
diff --git a/drivers/watchdog/sbc7240_wdt.c b/drivers/watchdog/sbc7240_wdt.c
index 4c8cefbd8627..67ddeb1c830a 100644
--- a/drivers/watchdog/sbc7240_wdt.c
+++ b/drivers/watchdog/sbc7240_wdt.c
@@ -27,10 +27,10 @@
27#include <linux/reboot.h> 27#include <linux/reboot.h>
28#include <linux/types.h> 28#include <linux/types.h>
29#include <linux/watchdog.h> 29#include <linux/watchdog.h>
30#include <linux/io.h>
31#include <linux/uaccess.h>
30#include <asm/atomic.h> 32#include <asm/atomic.h>
31#include <asm/io.h>
32#include <asm/system.h> 33#include <asm/system.h>
33#include <asm/uaccess.h>
34 34
35#define SBC7240_PREFIX "sbc7240_wdt: " 35#define SBC7240_PREFIX "sbc7240_wdt: "
36 36
@@ -159,7 +159,7 @@ static int fop_close(struct inode *inode, struct file *file)
159 return 0; 159 return 0;
160} 160}
161 161
162static struct watchdog_info ident = { 162static const struct watchdog_info ident = {
163 .options = WDIOF_KEEPALIVEPING| 163 .options = WDIOF_KEEPALIVEPING|
164 WDIOF_SETTIMEOUT| 164 WDIOF_SETTIMEOUT|
165 WDIOF_MAGICCLOSE, 165 WDIOF_MAGICCLOSE,
@@ -168,50 +168,50 @@ static struct watchdog_info ident = {
168}; 168};
169 169
170 170
171static int fop_ioctl(struct inode *inode, struct file *file, unsigned int cmd, 171static long fop_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
172 unsigned long arg)
173{ 172{
174 switch (cmd) { 173 switch (cmd) {
175 case WDIOC_GETSUPPORT: 174 case WDIOC_GETSUPPORT:
176 return copy_to_user 175 return copy_to_user((void __user *)arg, &ident, sizeof(ident))
177 ((void __user *)arg, &ident, sizeof(ident)) 176 ? -EFAULT : 0;
178 ? -EFAULT : 0;
179 case WDIOC_GETSTATUS: 177 case WDIOC_GETSTATUS:
180 case WDIOC_GETBOOTSTATUS: 178 case WDIOC_GETBOOTSTATUS:
181 return put_user(0, (int __user *)arg); 179 return put_user(0, (int __user *)arg);
182 case WDIOC_KEEPALIVE: 180 case WDIOC_SETOPTIONS:
183 wdt_keepalive(); 181 {
184 return 0; 182 int options;
185 case WDIOC_SETOPTIONS:{ 183 int retval = -EINVAL;
186 int options;
187 int retval = -EINVAL;
188 184
189 if (get_user(options, (int __user *)arg)) 185 if (get_user(options, (int __user *)arg))
190 return -EFAULT; 186 return -EFAULT;
191 187
192 if (options & WDIOS_DISABLECARD) { 188 if (options & WDIOS_DISABLECARD) {
193 wdt_disable(); 189 wdt_disable();
194 retval = 0; 190 retval = 0;
195 } 191 }
196
197 if (options & WDIOS_ENABLECARD) {
198 wdt_enable();
199 retval = 0;
200 }
201 192
202 return retval; 193 if (options & WDIOS_ENABLECARD) {
194 wdt_enable();
195 retval = 0;
203 } 196 }
204 case WDIOC_SETTIMEOUT:{
205 int new_timeout;
206 197
207 if (get_user(new_timeout, (int __user *)arg)) 198 return retval;
208 return -EFAULT; 199 }
200 case WDIOC_KEEPALIVE:
201 wdt_keepalive();
202 return 0;
203 case WDIOC_SETTIMEOUT:
204 {
205 int new_timeout;
209 206
210 if (wdt_set_timeout(new_timeout)) 207 if (get_user(new_timeout, (int __user *)arg))
211 return -EINVAL; 208 return -EFAULT;
212 209
213 /* Fall through */ 210 if (wdt_set_timeout(new_timeout))
214 } 211 return -EINVAL;
212
213 /* Fall through */
214 }
215 case WDIOC_GETTIMEOUT: 215 case WDIOC_GETTIMEOUT:
216 return put_user(timeout, (int __user *)arg); 216 return put_user(timeout, (int __user *)arg);
217 default: 217 default:
@@ -225,7 +225,7 @@ static const struct file_operations wdt_fops = {
225 .write = fop_write, 225 .write = fop_write,
226 .open = fop_open, 226 .open = fop_open,
227 .release = fop_close, 227 .release = fop_close,
228 .ioctl = fop_ioctl, 228 .unlocked_ioctl = fop_ioctl,
229}; 229};
230 230
231static struct miscdevice wdt_miscdev = { 231static struct miscdevice wdt_miscdev = {
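
The sbc7240_wdt change is the recurring theme of the series: the old .ioctl entry point, which was called under the Big Kernel Lock and took an inode argument, becomes .unlocked_ioctl, which returns long and takes only the file, command and argument. A hedged sketch of the converted shape (ident and wdt_keepalive() stand in for the driver's own objects):

#include <linux/fs.h>
#include <linux/uaccess.h>
#include <linux/watchdog.h>

static long example_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
	void __user *argp = (void __user *)arg;

	switch (cmd) {
	case WDIOC_GETSUPPORT:
		return copy_to_user(argp, &ident, sizeof(ident)) ? -EFAULT : 0;
	case WDIOC_KEEPALIVE:
		wdt_keepalive();
		return 0;
	default:
		return -ENOTTY;
	}
}

static const struct file_operations example_fops = {
	.owner		= THIS_MODULE,
	.unlocked_ioctl	= example_ioctl,	/* was .ioctl before the conversion */
};

Dropping the BKL is also why several of these diffs add explicit spinlocks or atomic bit operations: the serialisation the old entry point got for free now has to be provided by the driver itself.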
diff --git a/drivers/watchdog/sbc8360.c b/drivers/watchdog/sbc8360.c
index 2ee2677f3648..fd83dd052d8c 100644
--- a/drivers/watchdog/sbc8360.c
+++ b/drivers/watchdog/sbc8360.c
@@ -48,13 +48,12 @@
48#include <linux/init.h> 48#include <linux/init.h>
49#include <linux/spinlock.h> 49#include <linux/spinlock.h>
50#include <linux/moduleparam.h> 50#include <linux/moduleparam.h>
51#include <linux/io.h>
52#include <linux/uaccess.h>
51 53
52#include <asm/io.h>
53#include <asm/uaccess.h>
54#include <asm/system.h> 54#include <asm/system.h>
55 55
56static unsigned long sbc8360_is_open; 56static unsigned long sbc8360_is_open;
57static DEFINE_SPINLOCK(sbc8360_lock);
58static char expect_close; 57static char expect_close;
59 58
60#define PFX "sbc8360: " 59#define PFX "sbc8360: "
@@ -204,7 +203,8 @@ module_param(timeout, int, 0);
204MODULE_PARM_DESC(timeout, "Index into timeout table (0-63) (default=27 (60s))"); 203MODULE_PARM_DESC(timeout, "Index into timeout table (0-63) (default=27 (60s))");
205module_param(nowayout, int, 0); 204module_param(nowayout, int, 0);
206MODULE_PARM_DESC(nowayout, 205MODULE_PARM_DESC(nowayout,
207 "Watchdog cannot be stopped once started (default=" __MODULE_STRING(WATCHDOG_NOWAYOUT) ")"); 206 "Watchdog cannot be stopped once started (default="
207 __MODULE_STRING(WATCHDOG_NOWAYOUT) ")");
208 208
209/* 209/*
210 * Kernel methods. 210 * Kernel methods.
@@ -231,9 +231,16 @@ static void sbc8360_ping(void)
231 outb(wd_margin, SBC8360_BASETIME); 231 outb(wd_margin, SBC8360_BASETIME);
232} 232}
233 233
234/* stop watchdog */
235static void sbc8360_stop(void)
236{
237 /* De-activate the watchdog */
238 outb(0, SBC8360_ENABLE);
239}
240
234/* Userspace pings kernel driver, or requests clean close */ 241/* Userspace pings kernel driver, or requests clean close */
235static ssize_t sbc8360_write(struct file *file, const char __user * buf, 242static ssize_t sbc8360_write(struct file *file, const char __user *buf,
236 size_t count, loff_t * ppos) 243 size_t count, loff_t *ppos)
237{ 244{
238 if (count) { 245 if (count) {
239 if (!nowayout) { 246 if (!nowayout) {
@@ -257,16 +264,12 @@ static ssize_t sbc8360_write(struct file *file, const char __user * buf,
257 264
258static int sbc8360_open(struct inode *inode, struct file *file) 265static int sbc8360_open(struct inode *inode, struct file *file)
259{ 266{
260 spin_lock(&sbc8360_lock); 267 if (test_and_set_bit(0, &sbc8360_is_open))
261 if (test_and_set_bit(0, &sbc8360_is_open)) {
262 spin_unlock(&sbc8360_lock);
263 return -EBUSY; 268 return -EBUSY;
264 }
265 if (nowayout) 269 if (nowayout)
266 __module_get(THIS_MODULE); 270 __module_get(THIS_MODULE);
267 271
268 /* Activate and ping once to start the countdown */ 272 /* Activate and ping once to start the countdown */
269 spin_unlock(&sbc8360_lock);
270 sbc8360_activate(); 273 sbc8360_activate();
271 sbc8360_ping(); 274 sbc8360_ping();
272 return nonseekable_open(inode, file); 275 return nonseekable_open(inode, file);
@@ -274,16 +277,14 @@ static int sbc8360_open(struct inode *inode, struct file *file)
274 277
275static int sbc8360_close(struct inode *inode, struct file *file) 278static int sbc8360_close(struct inode *inode, struct file *file)
276{ 279{
277 spin_lock(&sbc8360_lock);
278 if (expect_close == 42) 280 if (expect_close == 42)
279 outb(0, SBC8360_ENABLE); 281 sbc8360_stop();
280 else 282 else
281 printk(KERN_CRIT PFX 283 printk(KERN_CRIT PFX
282 "SBC8360 device closed unexpectedly. SBC8360 will not stop!\n"); 284 "SBC8360 device closed unexpectedly. SBC8360 will not stop!\n");
283 285
284 clear_bit(0, &sbc8360_is_open); 286 clear_bit(0, &sbc8360_is_open);
285 expect_close = 0; 287 expect_close = 0;
286 spin_unlock(&sbc8360_lock);
287 return 0; 288 return 0;
288} 289}
289 290
@@ -294,10 +295,9 @@ static int sbc8360_close(struct inode *inode, struct file *file)
294static int sbc8360_notify_sys(struct notifier_block *this, unsigned long code, 295static int sbc8360_notify_sys(struct notifier_block *this, unsigned long code,
295 void *unused) 296 void *unused)
296{ 297{
297 if (code == SYS_DOWN || code == SYS_HALT) { 298 if (code == SYS_DOWN || code == SYS_HALT)
298 /* Disable the SBC8360 Watchdog */ 299 sbc8360_stop(); /* Disable the SBC8360 Watchdog */
299 outb(0, SBC8360_ENABLE); 300
300 }
301 return NOTIFY_DONE; 301 return NOTIFY_DONE;
302} 302}
303 303
@@ -382,13 +382,13 @@ static int __init sbc8360_init(void)
382 382
383 return 0; 383 return 0;
384 384
385 out_nomisc: 385out_nomisc:
386 unregister_reboot_notifier(&sbc8360_notifier); 386 unregister_reboot_notifier(&sbc8360_notifier);
387 out_noreboot: 387out_noreboot:
388 release_region(SBC8360_BASETIME, 1); 388 release_region(SBC8360_BASETIME, 1);
389 out_nobasetimereg: 389out_nobasetimereg:
390 release_region(SBC8360_ENABLE, 1); 390 release_region(SBC8360_ENABLE, 1);
391 out: 391out:
392 return res; 392 return res;
393} 393}
394 394
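
Two things happen in the sbc8360 hunks: the spinlock that only guarded the single-open check is dropped in favour of an atomic bit on sbc8360_is_open, and the raw outb(0, SBC8360_ENABLE) is factored into sbc8360_stop() so the close path and the reboot notifier share one helper. The single-open guard, in sketch form with illustrative names:

#include <linux/bitops.h>
#include <linux/fs.h>

static unsigned long wdt_is_open;	/* bit 0 set while the device is held */

static int example_open(struct inode *inode, struct file *file)
{
	/* test_and_set_bit() is atomic, so no extra lock is needed */
	if (test_and_set_bit(0, &wdt_is_open))
		return -EBUSY;
	example_start();		/* assumed driver helper */
	return nonseekable_open(inode, file);
}

static int example_release(struct inode *inode, struct file *file)
{
	example_stop();			/* assumed driver helper */
	clear_bit(0, &wdt_is_open);	/* allow the next open() */
	return 0;
}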
diff --git a/drivers/watchdog/sbc_epx_c3.c b/drivers/watchdog/sbc_epx_c3.c
index 82cbd8809a69..e5e470ca7759 100644
--- a/drivers/watchdog/sbc_epx_c3.c
+++ b/drivers/watchdog/sbc_epx_c3.c
@@ -25,8 +25,8 @@
25#include <linux/reboot.h> 25#include <linux/reboot.h>
26#include <linux/init.h> 26#include <linux/init.h>
27#include <linux/ioport.h> 27#include <linux/ioport.h>
28#include <asm/uaccess.h> 28#include <linux/uaccess.h>
29#include <asm/io.h> 29#include <linux/io.h>
30 30
31#define PFX "epx_c3: " 31#define PFX "epx_c3: "
32static int epx_c3_alive; 32static int epx_c3_alive;
@@ -100,12 +100,12 @@ static ssize_t epx_c3_write(struct file *file, const char __user *data,
100 return len; 100 return len;
101} 101}
102 102
103static int epx_c3_ioctl(struct inode *inode, struct file *file, 103static long epx_c3_ioctl(struct file *file, unsigned int cmd,
104 unsigned int cmd, unsigned long arg) 104 unsigned long arg)
105{ 105{
106 int options, retval = -EINVAL; 106 int options, retval = -EINVAL;
107 int __user *argp = (void __user *)arg; 107 int __user *argp = (void __user *)arg;
108 static struct watchdog_info ident = { 108 static const struct watchdog_info ident = {
109 .options = WDIOF_KEEPALIVEPING | 109 .options = WDIOF_KEEPALIVEPING |
110 WDIOF_MAGICCLOSE, 110 WDIOF_MAGICCLOSE,
111 .firmware_version = 0, 111 .firmware_version = 0,
@@ -120,11 +120,6 @@ static int epx_c3_ioctl(struct inode *inode, struct file *file,
120 case WDIOC_GETSTATUS: 120 case WDIOC_GETSTATUS:
121 case WDIOC_GETBOOTSTATUS: 121 case WDIOC_GETBOOTSTATUS:
122 return put_user(0, argp); 122 return put_user(0, argp);
123 case WDIOC_KEEPALIVE:
124 epx_c3_pet();
125 return 0;
126 case WDIOC_GETTIMEOUT:
127 return put_user(WATCHDOG_TIMEOUT, argp);
128 case WDIOC_SETOPTIONS: 123 case WDIOC_SETOPTIONS:
129 if (get_user(options, argp)) 124 if (get_user(options, argp))
130 return -EFAULT; 125 return -EFAULT;
@@ -140,6 +135,11 @@ static int epx_c3_ioctl(struct inode *inode, struct file *file,
140 } 135 }
141 136
142 return retval; 137 return retval;
138 case WDIOC_KEEPALIVE:
139 epx_c3_pet();
140 return 0;
141 case WDIOC_GETTIMEOUT:
142 return put_user(WATCHDOG_TIMEOUT, argp);
143 default: 143 default:
144 return -ENOTTY; 144 return -ENOTTY;
145 } 145 }
@@ -158,7 +158,7 @@ static const struct file_operations epx_c3_fops = {
158 .owner = THIS_MODULE, 158 .owner = THIS_MODULE,
159 .llseek = no_llseek, 159 .llseek = no_llseek,
160 .write = epx_c3_write, 160 .write = epx_c3_write,
161 .ioctl = epx_c3_ioctl, 161 .unlocked_ioctl = epx_c3_ioctl,
162 .open = epx_c3_open, 162 .open = epx_c3_open,
163 .release = epx_c3_release, 163 .release = epx_c3_release,
164}; 164};
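
Besides the unlocked_ioctl switch, sbc_epx_c3 picks up two smaller cleanups that repeat through the rest of the series: userspace-access and port-I/O helpers come from <linux/uaccess.h> and <linux/io.h> instead of the <asm/...> headers, and the watchdog_info identity block becomes const because it is never written. In sketch form, with an illustrative identity string:

#include <linux/io.h>		/* preferred over <asm/io.h> */
#include <linux/uaccess.h>	/* preferred over <asm/uaccess.h> */
#include <linux/watchdog.h>

static const struct watchdog_info ident = {
	.options	   = WDIOF_KEEPALIVEPING | WDIOF_MAGICCLOSE,
	.firmware_version  = 0,
	.identity	   = "example watchdog",
};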
diff --git a/drivers/watchdog/sc1200wdt.c b/drivers/watchdog/sc1200wdt.c
index 35cddff7020f..23da3ccd832a 100644
--- a/drivers/watchdog/sc1200wdt.c
+++ b/drivers/watchdog/sc1200wdt.c
@@ -15,14 +15,18 @@
15 * 15 *
16 * Changelog: 16 * Changelog:
17 * 20020220 Zwane Mwaikambo Code based on datasheet, no hardware. 17 * 20020220 Zwane Mwaikambo Code based on datasheet, no hardware.
18 * 20020221 Zwane Mwaikambo Cleanups as suggested by Jeff Garzik and Alan Cox. 18 * 20020221 Zwane Mwaikambo Cleanups as suggested by Jeff Garzik
19 * and Alan Cox.
19 * 20020222 Zwane Mwaikambo Added probing. 20 * 20020222 Zwane Mwaikambo Added probing.
20 * 20020225 Zwane Mwaikambo Added ISAPNP support. 21 * 20020225 Zwane Mwaikambo Added ISAPNP support.
21 * 20020412 Rob Radez Broke out start/stop functions 22 * 20020412 Rob Radez Broke out start/stop functions
22 * <rob@osinvestor.com> Return proper status instead of temperature warning 23 * <rob@osinvestor.com> Return proper status instead of
23 * Add WDIOC_GETBOOTSTATUS and WDIOC_SETOPTIONS ioctls 24 * temperature warning
25 * Add WDIOC_GETBOOTSTATUS and
26 * WDIOC_SETOPTIONS ioctls
24 * Fix CONFIG_WATCHDOG_NOWAYOUT 27 * Fix CONFIG_WATCHDOG_NOWAYOUT
25 * 20020530 Joel Becker Add Matt Domsch's nowayout module option 28 * 20020530 Joel Becker Add Matt Domsch's nowayout module
29 * option
26 * 20030116 Adam Belay Updated to the latest pnp code 30 * 20030116 Adam Belay Updated to the latest pnp code
27 * 31 *
28 */ 32 */
@@ -39,9 +43,8 @@
39#include <linux/pnp.h> 43#include <linux/pnp.h>
40#include <linux/fs.h> 44#include <linux/fs.h>
41#include <linux/semaphore.h> 45#include <linux/semaphore.h>
42 46#include <linux/io.h>
43#include <asm/io.h> 47#include <linux/uaccess.h>
44#include <asm/uaccess.h>
45 48
46#define SC1200_MODULE_VER "build 20020303" 49#define SC1200_MODULE_VER "build 20020303"
47#define SC1200_MODULE_NAME "sc1200wdt" 50#define SC1200_MODULE_NAME "sc1200wdt"
@@ -72,7 +75,7 @@ static char banner[] __initdata = KERN_INFO PFX SC1200_MODULE_VER;
72static int timeout = 1; 75static int timeout = 1;
73static int io = -1; 76static int io = -1;
74static int io_len = 2; /* for non plug and play */ 77static int io_len = 2; /* for non plug and play */
75static struct semaphore open_sem; 78static unsigned long open_flag;
76static char expect_close; 79static char expect_close;
77static DEFINE_SPINLOCK(sc1200wdt_lock); /* io port access serialisation */ 80static DEFINE_SPINLOCK(sc1200wdt_lock); /* io port access serialisation */
78 81
@@ -81,7 +84,8 @@ static int isapnp = 1;
81static struct pnp_dev *wdt_dev; 84static struct pnp_dev *wdt_dev;
82 85
83module_param(isapnp, int, 0); 86module_param(isapnp, int, 0);
84MODULE_PARM_DESC(isapnp, "When set to 0 driver ISA PnP support will be disabled"); 87MODULE_PARM_DESC(isapnp,
88 "When set to 0 driver ISA PnP support will be disabled");
85#endif 89#endif
86 90
87module_param(io, int, 0); 91module_param(io, int, 0);
@@ -91,26 +95,40 @@ MODULE_PARM_DESC(timeout, "range is 0-255 minutes, default is 1");
91 95
92static int nowayout = WATCHDOG_NOWAYOUT; 96static int nowayout = WATCHDOG_NOWAYOUT;
93module_param(nowayout, int, 0); 97module_param(nowayout, int, 0);
94MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped once started (default=" __MODULE_STRING(WATCHDOG_NOWAYOUT) ")"); 98MODULE_PARM_DESC(nowayout,
99 "Watchdog cannot be stopped once started (default="
100 __MODULE_STRING(WATCHDOG_NOWAYOUT) ")");
95 101
96 102
97 103
98/* Read from Data Register */ 104/* Read from Data Register */
99static inline void sc1200wdt_read_data(unsigned char index, unsigned char *data) 105static inline void __sc1200wdt_read_data(unsigned char index,
106 unsigned char *data)
100{ 107{
101 spin_lock(&sc1200wdt_lock);
102 outb_p(index, PMIR); 108 outb_p(index, PMIR);
103 *data = inb(PMDR); 109 *data = inb(PMDR);
104 spin_unlock(&sc1200wdt_lock);
105} 110}
106 111
112static void sc1200wdt_read_data(unsigned char index, unsigned char *data)
113{
114 spin_lock(&sc1200wdt_lock);
115 __sc1200wdt_read_data(index, data);
116 spin_unlock(&sc1200wdt_lock);
117}
107 118
108/* Write to Data Register */ 119/* Write to Data Register */
109static inline void sc1200wdt_write_data(unsigned char index, unsigned char data) 120static inline void __sc1200wdt_write_data(unsigned char index,
121 unsigned char data)
110{ 122{
111 spin_lock(&sc1200wdt_lock);
112 outb_p(index, PMIR); 123 outb_p(index, PMIR);
113 outb(data, PMDR); 124 outb(data, PMDR);
125}
126
127static inline void sc1200wdt_write_data(unsigned char index,
128 unsigned char data)
129{
130 spin_lock(&sc1200wdt_lock);
131 __sc1200wdt_write_data(index, data);
114 spin_unlock(&sc1200wdt_lock); 132 spin_unlock(&sc1200wdt_lock);
115} 133}
116 134
@@ -118,22 +136,23 @@ static inline void sc1200wdt_write_data(unsigned char index, unsigned char data)
118static void sc1200wdt_start(void) 136static void sc1200wdt_start(void)
119{ 137{
120 unsigned char reg; 138 unsigned char reg;
139 spin_lock(&sc1200wdt_lock);
121 140
122 sc1200wdt_read_data(WDCF, &reg); 141 __sc1200wdt_read_data(WDCF, &reg);
123 /* assert WDO when any of the following interrupts are triggered too */ 142 /* assert WDO when any of the following interrupts are triggered too */
124 reg |= (KBC_IRQ | MSE_IRQ | UART1_IRQ | UART2_IRQ); 143 reg |= (KBC_IRQ | MSE_IRQ | UART1_IRQ | UART2_IRQ);
125 sc1200wdt_write_data(WDCF, reg); 144 __sc1200wdt_write_data(WDCF, reg);
126 /* set the timeout and get the ball rolling */ 145 /* set the timeout and get the ball rolling */
127 sc1200wdt_write_data(WDTO, timeout); 146 __sc1200wdt_write_data(WDTO, timeout);
128}
129 147
148 spin_unlock(&sc1200wdt_lock);
149}
130 150
131static void sc1200wdt_stop(void) 151static void sc1200wdt_stop(void)
132{ 152{
133 sc1200wdt_write_data(WDTO, 0); 153 sc1200wdt_write_data(WDTO, 0);
134} 154}
135 155
136
137/* This returns the status of the WDO signal, inactive high. */ 156/* This returns the status of the WDO signal, inactive high. */
138static inline int sc1200wdt_status(void) 157static inline int sc1200wdt_status(void)
139{ 158{
@@ -144,14 +163,13 @@ static inline int sc1200wdt_status(void)
144 * KEEPALIVEPING which is a bit of a kludge because there's nothing 163 * KEEPALIVEPING which is a bit of a kludge because there's nothing
145 * else for enabled/disabled status 164 * else for enabled/disabled status
146 */ 165 */
147 return (ret & 0x01) ? 0 : WDIOF_KEEPALIVEPING; /* bits 1 - 7 are undefined */ 166 return (ret & 0x01) ? 0 : WDIOF_KEEPALIVEPING;
148} 167}
149 168
150
151static int sc1200wdt_open(struct inode *inode, struct file *file) 169static int sc1200wdt_open(struct inode *inode, struct file *file)
152{ 170{
153 /* allow one at a time */ 171 /* allow one at a time */
154 if (down_trylock(&open_sem)) 172 if (test_and_set_bit(0, &open_flag))
155 return -EBUSY; 173 return -EBUSY;
156 174
157 if (timeout > MAX_TIMEOUT) 175 if (timeout > MAX_TIMEOUT)
@@ -164,71 +182,70 @@ static int sc1200wdt_open(struct inode *inode, struct file *file)
164} 182}
165 183
166 184
167static int sc1200wdt_ioctl(struct inode *inode, struct file *file, unsigned int cmd, unsigned long arg) 185static long sc1200wdt_ioctl(struct file *file, unsigned int cmd,
186 unsigned long arg)
168{ 187{
169 int new_timeout; 188 int new_timeout;
170 void __user *argp = (void __user *)arg; 189 void __user *argp = (void __user *)arg;
171 int __user *p = argp; 190 int __user *p = argp;
172 static struct watchdog_info ident = { 191 static const struct watchdog_info ident = {
173 .options = WDIOF_KEEPALIVEPING | WDIOF_SETTIMEOUT | WDIOF_MAGICCLOSE, 192 .options = WDIOF_KEEPALIVEPING | WDIOF_SETTIMEOUT |
193 WDIOF_MAGICCLOSE,
174 .firmware_version = 0, 194 .firmware_version = 0,
175 .identity = "PC87307/PC97307", 195 .identity = "PC87307/PC97307",
176 }; 196 };
177 197
178 switch (cmd) { 198 switch (cmd) {
179 default: 199 case WDIOC_GETSUPPORT:
180 return -ENOTTY; 200 if (copy_to_user(argp, &ident, sizeof ident))
201 return -EFAULT;
202 return 0;
181 203
182 case WDIOC_GETSUPPORT: 204 case WDIOC_GETSTATUS:
183 if (copy_to_user(argp, &ident, sizeof ident)) 205 return put_user(sc1200wdt_status(), p);
184 return -EFAULT;
185 return 0;
186 206
187 case WDIOC_GETSTATUS: 207 case WDIOC_GETBOOTSTATUS:
188 return put_user(sc1200wdt_status(), p); 208 return put_user(0, p);
189 209
190 case WDIOC_GETBOOTSTATUS: 210 case WDIOC_SETOPTIONS:
191 return put_user(0, p); 211 {
212 int options, retval = -EINVAL;
192 213
193 case WDIOC_KEEPALIVE: 214 if (get_user(options, p))
194 sc1200wdt_write_data(WDTO, timeout); 215 return -EFAULT;
195 return 0;
196 216
197 case WDIOC_SETTIMEOUT: 217 if (options & WDIOS_DISABLECARD) {
198 if (get_user(new_timeout, p)) 218 sc1200wdt_stop();
199 return -EFAULT; 219 retval = 0;
200 220 }
201 /* the API states this is given in secs */
202 new_timeout /= 60;
203 if (new_timeout < 0 || new_timeout > MAX_TIMEOUT)
204 return -EINVAL;
205
206 timeout = new_timeout;
207 sc1200wdt_write_data(WDTO, timeout);
208 /* fall through and return the new timeout */
209
210 case WDIOC_GETTIMEOUT:
211 return put_user(timeout * 60, p);
212
213 case WDIOC_SETOPTIONS:
214 {
215 int options, retval = -EINVAL;
216 221
217 if (get_user(options, p)) 222 if (options & WDIOS_ENABLECARD) {
218 return -EFAULT; 223 sc1200wdt_start();
224 retval = 0;
225 }
219 226
220 if (options & WDIOS_DISABLECARD) { 227 return retval;
221 sc1200wdt_stop(); 228 }
222 retval = 0; 229 case WDIOC_KEEPALIVE:
223 } 230 sc1200wdt_write_data(WDTO, timeout);
231 return 0;
232
233 case WDIOC_SETTIMEOUT:
234 if (get_user(new_timeout, p))
235 return -EFAULT;
236 /* the API states this is given in secs */
237 new_timeout /= 60;
238 if (new_timeout < 0 || new_timeout > MAX_TIMEOUT)
239 return -EINVAL;
240 timeout = new_timeout;
241 sc1200wdt_write_data(WDTO, timeout);
242 /* fall through and return the new timeout */
224 243
225 if (options & WDIOS_ENABLECARD) { 244 case WDIOC_GETTIMEOUT:
226 sc1200wdt_start(); 245 return put_user(timeout * 60, p);
227 retval = 0;
228 }
229 246
230 return retval; 247 default:
231 } 248 return -ENOTTY;
232 } 249 }
233} 250}
234 251
@@ -240,16 +257,18 @@ static int sc1200wdt_release(struct inode *inode, struct file *file)
240 printk(KERN_INFO PFX "Watchdog disabled\n"); 257 printk(KERN_INFO PFX "Watchdog disabled\n");
241 } else { 258 } else {
242 sc1200wdt_write_data(WDTO, timeout); 259 sc1200wdt_write_data(WDTO, timeout);
243 printk(KERN_CRIT PFX "Unexpected close!, timeout = %d min(s)\n", timeout); 260 printk(KERN_CRIT PFX
261 "Unexpected close!, timeout = %d min(s)\n", timeout);
244 } 262 }
245 up(&open_sem); 263 clear_bit(0, &open_flag);
246 expect_close = 0; 264 expect_close = 0;
247 265
248 return 0; 266 return 0;
249} 267}
250 268
251 269
252static ssize_t sc1200wdt_write(struct file *file, const char __user *data, size_t len, loff_t *ppos) 270static ssize_t sc1200wdt_write(struct file *file, const char __user *data,
271 size_t len, loff_t *ppos)
253{ 272{
254 if (len) { 273 if (len) {
255 if (!nowayout) { 274 if (!nowayout) {
@@ -260,7 +279,7 @@ static ssize_t sc1200wdt_write(struct file *file, const char __user *data, size_
260 for (i = 0; i != len; i++) { 279 for (i = 0; i != len; i++) {
261 char c; 280 char c;
262 281
263 if (get_user(c, data+i)) 282 if (get_user(c, data + i))
264 return -EFAULT; 283 return -EFAULT;
265 if (c == 'V') 284 if (c == 'V')
266 expect_close = 42; 285 expect_close = 42;
@@ -275,7 +294,8 @@ static ssize_t sc1200wdt_write(struct file *file, const char __user *data, size_
275} 294}
276 295
277 296
278static int sc1200wdt_notify_sys(struct notifier_block *this, unsigned long code, void *unused) 297static int sc1200wdt_notify_sys(struct notifier_block *this,
298 unsigned long code, void *unused)
279{ 299{
280 if (code == SYS_DOWN || code == SYS_HALT) 300 if (code == SYS_DOWN || code == SYS_HALT)
281 sc1200wdt_stop(); 301 sc1200wdt_stop();
@@ -284,23 +304,20 @@ static int sc1200wdt_notify_sys(struct notifier_block *this, unsigned long code,
284} 304}
285 305
286 306
287static struct notifier_block sc1200wdt_notifier = 307static struct notifier_block sc1200wdt_notifier = {
288{
289 .notifier_call = sc1200wdt_notify_sys, 308 .notifier_call = sc1200wdt_notify_sys,
290}; 309};
291 310
292static const struct file_operations sc1200wdt_fops = 311static const struct file_operations sc1200wdt_fops = {
293{
294 .owner = THIS_MODULE, 312 .owner = THIS_MODULE,
295 .llseek = no_llseek, 313 .llseek = no_llseek,
296 .write = sc1200wdt_write, 314 .write = sc1200wdt_write,
297 .ioctl = sc1200wdt_ioctl, 315 .unlocked_ioctl = sc1200wdt_ioctl,
298 .open = sc1200wdt_open, 316 .open = sc1200wdt_open,
299 .release = sc1200wdt_release, 317 .release = sc1200wdt_release,
300}; 318};
301 319
302static struct miscdevice sc1200wdt_miscdev = 320static struct miscdevice sc1200wdt_miscdev = {
303{
304 .minor = WATCHDOG_MINOR, 321 .minor = WATCHDOG_MINOR,
305 .name = "watchdog", 322 .name = "watchdog",
306 .fops = &sc1200wdt_fops, 323 .fops = &sc1200wdt_fops,
@@ -312,14 +329,14 @@ static int __init sc1200wdt_probe(void)
312 /* The probe works by reading the PMC3 register's default value of 0x0e 329 /* The probe works by reading the PMC3 register's default value of 0x0e
313 * there is one caveat, if the device disables the parallel port or any 330 * there is one caveat, if the device disables the parallel port or any
314 * of the UARTs we won't be able to detect it. 331 * of the UARTs we won't be able to detect it.
315 * Nb. This could be done with accuracy by reading the SID registers, but 332 * NB. This could be done with accuracy by reading the SID registers,
316 * we don't have access to those io regions. 333 * but we don't have access to those io regions.
317 */ 334 */
318 335
319 unsigned char reg; 336 unsigned char reg;
320 337
321 sc1200wdt_read_data(PMC3, &reg); 338 sc1200wdt_read_data(PMC3, &reg);
322 reg &= 0x0f; /* we don't want the UART busy bits */ 339 reg &= 0x0f; /* we don't want the UART busy bits */
323 return (reg == 0x0e) ? 0 : -ENODEV; 340 return (reg == 0x0e) ? 0 : -ENODEV;
324} 341}
325 342
@@ -332,7 +349,8 @@ static struct pnp_device_id scl200wdt_pnp_devices[] = {
332 {.id = ""}, 349 {.id = ""},
333}; 350};
334 351
335static int scl200wdt_pnp_probe(struct pnp_dev * dev, const struct pnp_device_id *dev_id) 352static int scl200wdt_pnp_probe(struct pnp_dev *dev,
353 const struct pnp_device_id *dev_id)
336{ 354{
337 /* this driver only supports one card at a time */ 355 /* this driver only supports one card at a time */
338 if (wdt_dev || !isapnp) 356 if (wdt_dev || !isapnp)
@@ -347,13 +365,14 @@ static int scl200wdt_pnp_probe(struct pnp_dev * dev, const struct pnp_device_id
347 return -EBUSY; 365 return -EBUSY;
348 } 366 }
349 367
350 printk(KERN_INFO "scl200wdt: PnP device found at io port %#x/%d\n", io, io_len); 368 printk(KERN_INFO "scl200wdt: PnP device found at io port %#x/%d\n",
369 io, io_len);
351 return 0; 370 return 0;
352} 371}
353 372
354static void scl200wdt_pnp_remove(struct pnp_dev * dev) 373static void scl200wdt_pnp_remove(struct pnp_dev *dev)
355{ 374{
356 if (wdt_dev){ 375 if (wdt_dev) {
357 release_region(io, io_len); 376 release_region(io, io_len);
358 wdt_dev = NULL; 377 wdt_dev = NULL;
359 } 378 }
@@ -375,8 +394,6 @@ static int __init sc1200wdt_init(void)
375 394
376 printk("%s\n", banner); 395 printk("%s\n", banner);
377 396
378 sema_init(&open_sem, 1);
379
380#if defined CONFIG_PNP 397#if defined CONFIG_PNP
381 if (isapnp) { 398 if (isapnp) {
382 ret = pnp_register_driver(&scl200wdt_pnp_driver); 399 ret = pnp_register_driver(&scl200wdt_pnp_driver);
@@ -410,13 +427,16 @@ static int __init sc1200wdt_init(void)
410 427
411 ret = register_reboot_notifier(&sc1200wdt_notifier); 428 ret = register_reboot_notifier(&sc1200wdt_notifier);
412 if (ret) { 429 if (ret) {
413 printk(KERN_ERR PFX "Unable to register reboot notifier err = %d\n", ret); 430 printk(KERN_ERR PFX
431 "Unable to register reboot notifier err = %d\n", ret);
414 goto out_io; 432 goto out_io;
415 } 433 }
416 434
417 ret = misc_register(&sc1200wdt_miscdev); 435 ret = misc_register(&sc1200wdt_miscdev);
418 if (ret) { 436 if (ret) {
419 printk(KERN_ERR PFX "Unable to register miscdev on minor %d\n", WATCHDOG_MINOR); 437 printk(KERN_ERR PFX
438 "Unable to register miscdev on minor %d\n",
439 WATCHDOG_MINOR);
420 goto out_rbt; 440 goto out_rbt;
421 } 441 }
422 442
@@ -446,7 +466,7 @@ static void __exit sc1200wdt_exit(void)
446 unregister_reboot_notifier(&sc1200wdt_notifier); 466 unregister_reboot_notifier(&sc1200wdt_notifier);
447 467
448#if defined CONFIG_PNP 468#if defined CONFIG_PNP
449 if(isapnp) 469 if (isapnp)
450 pnp_unregister_driver(&scl200wdt_pnp_driver); 470 pnp_unregister_driver(&scl200wdt_pnp_driver);
451 else 471 else
452#endif 472#endif
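
The interesting part of the sc1200wdt diff is the accessor split: the index/data register pokes move into __sc1200wdt_read_data()/__sc1200wdt_write_data(), which assume sc1200wdt_lock is already held, and sc1200wdt_start() then takes the lock once around the whole WDCF/WDTO sequence instead of locking per register access. The general shape, as a sketch that borrows the driver's PMIR/PMDR, WDCF/WDTO and IRQ-bit macros purely for illustration:

#include <linux/io.h>
#include <linux/spinlock.h>

static DEFINE_SPINLOCK(dev_lock);

/* callers of the __ variants must already hold dev_lock */
static void __read_reg(unsigned char index, unsigned char *data)
{
	outb_p(index, PMIR);
	*data = inb(PMDR);
}

static void __write_reg(unsigned char index, unsigned char data)
{
	outb_p(index, PMIR);
	outb(data, PMDR);
}

static void start_watchdog(void)
{
	unsigned char reg;

	/* one lock round-trip covers the whole configure-and-arm sequence */
	spin_lock(&dev_lock);
	__read_reg(WDCF, &reg);
	__write_reg(WDCF, reg | KBC_IRQ | MSE_IRQ);	/* assert WDO on these IRQs too */
	__write_reg(WDTO, timeout);			/* arm the timer */
	spin_unlock(&dev_lock);
}

Keeping the unlocked __ helpers separate avoids recursive locking while still letting single-register callers use the locked wrappers.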
diff --git a/drivers/watchdog/sc520_wdt.c b/drivers/watchdog/sc520_wdt.c
index 2847324a2be2..a2b6c1067ec5 100644
--- a/drivers/watchdog/sc520_wdt.c
+++ b/drivers/watchdog/sc520_wdt.c
@@ -64,9 +64,9 @@
64#include <linux/reboot.h> 64#include <linux/reboot.h>
65#include <linux/init.h> 65#include <linux/init.h>
66#include <linux/jiffies.h> 66#include <linux/jiffies.h>
67#include <linux/io.h>
68#include <linux/uaccess.h>
67 69
68#include <asm/io.h>
69#include <asm/uaccess.h>
70#include <asm/system.h> 70#include <asm/system.h>
71 71
72#define OUR_NAME "sc520_wdt" 72#define OUR_NAME "sc520_wdt"
@@ -91,13 +91,18 @@
91 */ 91 */
92 92
93#define WATCHDOG_TIMEOUT 30 /* 30 sec default timeout */ 93#define WATCHDOG_TIMEOUT 30 /* 30 sec default timeout */
94static int timeout = WATCHDOG_TIMEOUT; /* in seconds, will be multiplied by HZ to get seconds to wait for a ping */ 94/* in seconds, will be multiplied by HZ to get seconds to wait for a ping */
95static int timeout = WATCHDOG_TIMEOUT;
95module_param(timeout, int, 0); 96module_param(timeout, int, 0);
96MODULE_PARM_DESC(timeout, "Watchdog timeout in seconds. (1<=timeout<=3600, default=" __MODULE_STRING(WATCHDOG_TIMEOUT) ")"); 97MODULE_PARM_DESC(timeout,
98 "Watchdog timeout in seconds. (1 <= timeout <= 3600, default="
99 __MODULE_STRING(WATCHDOG_TIMEOUT) ")");
97 100
98static int nowayout = WATCHDOG_NOWAYOUT; 101static int nowayout = WATCHDOG_NOWAYOUT;
99module_param(nowayout, int, 0); 102module_param(nowayout, int, 0);
100MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped once started (default=" __MODULE_STRING(WATCHDOG_NOWAYOUT) ")"); 103MODULE_PARM_DESC(nowayout,
104 "Watchdog cannot be stopped once started (default="
105 __MODULE_STRING(WATCHDOG_NOWAYOUT) ")");
101 106
102/* 107/*
103 * AMD Elan SC520 - Watchdog Timer Registers 108 * AMD Elan SC520 - Watchdog Timer Registers
@@ -136,8 +141,7 @@ static void wdt_timer_ping(unsigned long data)
136 /* If we got a heartbeat pulse within the WDT_US_INTERVAL 141 /* If we got a heartbeat pulse within the WDT_US_INTERVAL
137 * we agree to ping the WDT 142 * we agree to ping the WDT
138 */ 143 */
139 if(time_before(jiffies, next_heartbeat)) 144 if (time_before(jiffies, next_heartbeat)) {
140 {
141 /* Ping the WDT */ 145 /* Ping the WDT */
142 spin_lock(&wdt_spinlock); 146 spin_lock(&wdt_spinlock);
143 writew(0xAAAA, wdtmrctl); 147 writew(0xAAAA, wdtmrctl);
@@ -146,9 +150,9 @@ static void wdt_timer_ping(unsigned long data)
146 150
147 /* Re-set the timer interval */ 151 /* Re-set the timer interval */
148 mod_timer(&timer, jiffies + WDT_INTERVAL); 152 mod_timer(&timer, jiffies + WDT_INTERVAL);
149 } else { 153 } else
150 printk(KERN_WARNING PFX "Heartbeat lost! Will not ping the watchdog\n"); 154 printk(KERN_WARNING PFX
151 } 155 "Heartbeat lost! Will not ping the watchdog\n");
152} 156}
153 157
154/* 158/*
@@ -162,7 +166,7 @@ static void wdt_config(int writeval)
162 166
163 /* buy some time (ping) */ 167 /* buy some time (ping) */
164 spin_lock_irqsave(&wdt_spinlock, flags); 168 spin_lock_irqsave(&wdt_spinlock, flags);
165 dummy=readw(wdtmrctl); /* ensure write synchronization */ 169 dummy = readw(wdtmrctl); /* ensure write synchronization */
166 writew(0xAAAA, wdtmrctl); 170 writew(0xAAAA, wdtmrctl);
167 writew(0x5555, wdtmrctl); 171 writew(0x5555, wdtmrctl);
168 /* unlock WDT = make WDT configuration register writable one time */ 172 /* unlock WDT = make WDT configuration register writable one time */
@@ -219,10 +223,11 @@ static int wdt_set_heartbeat(int t)
219 * /dev/watchdog handling 223 * /dev/watchdog handling
220 */ 224 */
221 225
222static ssize_t fop_write(struct file * file, const char __user * buf, size_t count, loff_t * ppos) 226static ssize_t fop_write(struct file *file, const char __user *buf,
227 size_t count, loff_t *ppos)
223{ 228{
224 /* See if we got the magic character 'V' and reload the timer */ 229 /* See if we got the magic character 'V' and reload the timer */
225 if(count) { 230 if (count) {
226 if (!nowayout) { 231 if (!nowayout) {
227 size_t ofs; 232 size_t ofs;
228 233
@@ -231,25 +236,26 @@ static ssize_t fop_write(struct file * file, const char __user * buf, size_t cou
231 wdt_expect_close = 0; 236 wdt_expect_close = 0;
232 237
233 /* now scan */ 238 /* now scan */
234 for(ofs = 0; ofs != count; ofs++) { 239 for (ofs = 0; ofs != count; ofs++) {
235 char c; 240 char c;
236 if (get_user(c, buf + ofs)) 241 if (get_user(c, buf + ofs))
237 return -EFAULT; 242 return -EFAULT;
238 if(c == 'V') 243 if (c == 'V')
239 wdt_expect_close = 42; 244 wdt_expect_close = 42;
240 } 245 }
241 } 246 }
242 247
243 /* Well, anyhow someone wrote to us, we should return that favour */ 248 /* Well, anyhow someone wrote to us, we should
249 return that favour */
244 wdt_keepalive(); 250 wdt_keepalive();
245 } 251 }
246 return count; 252 return count;
247} 253}
248 254
249static int fop_open(struct inode * inode, struct file * file) 255static int fop_open(struct inode *inode, struct file *file)
250{ 256{
251 /* Just in case we're already talking to someone... */ 257 /* Just in case we're already talking to someone... */
252 if(test_and_set_bit(0, &wdt_is_open)) 258 if (test_and_set_bit(0, &wdt_is_open))
253 return -EBUSY; 259 return -EBUSY;
254 if (nowayout) 260 if (nowayout)
255 __module_get(THIS_MODULE); 261 __module_get(THIS_MODULE);
@@ -259,12 +265,13 @@ static int fop_open(struct inode * inode, struct file * file)
259 return nonseekable_open(inode, file); 265 return nonseekable_open(inode, file);
260} 266}
261 267
262static int fop_close(struct inode * inode, struct file * file) 268static int fop_close(struct inode *inode, struct file *file)
263{ 269{
264 if(wdt_expect_close == 42) { 270 if (wdt_expect_close == 42)
265 wdt_turnoff(); 271 wdt_turnoff();
266 } else { 272 else {
267 printk(KERN_CRIT PFX "Unexpected close, not stopping watchdog!\n"); 273 printk(KERN_CRIT PFX
274 "Unexpected close, not stopping watchdog!\n");
268 wdt_keepalive(); 275 wdt_keepalive();
269 } 276 }
270 clear_bit(0, &wdt_is_open); 277 clear_bit(0, &wdt_is_open);
@@ -272,63 +279,62 @@ static int fop_close(struct inode * inode, struct file * file)
272 return 0; 279 return 0;
273} 280}
274 281
275static int fop_ioctl(struct inode *inode, struct file *file, unsigned int cmd, 282static long fop_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
276 unsigned long arg)
277{ 283{
278 void __user *argp = (void __user *)arg; 284 void __user *argp = (void __user *)arg;
279 int __user *p = argp; 285 int __user *p = argp;
280 static struct watchdog_info ident = { 286 static const struct watchdog_info ident = {
281 .options = WDIOF_KEEPALIVEPING | WDIOF_SETTIMEOUT | WDIOF_MAGICCLOSE, 287 .options = WDIOF_KEEPALIVEPING | WDIOF_SETTIMEOUT
288 | WDIOF_MAGICCLOSE,
282 .firmware_version = 1, 289 .firmware_version = 1,
283 .identity = "SC520", 290 .identity = "SC520",
284 }; 291 };
285 292
286 switch(cmd) 293 switch (cmd) {
294 case WDIOC_GETSUPPORT:
295 return copy_to_user(argp, &ident, sizeof(ident)) ? -EFAULT : 0;
296 case WDIOC_GETSTATUS:
297 case WDIOC_GETBOOTSTATUS:
298 return put_user(0, p);
299 case WDIOC_SETOPTIONS:
287 { 300 {
288 default: 301 int new_options, retval = -EINVAL;
289 return -ENOTTY;
290 case WDIOC_GETSUPPORT:
291 return copy_to_user(argp, &ident, sizeof(ident))?-EFAULT:0;
292 case WDIOC_GETSTATUS:
293 case WDIOC_GETBOOTSTATUS:
294 return put_user(0, p);
295 case WDIOC_KEEPALIVE:
296 wdt_keepalive();
297 return 0;
298 case WDIOC_SETOPTIONS:
299 {
300 int new_options, retval = -EINVAL;
301
302 if(get_user(new_options, p))
303 return -EFAULT;
304
305 if(new_options & WDIOS_DISABLECARD) {
306 wdt_turnoff();
307 retval = 0;
308 }
309 302
310 if(new_options & WDIOS_ENABLECARD) { 303 if (get_user(new_options, p))
311 wdt_startup(); 304 return -EFAULT;
312 retval = 0;
313 }
314 305
315 return retval; 306 if (new_options & WDIOS_DISABLECARD) {
307 wdt_turnoff();
308 retval = 0;
316 } 309 }
317 case WDIOC_SETTIMEOUT:
318 {
319 int new_timeout;
320 310
321 if(get_user(new_timeout, p)) 311 if (new_options & WDIOS_ENABLECARD) {
322 return -EFAULT; 312 wdt_startup();
313 retval = 0;
314 }
323 315
324 if(wdt_set_heartbeat(new_timeout)) 316 return retval;
325 return -EINVAL; 317 }
318 case WDIOC_KEEPALIVE:
319 wdt_keepalive();
320 return 0;
321 case WDIOC_SETTIMEOUT:
322 {
323 int new_timeout;
326 324
327 wdt_keepalive(); 325 if (get_user(new_timeout, p))
328 /* Fall through */ 326 return -EFAULT;
329 } 327
330 case WDIOC_GETTIMEOUT: 328 if (wdt_set_heartbeat(new_timeout))
331 return put_user(timeout, p); 329 return -EINVAL;
330
331 wdt_keepalive();
332 /* Fall through */
333 }
334 case WDIOC_GETTIMEOUT:
335 return put_user(timeout, p);
336 default:
337 return -ENOTTY;
332 } 338 }
333} 339}
334 340
@@ -338,7 +344,7 @@ static const struct file_operations wdt_fops = {
338 .write = fop_write, 344 .write = fop_write,
339 .open = fop_open, 345 .open = fop_open,
340 .release = fop_close, 346 .release = fop_close,
341 .ioctl = fop_ioctl, 347 .unlocked_ioctl = fop_ioctl,
342}; 348};
343 349
344static struct miscdevice wdt_miscdev = { 350static struct miscdevice wdt_miscdev = {
@@ -354,7 +360,7 @@ static struct miscdevice wdt_miscdev = {
354static int wdt_notify_sys(struct notifier_block *this, unsigned long code, 360static int wdt_notify_sys(struct notifier_block *this, unsigned long code,
355 void *unused) 361 void *unused)
356{ 362{
357 if(code==SYS_DOWN || code==SYS_HALT) 363 if (code == SYS_DOWN || code == SYS_HALT)
358 wdt_turnoff(); 364 wdt_turnoff();
359 return NOTIFY_DONE; 365 return NOTIFY_DONE;
360} 366}
@@ -383,11 +389,13 @@ static int __init sc520_wdt_init(void)
383{ 389{
384 int rc = -EBUSY; 390 int rc = -EBUSY;
385 391
386 /* Check that the timeout value is within it's range ; if not reset to the default */ 392 /* Check that the timeout value is within it's range ;
393 if not reset to the default */
387 if (wdt_set_heartbeat(timeout)) { 394 if (wdt_set_heartbeat(timeout)) {
388 wdt_set_heartbeat(WATCHDOG_TIMEOUT); 395 wdt_set_heartbeat(WATCHDOG_TIMEOUT);
389 printk(KERN_INFO PFX "timeout value must be 1<=timeout<=3600, using %d\n", 396 printk(KERN_INFO PFX
390 WATCHDOG_TIMEOUT); 397 "timeout value must be 1 <= timeout <= 3600, using %d\n",
398 WATCHDOG_TIMEOUT);
391 } 399 }
392 400
393 wdtmrctl = ioremap((unsigned long)(MMCR_BASE + OFFS_WDTMRCTL), 2); 401 wdtmrctl = ioremap((unsigned long)(MMCR_BASE + OFFS_WDTMRCTL), 2);
@@ -399,20 +407,22 @@ static int __init sc520_wdt_init(void)
399 407
400 rc = register_reboot_notifier(&wdt_notifier); 408 rc = register_reboot_notifier(&wdt_notifier);
401 if (rc) { 409 if (rc) {
402 printk(KERN_ERR PFX "cannot register reboot notifier (err=%d)\n", 410 printk(KERN_ERR PFX
403 rc); 411 "cannot register reboot notifier (err=%d)\n", rc);
404 goto err_out_ioremap; 412 goto err_out_ioremap;
405 } 413 }
406 414
407 rc = misc_register(&wdt_miscdev); 415 rc = misc_register(&wdt_miscdev);
408 if (rc) { 416 if (rc) {
409 printk(KERN_ERR PFX "cannot register miscdev on minor=%d (err=%d)\n", 417 printk(KERN_ERR PFX
410 WATCHDOG_MINOR, rc); 418 "cannot register miscdev on minor=%d (err=%d)\n",
419 WATCHDOG_MINOR, rc);
411 goto err_out_notifier; 420 goto err_out_notifier;
412 } 421 }
413 422
414 printk(KERN_INFO PFX "WDT driver for SC520 initialised. timeout=%d sec (nowayout=%d)\n", 423 printk(KERN_INFO PFX
415 timeout,nowayout); 424 "WDT driver for SC520 initialised. timeout=%d sec (nowayout=%d)\n",
425 timeout, nowayout);
416 426
417 return 0; 427 return 0;
418 428
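
sc520_wdt's fop_write() keeps the watchdog magic-close convention while being reformatted: unless nowayout is set, every write is scanned for the character 'V', and only a write containing it arms the clean-close path (wdt_expect_close = 42), which fop_close() checks before actually stopping the timer. A condensed sketch of that convention, assuming the driver's nowayout flag and keepalive helper:

#include <linux/fs.h>
#include <linux/uaccess.h>

static ssize_t example_write(struct file *file, const char __user *buf,
			     size_t count, loff_t *ppos)
{
	if (count) {
		if (!nowayout) {
			size_t ofs;

			expect_close = 0;	/* a fresh write resets the flag */
			for (ofs = 0; ofs != count; ofs++) {
				char c;

				if (get_user(c, buf + ofs))
					return -EFAULT;
				if (c == 'V')
					expect_close = 42;	/* magic close armed */
			}
		}
		wdt_keepalive();	/* any write counts as a ping */
	}
	return count;
}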
diff --git a/drivers/watchdog/scx200_wdt.c b/drivers/watchdog/scx200_wdt.c
index d55882bca319..9e19a10a5bb9 100644
--- a/drivers/watchdog/scx200_wdt.c
+++ b/drivers/watchdog/scx200_wdt.c
@@ -27,9 +27,8 @@
27#include <linux/fs.h> 27#include <linux/fs.h>
28#include <linux/ioport.h> 28#include <linux/ioport.h>
29#include <linux/scx200.h> 29#include <linux/scx200.h>
30 30#include <linux/uaccess.h>
31#include <asm/uaccess.h> 31#include <linux/io.h>
32#include <asm/io.h>
33 32
34#define NAME "scx200_wdt" 33#define NAME "scx200_wdt"
35 34
@@ -47,8 +46,9 @@ module_param(nowayout, int, 0);
47MODULE_PARM_DESC(nowayout, "Disable watchdog shutdown on close"); 46MODULE_PARM_DESC(nowayout, "Disable watchdog shutdown on close");
48 47
49static u16 wdto_restart; 48static u16 wdto_restart;
50static struct semaphore open_semaphore;
51static char expect_close; 49static char expect_close;
50static unsigned long open_lock;
51static DEFINE_SPINLOCK(scx_lock);
52 52
53/* Bits of the WDCNFG register */ 53/* Bits of the WDCNFG register */
54#define W_ENABLE 0x00fa /* Enable watchdog */ 54#define W_ENABLE 0x00fa /* Enable watchdog */
@@ -59,7 +59,9 @@ static char expect_close;
59 59
60static void scx200_wdt_ping(void) 60static void scx200_wdt_ping(void)
61{ 61{
62 spin_lock(&scx_lock);
62 outw(wdto_restart, scx200_cb_base + SCx200_WDT_WDTO); 63 outw(wdto_restart, scx200_cb_base + SCx200_WDT_WDTO);
64 spin_unlock(&scx_lock);
63} 65}
64 66
65static void scx200_wdt_update_margin(void) 67static void scx200_wdt_update_margin(void)
@@ -73,9 +75,11 @@ static void scx200_wdt_enable(void)
73 printk(KERN_DEBUG NAME ": enabling watchdog timer, wdto_restart = %d\n", 75 printk(KERN_DEBUG NAME ": enabling watchdog timer, wdto_restart = %d\n",
74 wdto_restart); 76 wdto_restart);
75 77
78 spin_lock(&scx_lock);
76 outw(0, scx200_cb_base + SCx200_WDT_WDTO); 79 outw(0, scx200_cb_base + SCx200_WDT_WDTO);
77 outb(SCx200_WDT_WDSTS_WDOVF, scx200_cb_base + SCx200_WDT_WDSTS); 80 outb(SCx200_WDT_WDSTS_WDOVF, scx200_cb_base + SCx200_WDT_WDSTS);
78 outw(W_ENABLE, scx200_cb_base + SCx200_WDT_WDCNFG); 81 outw(W_ENABLE, scx200_cb_base + SCx200_WDT_WDCNFG);
82 spin_unlock(&scx_lock);
79 83
80 scx200_wdt_ping(); 84 scx200_wdt_ping();
81} 85}
@@ -84,15 +88,17 @@ static void scx200_wdt_disable(void)
84{ 88{
85 printk(KERN_DEBUG NAME ": disabling watchdog timer\n"); 89 printk(KERN_DEBUG NAME ": disabling watchdog timer\n");
86 90
91 spin_lock(&scx_lock);
87 outw(0, scx200_cb_base + SCx200_WDT_WDTO); 92 outw(0, scx200_cb_base + SCx200_WDT_WDTO);
88 outb(SCx200_WDT_WDSTS_WDOVF, scx200_cb_base + SCx200_WDT_WDSTS); 93 outb(SCx200_WDT_WDSTS_WDOVF, scx200_cb_base + SCx200_WDT_WDSTS);
89 outw(W_DISABLE, scx200_cb_base + SCx200_WDT_WDCNFG); 94 outw(W_DISABLE, scx200_cb_base + SCx200_WDT_WDCNFG);
95 spin_unlock(&scx_lock);
90} 96}
91 97
92static int scx200_wdt_open(struct inode *inode, struct file *file) 98static int scx200_wdt_open(struct inode *inode, struct file *file)
93{ 99{
94 /* only allow one at a time */ 100 /* only allow one at a time */
95 if (down_trylock(&open_semaphore)) 101 if (test_and_set_bit(0, &open_lock))
96 return -EBUSY; 102 return -EBUSY;
97 scx200_wdt_enable(); 103 scx200_wdt_enable();
98 104
@@ -101,13 +107,12 @@ static int scx200_wdt_open(struct inode *inode, struct file *file)
101 107
102static int scx200_wdt_release(struct inode *inode, struct file *file) 108static int scx200_wdt_release(struct inode *inode, struct file *file)
103{ 109{
104 if (expect_close != 42) { 110 if (expect_close != 42)
105 printk(KERN_WARNING NAME ": watchdog device closed unexpectedly, will not disable the watchdog timer\n"); 111 printk(KERN_WARNING NAME ": watchdog device closed unexpectedly, will not disable the watchdog timer\n");
106 } else if (!nowayout) { 112 else if (!nowayout)
107 scx200_wdt_disable(); 113 scx200_wdt_disable();
108 }
109 expect_close = 0; 114 expect_close = 0;
110 up(&open_semaphore); 115 clear_bit(0, &open_lock);
111 116
112 return 0; 117 return 0;
113} 118}
@@ -122,8 +127,7 @@ static int scx200_wdt_notify_sys(struct notifier_block *this,
122 return NOTIFY_DONE; 127 return NOTIFY_DONE;
123} 128}
124 129
125static struct notifier_block scx200_wdt_notifier = 130static struct notifier_block scx200_wdt_notifier = {
126{
127 .notifier_call = scx200_wdt_notify_sys, 131 .notifier_call = scx200_wdt_notify_sys,
128}; 132};
129 133
@@ -131,8 +135,7 @@ static ssize_t scx200_wdt_write(struct file *file, const char __user *data,
131 size_t len, loff_t *ppos) 135 size_t len, loff_t *ppos)
132{ 136{
133 /* check for a magic close character */ 137 /* check for a magic close character */
134 if (len) 138 if (len) {
135 {
136 size_t i; 139 size_t i;
137 140
138 scx200_wdt_ping(); 141 scx200_wdt_ping();
@@ -140,7 +143,7 @@ static ssize_t scx200_wdt_write(struct file *file, const char __user *data,
140 expect_close = 0; 143 expect_close = 0;
141 for (i = 0; i < len; ++i) { 144 for (i = 0; i < len; ++i) {
142 char c; 145 char c;
143 if (get_user(c, data+i)) 146 if (get_user(c, data + i))
144 return -EFAULT; 147 return -EFAULT;
145 if (c == 'V') 148 if (c == 'V')
146 expect_close = 42; 149 expect_close = 42;
@@ -152,23 +155,21 @@ static ssize_t scx200_wdt_write(struct file *file, const char __user *data,
152 return 0; 155 return 0;
153} 156}
154 157
155static int scx200_wdt_ioctl(struct inode *inode, struct file *file, 158static long scx200_wdt_ioctl(struct file *file, unsigned int cmd,
156 unsigned int cmd, unsigned long arg) 159 unsigned long arg)
157{ 160{
158 void __user *argp = (void __user *)arg; 161 void __user *argp = (void __user *)arg;
159 int __user *p = argp; 162 int __user *p = argp;
160 static struct watchdog_info ident = { 163 static const struct watchdog_info ident = {
161 .identity = "NatSemi SCx200 Watchdog", 164 .identity = "NatSemi SCx200 Watchdog",
162 .firmware_version = 1, 165 .firmware_version = 1,
163 .options = (WDIOF_SETTIMEOUT | WDIOF_KEEPALIVEPING), 166 .options = WDIOF_SETTIMEOUT | WDIOF_KEEPALIVEPING,
164 }; 167 };
165 int new_margin; 168 int new_margin;
166 169
167 switch (cmd) { 170 switch (cmd) {
168 default:
169 return -ENOTTY;
170 case WDIOC_GETSUPPORT: 171 case WDIOC_GETSUPPORT:
171 if(copy_to_user(argp, &ident, sizeof(ident))) 172 if (copy_to_user(argp, &ident, sizeof(ident)))
172 return -EFAULT; 173 return -EFAULT;
173 return 0; 174 return 0;
174 case WDIOC_GETSTATUS: 175 case WDIOC_GETSTATUS:
@@ -191,22 +192,24 @@ static int scx200_wdt_ioctl(struct inode *inode, struct file *file,
191 if (put_user(margin, p)) 192 if (put_user(margin, p))
192 return -EFAULT; 193 return -EFAULT;
193 return 0; 194 return 0;
195 default:
196 return -ENOTTY;
194 } 197 }
195} 198}
196 199
197static const struct file_operations scx200_wdt_fops = { 200static const struct file_operations scx200_wdt_fops = {
198 .owner = THIS_MODULE, 201 .owner = THIS_MODULE,
199 .llseek = no_llseek, 202 .llseek = no_llseek,
200 .write = scx200_wdt_write, 203 .write = scx200_wdt_write,
201 .ioctl = scx200_wdt_ioctl, 204 .unlocked_ioctl = scx200_wdt_ioctl,
202 .open = scx200_wdt_open, 205 .open = scx200_wdt_open,
203 .release = scx200_wdt_release, 206 .release = scx200_wdt_release,
204}; 207};
205 208
206static struct miscdevice scx200_wdt_miscdev = { 209static struct miscdevice scx200_wdt_miscdev = {
207 .minor = WATCHDOG_MINOR, 210 .minor = WATCHDOG_MINOR,
208 .name = "watchdog", 211 .name = "watchdog",
209 .fops = &scx200_wdt_fops, 212 .fops = &scx200_wdt_fops,
210}; 213};
211 214
212static int __init scx200_wdt_init(void) 215static int __init scx200_wdt_init(void)
@@ -229,8 +232,6 @@ static int __init scx200_wdt_init(void)
229 scx200_wdt_update_margin(); 232 scx200_wdt_update_margin();
230 scx200_wdt_disable(); 233 scx200_wdt_disable();
231 234
232 sema_init(&open_semaphore, 1);
233
234 r = register_reboot_notifier(&scx200_wdt_notifier); 235 r = register_reboot_notifier(&scx200_wdt_notifier);
235 if (r) { 236 if (r) {
236 printk(KERN_ERR NAME ": unable to register reboot notifier"); 237 printk(KERN_ERR NAME ": unable to register reboot notifier");
@@ -263,7 +264,7 @@ module_exit(scx200_wdt_cleanup);
263 264
264/* 265/*
265 Local variables: 266 Local variables:
266 compile-command: "make -k -C ../.. SUBDIRS=drivers/char modules" 267 compile-command: "make -k -C ../.. SUBDIRS=drivers/char modules"
267 c-basic-offset: 8 268 c-basic-offset: 8
268 End: 269 End:
269*/ 270*/
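
scx200_wdt gets the same two-part locking change as the drivers above: the open_semaphore that only ever enforced single-open becomes a test_and_set_bit() flag, and the multi-register enable/disable sequences gain a spinlock (scx_lock) so the WDTO/WDSTS/WDCNFG writes cannot interleave once the BKL no longer serialises callers. Roughly, with cb_base standing in for the driver's scx200_cb_base:

#include <linux/io.h>
#include <linux/spinlock.h>

static DEFINE_SPINLOCK(hw_lock);

static void example_enable(void)
{
	/* the three register writes must be observed as one sequence */
	spin_lock(&hw_lock);
	outw(0, cb_base + SCx200_WDT_WDTO);
	outb(SCx200_WDT_WDSTS_WDOVF, cb_base + SCx200_WDT_WDSTS);
	outw(W_ENABLE, cb_base + SCx200_WDT_WDCNFG);
	spin_unlock(&hw_lock);
}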
diff --git a/drivers/watchdog/shwdt.c b/drivers/watchdog/shwdt.c
index 1277f7e9cc54..824125adf90a 100644
--- a/drivers/watchdog/shwdt.c
+++ b/drivers/watchdog/shwdt.c
@@ -28,9 +28,9 @@
28#include <linux/ioport.h> 28#include <linux/ioport.h>
29#include <linux/fs.h> 29#include <linux/fs.h>
30#include <linux/mm.h> 30#include <linux/mm.h>
31#include <asm/io.h> 31#include <linux/io.h>
32#include <asm/uaccess.h> 32#include <linux/uaccess.h>
33#include <asm/watchdog.h> 33#include <linux/watchdog.h>
34 34
35#define PFX "shwdt: " 35#define PFX "shwdt: "
36 36
@@ -72,6 +72,7 @@ static struct watchdog_info sh_wdt_info;
72static char shwdt_expect_close; 72static char shwdt_expect_close;
73static DEFINE_TIMER(timer, sh_wdt_ping, 0, 0); 73static DEFINE_TIMER(timer, sh_wdt_ping, 0, 0);
74static unsigned long next_heartbeat; 74static unsigned long next_heartbeat;
75static DEFINE_SPINLOCK(shwdt_lock);
75 76
76#define WATCHDOG_HEARTBEAT 30 /* 30 sec default heartbeat */ 77#define WATCHDOG_HEARTBEAT 30 /* 30 sec default heartbeat */
77static int heartbeat = WATCHDOG_HEARTBEAT; /* in seconds */ 78static int heartbeat = WATCHDOG_HEARTBEAT; /* in seconds */
@@ -86,6 +87,9 @@ static int nowayout = WATCHDOG_NOWAYOUT;
86static void sh_wdt_start(void) 87static void sh_wdt_start(void)
87{ 88{
88 __u8 csr; 89 __u8 csr;
90 unsigned long flags;
91
92 spin_lock_irqsave(&wdt_lock, flags);
89 93
90 next_heartbeat = jiffies + (heartbeat * HZ); 94 next_heartbeat = jiffies + (heartbeat * HZ);
91 mod_timer(&timer, next_ping_period(clock_division_ratio)); 95 mod_timer(&timer, next_ping_period(clock_division_ratio));
@@ -123,6 +127,7 @@ static void sh_wdt_start(void)
123 csr &= ~RSTCSR_RSTS; 127 csr &= ~RSTCSR_RSTS;
124 sh_wdt_write_rstcsr(csr); 128 sh_wdt_write_rstcsr(csr);
125#endif 129#endif
130 spin_unlock_irqrestore(&wdt_lock, flags);
126} 131}
127 132
128/** 133/**
@@ -132,12 +137,16 @@ static void sh_wdt_start(void)
132static void sh_wdt_stop(void) 137static void sh_wdt_stop(void)
133{ 138{
134 __u8 csr; 139 __u8 csr;
140 unsigned long flags;
141
142 spin_lock_irqsave(&wdt_lock, flags);
135 143
136 del_timer(&timer); 144 del_timer(&timer);
137 145
138 csr = sh_wdt_read_csr(); 146 csr = sh_wdt_read_csr();
139 csr &= ~WTCSR_TME; 147 csr &= ~WTCSR_TME;
140 sh_wdt_write_csr(csr); 148 sh_wdt_write_csr(csr);
149 spin_unlock_irqrestore(&wdt_lock, flags);
141} 150}
142 151
143/** 152/**
@@ -146,7 +155,11 @@ static void sh_wdt_stop(void)
146 */ 155 */
147static inline void sh_wdt_keepalive(void) 156static inline void sh_wdt_keepalive(void)
148{ 157{
158 unsigned long flags;
159
160 spin_lock_irqsave(&wdt_lock, flags);
149 next_heartbeat = jiffies + (heartbeat * HZ); 161 next_heartbeat = jiffies + (heartbeat * HZ);
162 spin_unlock_irqrestore(&wdt_lock, flags);
150} 163}
151 164
152/** 165/**
@@ -155,10 +168,14 @@ static inline void sh_wdt_keepalive(void)
155 */ 168 */
156static int sh_wdt_set_heartbeat(int t) 169static int sh_wdt_set_heartbeat(int t)
157{ 170{
158 if (unlikely((t < 1) || (t > 3600))) /* arbitrary upper limit */ 171 unsigned long flags;
172
173 if (unlikely(t < 1 || t > 3600)) /* arbitrary upper limit */
159 return -EINVAL; 174 return -EINVAL;
160 175
176 spin_lock_irqsave(&wdt_lock, flags);
161 heartbeat = t; 177 heartbeat = t;
178 spin_unlock_irqrestore(&wdt_lock, flags);
162 return 0; 179 return 0;
163} 180}
164 181
@@ -170,6 +187,9 @@ static int sh_wdt_set_heartbeat(int t)
170 */ 187 */
171static void sh_wdt_ping(unsigned long data) 188static void sh_wdt_ping(unsigned long data)
172{ 189{
190 unsigned long flags;
191
192 spin_lock_irqsave(&wdt_lock, flags);
173 if (time_before(jiffies, next_heartbeat)) { 193 if (time_before(jiffies, next_heartbeat)) {
174 __u8 csr; 194 __u8 csr;
175 195
@@ -183,6 +203,7 @@ static void sh_wdt_ping(unsigned long data)
183 } else 203 } else
184 printk(KERN_WARNING PFX "Heartbeat lost! Will not ping " 204 printk(KERN_WARNING PFX "Heartbeat lost! Will not ping "
185 "the watchdog\n"); 205 "the watchdog\n");
206 spin_unlock_irqrestore(&wdt_lock, flags);
186} 207}
187 208
188/** 209/**
@@ -310,7 +331,6 @@ static int sh_wdt_mmap(struct file *file, struct vm_area_struct *vma)
310 331
311/** 332/**
312 * sh_wdt_ioctl - Query Device 333 * sh_wdt_ioctl - Query Device
313 * @inode: inode of device
314 * @file: file handle of device 334 * @file: file handle of device
315 * @cmd: watchdog command 335 * @cmd: watchdog command
316 * @arg: argument 336 * @arg: argument
@@ -318,53 +338,51 @@ static int sh_wdt_mmap(struct file *file, struct vm_area_struct *vma)
318 * Query basic information from the device or ping it, as outlined by the 338 * Query basic information from the device or ping it, as outlined by the
319 * watchdog API. 339 * watchdog API.
320 */ 340 */
321static int sh_wdt_ioctl(struct inode *inode, struct file *file, 341static long sh_wdt_ioctl(struct file *file, unsigned int cmd,
322 unsigned int cmd, unsigned long arg) 342 unsigned long arg)
323{ 343{
324 int new_heartbeat; 344 int new_heartbeat;
325 int options, retval = -EINVAL; 345 int options, retval = -EINVAL;
326 346
327 switch (cmd) { 347 switch (cmd) {
328 case WDIOC_GETSUPPORT: 348 case WDIOC_GETSUPPORT:
329 return copy_to_user((struct watchdog_info *)arg, 349 return copy_to_user((struct watchdog_info *)arg,
330 &sh_wdt_info, 350 &sh_wdt_info, sizeof(sh_wdt_info)) ? -EFAULT : 0;
331 sizeof(sh_wdt_info)) ? -EFAULT : 0; 351 case WDIOC_GETSTATUS:
332 case WDIOC_GETSTATUS: 352 case WDIOC_GETBOOTSTATUS:
333 case WDIOC_GETBOOTSTATUS: 353 return put_user(0, (int *)arg);
334 return put_user(0, (int *)arg); 354 case WDIOC_SETOPTIONS:
335 case WDIOC_KEEPALIVE: 355 if (get_user(options, (int *)arg))
336 sh_wdt_keepalive(); 356 return -EFAULT;
337 return 0; 357
338 case WDIOC_SETTIMEOUT: 358 if (options & WDIOS_DISABLECARD) {
339 if (get_user(new_heartbeat, (int *)arg)) 359 sh_wdt_stop();
340 return -EFAULT; 360 retval = 0;
341 361 }
342 if (sh_wdt_set_heartbeat(new_heartbeat))
343 return -EINVAL;
344
345 sh_wdt_keepalive();
346 /* Fall */
347 case WDIOC_GETTIMEOUT:
348 return put_user(heartbeat, (int *)arg);
349 case WDIOC_SETOPTIONS:
350 if (get_user(options, (int *)arg))
351 return -EFAULT;
352
353 if (options & WDIOS_DISABLECARD) {
354 sh_wdt_stop();
355 retval = 0;
356 }
357 362
358 if (options & WDIOS_ENABLECARD) { 363 if (options & WDIOS_ENABLECARD) {
359 sh_wdt_start(); 364 sh_wdt_start();
360 retval = 0; 365 retval = 0;
361 } 366 }
362 367
363 return retval; 368 return retval;
364 default: 369 case WDIOC_KEEPALIVE:
365 return -ENOTTY; 370 sh_wdt_keepalive();
366 } 371 return 0;
372 case WDIOC_SETTIMEOUT:
373 if (get_user(new_heartbeat, (int *)arg))
374 return -EFAULT;
375
376 if (sh_wdt_set_heartbeat(new_heartbeat))
377 return -EINVAL;
367 378
379 sh_wdt_keepalive();
380 /* Fall */
381 case WDIOC_GETTIMEOUT:
382 return put_user(heartbeat, (int *)arg);
383 default:
384 return -ENOTTY;
385 }
368 return 0; 386 return 0;
369} 387}
370 388
@@ -390,13 +408,13 @@ static const struct file_operations sh_wdt_fops = {
390 .owner = THIS_MODULE, 408 .owner = THIS_MODULE,
391 .llseek = no_llseek, 409 .llseek = no_llseek,
392 .write = sh_wdt_write, 410 .write = sh_wdt_write,
393 .ioctl = sh_wdt_ioctl, 411 .unlocked_ioctl = sh_wdt_ioctl,
394 .open = sh_wdt_open, 412 .open = sh_wdt_open,
395 .release = sh_wdt_close, 413 .release = sh_wdt_close,
396 .mmap = sh_wdt_mmap, 414 .mmap = sh_wdt_mmap,
397}; 415};
398 416
399static struct watchdog_info sh_wdt_info = { 417static const struct watchdog_info sh_wdt_info = {
400 .options = WDIOF_KEEPALIVEPING | WDIOF_SETTIMEOUT | 418 .options = WDIOF_KEEPALIVEPING | WDIOF_SETTIMEOUT |
401 WDIOF_MAGICCLOSE, 419 WDIOF_MAGICCLOSE,
402 .firmware_version = 1, 420 .firmware_version = 1,
@@ -422,30 +440,33 @@ static int __init sh_wdt_init(void)
422{ 440{
423 int rc; 441 int rc;
424 442
425 if ((clock_division_ratio < 0x5) || (clock_division_ratio > 0x7)) { 443 if (clock_division_ratio < 0x5 || clock_division_ratio > 0x7) {
426 clock_division_ratio = WTCSR_CKS_4096; 444 clock_division_ratio = WTCSR_CKS_4096;
427 printk(KERN_INFO PFX "clock_division_ratio value must " 445 printk(KERN_INFO PFX
428 "be 0x5<=x<=0x7, using %d\n", clock_division_ratio); 446 "clock_division_ratio value must be 0x5<=x<=0x7, using %d\n",
447 clock_division_ratio);
429 } 448 }
430 449
431 rc = sh_wdt_set_heartbeat(heartbeat); 450 rc = sh_wdt_set_heartbeat(heartbeat);
432 if (unlikely(rc)) { 451 if (unlikely(rc)) {
433 heartbeat = WATCHDOG_HEARTBEAT; 452 heartbeat = WATCHDOG_HEARTBEAT;
434 printk(KERN_INFO PFX "heartbeat value must " 453 printk(KERN_INFO PFX
435 "be 1<=x<=3600, using %d\n", heartbeat); 454 "heartbeat value must be 1<=x<=3600, using %d\n",
455 heartbeat);
436 } 456 }
437 457
438 rc = register_reboot_notifier(&sh_wdt_notifier); 458 rc = register_reboot_notifier(&sh_wdt_notifier);
439 if (unlikely(rc)) { 459 if (unlikely(rc)) {
440 printk(KERN_ERR PFX "Can't register reboot notifier (err=%d)\n", 460 printk(KERN_ERR PFX
441 rc); 461 "Can't register reboot notifier (err=%d)\n", rc);
442 return rc; 462 return rc;
443 } 463 }
444 464
445 rc = misc_register(&sh_wdt_miscdev); 465 rc = misc_register(&sh_wdt_miscdev);
446 if (unlikely(rc)) { 466 if (unlikely(rc)) {
447 printk(KERN_ERR PFX "Can't register miscdev on " 467 printk(KERN_ERR PFX
448 "minor=%d (err=%d)\n", sh_wdt_miscdev.minor, rc); 468 "Can't register miscdev on minor=%d (err=%d)\n",
469 sh_wdt_miscdev.minor, rc);
449 unregister_reboot_notifier(&sh_wdt_notifier); 470 unregister_reboot_notifier(&sh_wdt_notifier);
450 return rc; 471 return rc;
451 } 472 }
@@ -476,10 +497,14 @@ module_param(clock_division_ratio, int, 0);
476MODULE_PARM_DESC(clock_division_ratio, "Clock division ratio. Valid ranges are from 0x5 (1.31ms) to 0x7 (5.25ms). (default=" __MODULE_STRING(clock_division_ratio) ")"); 497MODULE_PARM_DESC(clock_division_ratio, "Clock division ratio. Valid ranges are from 0x5 (1.31ms) to 0x7 (5.25ms). (default=" __MODULE_STRING(clock_division_ratio) ")");
477 498
478module_param(heartbeat, int, 0); 499module_param(heartbeat, int, 0);
479MODULE_PARM_DESC(heartbeat, "Watchdog heartbeat in seconds. (1<=heartbeat<=3600, default=" __MODULE_STRING(WATCHDOG_HEARTBEAT) ")"); 500MODULE_PARM_DESC(heartbeat,
501 "Watchdog heartbeat in seconds. (1 <= heartbeat <= 3600, default="
502 __MODULE_STRING(WATCHDOG_HEARTBEAT) ")");
480 503
481module_param(nowayout, int, 0); 504module_param(nowayout, int, 0);
482MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped once started (default=" __MODULE_STRING(WATCHDOG_NOWAYOUT) ")"); 505MODULE_PARM_DESC(nowayout,
506 "Watchdog cannot be stopped once started (default="
507 __MODULE_STRING(WATCHDOG_NOWAYOUT) ")");
483 508
484module_init(sh_wdt_init); 509module_init(sh_wdt_init);
485module_exit(sh_wdt_exit); 510module_exit(sh_wdt_exit);
diff --git a/drivers/watchdog/smsc37b787_wdt.c b/drivers/watchdog/smsc37b787_wdt.c
index 5d2b5ba61414..988ff1d5b4be 100644
--- a/drivers/watchdog/smsc37b787_wdt.c
+++ b/drivers/watchdog/smsc37b787_wdt.c
@@ -18,7 +18,7 @@
18 * History: 18 * History:
19 * 2003 - Created version 1.0 for Linux 2.4.x. 19 * 2003 - Created version 1.0 for Linux 2.4.x.
20 * 2006 - Ported to Linux 2.6, added nowayout and MAGICCLOSE 20 * 2006 - Ported to Linux 2.6, added nowayout and MAGICCLOSE
21 * features. Released version 1.1 21 * features. Released version 1.1
22 * 22 *
23 * Theory of operation: 23 * Theory of operation:
24 * 24 *
@@ -55,9 +55,9 @@
55#include <linux/reboot.h> 55#include <linux/reboot.h>
56#include <linux/init.h> 56#include <linux/init.h>
57#include <linux/spinlock.h> 57#include <linux/spinlock.h>
58#include <linux/io.h>
59#include <linux/uaccess.h>
58 60
59#include <asm/io.h>
60#include <asm/uaccess.h>
61#include <asm/system.h> 61#include <asm/system.h>
62 62
63/* enable support for minutes as units? */ 63/* enable support for minutes as units? */
@@ -71,15 +71,15 @@
71#define UNIT_MINUTE 1 71#define UNIT_MINUTE 1
72 72
73#define MODNAME "smsc37b787_wdt: " 73#define MODNAME "smsc37b787_wdt: "
74#define VERSION "1.1" 74#define VERSION "1.1"
75 75
76#define IOPORT 0x3F0 76#define IOPORT 0x3F0
77#define IOPORT_SIZE 2 77#define IOPORT_SIZE 2
78#define IODEV_NO 8 78#define IODEV_NO 8
79 79
80static int unit = UNIT_SECOND; /* timer's unit */ 80static int unit = UNIT_SECOND; /* timer's unit */
81static int timeout = 60; /* timeout value: default is 60 "units" */ 81static int timeout = 60; /* timeout value: default is 60 "units" */
82static unsigned long timer_enabled = 0; /* is the timer enabled? */ 82static unsigned long timer_enabled; /* is the timer enabled? */
83 83
84static char expect_close; /* is the close expected? */ 84static char expect_close; /* is the close expected? */
85 85
@@ -93,114 +93,121 @@ static int nowayout = WATCHDOG_NOWAYOUT;
93 93
94static inline void open_io_config(void) 94static inline void open_io_config(void)
95{ 95{
96 outb(0x55, IOPORT); 96 outb(0x55, IOPORT);
97 mdelay(1); 97 mdelay(1);
98 outb(0x55, IOPORT); 98 outb(0x55, IOPORT);
99} 99}
100 100
101/* lock the IO chip */ 101/* lock the IO chip */
102static inline void close_io_config(void) 102static inline void close_io_config(void)
103{ 103{
104 outb(0xAA, IOPORT); 104 outb(0xAA, IOPORT);
105} 105}
106 106
107/* select the IO device */ 107/* select the IO device */
108static inline void select_io_device(unsigned char devno) 108static inline void select_io_device(unsigned char devno)
109{ 109{
110 outb(0x07, IOPORT); 110 outb(0x07, IOPORT);
111 outb(devno, IOPORT+1); 111 outb(devno, IOPORT+1);
112} 112}
113 113
114/* write to the control register */ 114/* write to the control register */
115static inline void write_io_cr(unsigned char reg, unsigned char data) 115static inline void write_io_cr(unsigned char reg, unsigned char data)
116{ 116{
117 outb(reg, IOPORT); 117 outb(reg, IOPORT);
118 outb(data, IOPORT+1); 118 outb(data, IOPORT+1);
119} 119}
120 120
121/* read from the control register */ 121/* read from the control register */
122static inline char read_io_cr(unsigned char reg) 122static inline char read_io_cr(unsigned char reg)
123{ 123{
124 outb(reg, IOPORT); 124 outb(reg, IOPORT);
125 return inb(IOPORT+1); 125 return inb(IOPORT+1);
126} 126}
127 127
128/* -- Medium level functions ------------------------------------*/ 128/* -- Medium level functions ------------------------------------*/
129 129
130static inline void gpio_bit12(unsigned char reg) 130static inline void gpio_bit12(unsigned char reg)
131{ 131{
132 // -- General Purpose I/O Bit 1.2 -- 132 /* -- General Purpose I/O Bit 1.2 --
133 // Bit 0, In/Out: 0 = Output, 1 = Input 133 * Bit 0, In/Out: 0 = Output, 1 = Input
134 // Bit 1, Polarity: 0 = No Invert, 1 = Invert 134 * Bit 1, Polarity: 0 = No Invert, 1 = Invert
135 // Bit 2, Group Enable Intr.: 0 = Disable, 1 = Enable 135 * Bit 2, Group Enable Intr.: 0 = Disable, 1 = Enable
136 // Bit 3/4, Function select: 00 = GPI/O, 01 = WDT, 10 = P17, 136 * Bit 3/4, Function select: 00 = GPI/O, 01 = WDT, 10 = P17,
137 // 11 = Either Edge Triggered Intr. 2 137 * 11 = Either Edge Triggered Intr. 2
138 // Bit 5/6 (Reserved) 138 * Bit 5/6 (Reserved)
139 // Bit 7, Output Type: 0 = Push Pull Bit, 1 = Open Drain 139 * Bit 7, Output Type: 0 = Push Pull Bit, 1 = Open Drain
140 write_io_cr(0xE2, reg); 140 */
141 write_io_cr(0xE2, reg);
141} 142}
142 143
143static inline void gpio_bit13(unsigned char reg) 144static inline void gpio_bit13(unsigned char reg)
144{ 145{
145 // -- General Purpose I/O Bit 1.3 -- 146 /* -- General Purpose I/O Bit 1.3 --
146 // Bit 0, In/Out: 0 = Output, 1 = Input 147 * Bit 0, In/Out: 0 = Output, 1 = Input
147 // Bit 1, Polarity: 0 = No Invert, 1 = Invert 148 * Bit 1, Polarity: 0 = No Invert, 1 = Invert
148 // Bit 2, Group Enable Intr.: 0 = Disable, 1 = Enable 149 * Bit 2, Group Enable Intr.: 0 = Disable, 1 = Enable
149 // Bit 3, Function select: 0 = GPI/O, 1 = LED 150 * Bit 3, Function select: 0 = GPI/O, 1 = LED
150 // Bit 4-6 (Reserved) 151 * Bit 4-6 (Reserved)
151 // Bit 7, Output Type: 0 = Push Pull Bit, 1 = Open Drain 152 * Bit 7, Output Type: 0 = Push Pull Bit, 1 = Open Drain
152 write_io_cr(0xE3, reg); 153 */
154 write_io_cr(0xE3, reg);
153} 155}
154 156
155static inline void wdt_timer_units(unsigned char new_units) 157static inline void wdt_timer_units(unsigned char new_units)
156{ 158{
157 // -- Watchdog timer units -- 159 /* -- Watchdog timer units --
158 // Bit 0-6 (Reserved) 160 * Bit 0-6 (Reserved)
159 // Bit 7, WDT Time-out Value Units Select 161 * Bit 7, WDT Time-out Value Units Select
160 // (0 = Minutes, 1 = Seconds) 162 * (0 = Minutes, 1 = Seconds)
161 write_io_cr(0xF1, new_units); 163 */
164 write_io_cr(0xF1, new_units);
162} 165}
163 166
164static inline void wdt_timeout_value(unsigned char new_timeout) 167static inline void wdt_timeout_value(unsigned char new_timeout)
165{ 168{
166 // -- Watchdog Timer Time-out Value -- 169 /* -- Watchdog Timer Time-out Value --
167 // Bit 0-7 Binary coded units (0=Disabled, 1..255) 170 * Bit 0-7 Binary coded units (0=Disabled, 1..255)
168 write_io_cr(0xF2, new_timeout); 171 */
172 write_io_cr(0xF2, new_timeout);
169} 173}
170 174
171static inline void wdt_timer_conf(unsigned char conf) 175static inline void wdt_timer_conf(unsigned char conf)
172{ 176{
173 // -- Watchdog timer configuration -- 177 /* -- Watchdog timer configuration --
174 // Bit 0 Joystick enable: 0* = No Reset, 1 = Reset WDT upon Gameport I/O 178 * Bit 0 Joystick enable: 0* = No Reset, 1 = Reset WDT upon
175 // Bit 1 Keyboard enable: 0* = No Reset, 1 = Reset WDT upon KBD Intr. 179 * Gameport I/O
176 // Bit 2 Mouse enable: 0* = No Reset, 1 = Reset WDT upon Mouse Intr. 180 * Bit 1 Keyboard enable: 0* = No Reset, 1 = Reset WDT upon KBD Intr.
177 // Bit 3 Reset the timer 181 * Bit 2 Mouse enable: 0* = No Reset, 1 = Reset WDT upon Mouse Intr
178 // (Wrong in SMsC documentation? Given as: PowerLED Timout Enabled) 182 * Bit 3 Reset the timer
179 // Bit 4-7 WDT Interrupt Mapping: (0000* = Disabled, 183 * (Wrong in SMsC documentation? Given as: PowerLED Timout
180 // 0001=IRQ1, 0010=(Invalid), 0011=IRQ3 to 1111=IRQ15) 184 * Enabled)
181 write_io_cr(0xF3, conf); 185 * Bit 4-7 WDT Interrupt Mapping: (0000* = Disabled,
186 * 0001=IRQ1, 0010=(Invalid), 0011=IRQ3 to 1111=IRQ15)
187 */
188 write_io_cr(0xF3, conf);
182} 189}
183 190
184static inline void wdt_timer_ctrl(unsigned char reg) 191static inline void wdt_timer_ctrl(unsigned char reg)
185{ 192{
186 // -- Watchdog timer control -- 193 /* -- Watchdog timer control --
187 // Bit 0 Status Bit: 0 = Timer counting, 1 = Timeout occurred 194 * Bit 0 Status Bit: 0 = Timer counting, 1 = Timeout occurred
188 // Bit 1 Power LED Toggle: 0 = Disable Toggle, 1 = Toggle at 1 Hz 195 * Bit 1 Power LED Toggle: 0 = Disable Toggle, 1 = Toggle at 1 Hz
189 // Bit 2 Force Timeout: 1 = Forces WD timeout event (self-cleaning) 196 * Bit 2 Force Timeout: 1 = Forces WD timeout event (self-cleaning)
190 // Bit 3 P20 Force Timeout enabled: 197 * Bit 3 P20 Force Timeout enabled:
191 // 0 = P20 activity does not generate the WD timeout event 198 * 0 = P20 activity does not generate the WD timeout event
192 // 1 = P20 Allows rising edge of P20, from the keyboard 199 * 1 = P20 Allows rising edge of P20, from the keyboard
193 // controller, to force the WD timeout event. 200 * controller, to force the WD timeout event.
194 // Bit 4 (Reserved) 201 * Bit 4 (Reserved)
195 // -- Soft power management -- 202 * -- Soft power management --
196 // Bit 5 Stop Counter: 1 = Stop software power down counter 203 * Bit 5 Stop Counter: 1 = Stop software power down counter
197 // set via register 0xB8, (self-cleaning) 204 * set via register 0xB8, (self-cleaning)
198 // (Upon read: 0 = Counter running, 1 = Counter stopped) 205 * (Upon read: 0 = Counter running, 1 = Counter stopped)
199 // Bit 6 Restart Counter: 1 = Restart software power down counter 206 * Bit 6 Restart Counter: 1 = Restart software power down counter
200 // set via register 0xB8, (self-cleaning) 207 * set via register 0xB8, (self-cleaning)
201 // Bit 7 SPOFF: 1 = Force software power down (self-cleaning) 208 * Bit 7 SPOFF: 1 = Force software power down (self-cleaning)
202 209 */
203 write_io_cr(0xF4, reg); 210 write_io_cr(0xF4, reg);
204} 211}
205 212
206/* -- Higher level functions ------------------------------------*/ 213/* -- Higher level functions ------------------------------------*/
@@ -209,33 +216,34 @@ static inline void wdt_timer_ctrl(unsigned char reg)
209 216
210static void wb_smsc_wdt_initialize(void) 217static void wb_smsc_wdt_initialize(void)
211{ 218{
212 unsigned char old; 219 unsigned char old;
213 220
214 spin_lock(&io_lock); 221 spin_lock(&io_lock);
215 open_io_config(); 222 open_io_config();
216 select_io_device(IODEV_NO); 223 select_io_device(IODEV_NO);
217 224
218 // enable the watchdog 225 /* enable the watchdog */
219 gpio_bit13(0x08); // Select pin 80 = LED not GPIO 226 gpio_bit13(0x08); /* Select pin 80 = LED not GPIO */
220 gpio_bit12(0x0A); // Set pin 79 = WDT not GPIO/Output/Polarity=Invert 227 gpio_bit12(0x0A); /* Set pin 79 = WDT not
228 GPIO/Output/Polarity=Invert */
229 /* disable the timeout */
230 wdt_timeout_value(0);
221 231
222 // disable the timeout 232 /* reset control register */
223 wdt_timeout_value(0); 233 wdt_timer_ctrl(0x00);
224 234
225 // reset control register 235 /* reset configuration register */
226 wdt_timer_ctrl(0x00);
227
228 // reset configuration register
229 wdt_timer_conf(0x00); 236 wdt_timer_conf(0x00);
230 237
231 // read old (timer units) register 238 /* read old (timer units) register */
232 old = read_io_cr(0xF1) & 0x7F; 239 old = read_io_cr(0xF1) & 0x7F;
233 if (unit == UNIT_SECOND) old |= 0x80; // set to seconds 240 if (unit == UNIT_SECOND)
241 old |= 0x80; /* set to seconds */
234 242
235 // set the watchdog timer units 243 /* set the watchdog timer units */
236 wdt_timer_units(old); 244 wdt_timer_units(old);
237 245
238 close_io_config(); 246 close_io_config();
239 spin_unlock(&io_lock); 247 spin_unlock(&io_lock);
240} 248}
241 249
@@ -244,23 +252,23 @@ static void wb_smsc_wdt_initialize(void)
244static void wb_smsc_wdt_shutdown(void) 252static void wb_smsc_wdt_shutdown(void)
245{ 253{
246 spin_lock(&io_lock); 254 spin_lock(&io_lock);
247 open_io_config(); 255 open_io_config();
248 select_io_device(IODEV_NO); 256 select_io_device(IODEV_NO);
249 257
250 // disable the watchdog 258 /* disable the watchdog */
251 gpio_bit13(0x09); 259 gpio_bit13(0x09);
252 gpio_bit12(0x09); 260 gpio_bit12(0x09);
253 261
254 // reset watchdog config register 262 /* reset watchdog config register */
255 wdt_timer_conf(0x00); 263 wdt_timer_conf(0x00);
256 264
257 // reset watchdog control register 265 /* reset watchdog control register */
258 wdt_timer_ctrl(0x00); 266 wdt_timer_ctrl(0x00);
259 267
260 // disable timeout 268 /* disable timeout */
261 wdt_timeout_value(0x00); 269 wdt_timeout_value(0x00);
262 270
263 close_io_config(); 271 close_io_config();
264 spin_unlock(&io_lock); 272 spin_unlock(&io_lock);
265} 273}
266 274
@@ -269,16 +277,16 @@ static void wb_smsc_wdt_shutdown(void)
269static void wb_smsc_wdt_set_timeout(unsigned char new_timeout) 277static void wb_smsc_wdt_set_timeout(unsigned char new_timeout)
270{ 278{
271 spin_lock(&io_lock); 279 spin_lock(&io_lock);
272 open_io_config(); 280 open_io_config();
273 select_io_device(IODEV_NO); 281 select_io_device(IODEV_NO);
274 282
275 // set Power LED to blink, if we enable the timeout 283 /* set Power LED to blink, if we enable the timeout */
276 wdt_timer_ctrl((new_timeout == 0) ? 0x00 : 0x02); 284 wdt_timer_ctrl((new_timeout == 0) ? 0x00 : 0x02);
277 285
278 // set timeout value 286 /* set timeout value */
279 wdt_timeout_value(new_timeout); 287 wdt_timeout_value(new_timeout);
280 288
281 close_io_config(); 289 close_io_config();
282 spin_unlock(&io_lock); 290 spin_unlock(&io_lock);
283} 291}
284 292
@@ -286,32 +294,32 @@ static void wb_smsc_wdt_set_timeout(unsigned char new_timeout)
286 294
287static unsigned char wb_smsc_wdt_get_timeout(void) 295static unsigned char wb_smsc_wdt_get_timeout(void)
288{ 296{
289 unsigned char set_timeout; 297 unsigned char set_timeout;
290 298
291 spin_lock(&io_lock); 299 spin_lock(&io_lock);
292 open_io_config(); 300 open_io_config();
293 select_io_device(IODEV_NO); 301 select_io_device(IODEV_NO);
294 set_timeout = read_io_cr(0xF2); 302 set_timeout = read_io_cr(0xF2);
295 close_io_config(); 303 close_io_config();
296 spin_unlock(&io_lock); 304 spin_unlock(&io_lock);
297 305
298 return set_timeout; 306 return set_timeout;
299} 307}
300 308
301/* disable watchdog */ 309/* disable watchdog */
302 310
303static void wb_smsc_wdt_disable(void) 311static void wb_smsc_wdt_disable(void)
304{ 312{
305 // set the timeout to 0 to disable the watchdog 313 /* set the timeout to 0 to disable the watchdog */
306 wb_smsc_wdt_set_timeout(0); 314 wb_smsc_wdt_set_timeout(0);
307} 315}
308 316
309/* enable watchdog by setting the current timeout */ 317/* enable watchdog by setting the current timeout */
310 318
311static void wb_smsc_wdt_enable(void) 319static void wb_smsc_wdt_enable(void)
312{ 320{
313 // set the current timeout... 321 /* set the current timeout... */
314 wb_smsc_wdt_set_timeout(timeout); 322 wb_smsc_wdt_set_timeout(timeout);
315} 323}
316 324
317/* reset the timer */ 325/* reset the timer */
@@ -319,14 +327,14 @@ static void wb_smsc_wdt_enable(void)
319static void wb_smsc_wdt_reset_timer(void) 327static void wb_smsc_wdt_reset_timer(void)
320{ 328{
321 spin_lock(&io_lock); 329 spin_lock(&io_lock);
322 open_io_config(); 330 open_io_config();
323 select_io_device(IODEV_NO); 331 select_io_device(IODEV_NO);
324 332
325 // reset the timer 333 /* reset the timer */
326 wdt_timeout_value(timeout); 334 wdt_timeout_value(timeout);
327 wdt_timer_conf(0x08); 335 wdt_timer_conf(0x08);
328 336
329 close_io_config(); 337 close_io_config();
330 spin_unlock(&io_lock); 338 spin_unlock(&io_lock);
331} 339}
332 340
@@ -355,7 +363,9 @@ static int wb_smsc_wdt_open(struct inode *inode, struct file *file)
355 /* Reload and activate timer */ 363 /* Reload and activate timer */
356 wb_smsc_wdt_enable(); 364 wb_smsc_wdt_enable();
357 365
358 printk(KERN_INFO MODNAME "Watchdog enabled. Timeout set to %d %s.\n", timeout, (unit == UNIT_SECOND) ? "second(s)" : "minute(s)"); 366 printk(KERN_INFO MODNAME
367 "Watchdog enabled. Timeout set to %d %s.\n",
368 timeout, (unit == UNIT_SECOND) ? "second(s)" : "minute(s)");
359 369
360 return nonseekable_open(inode, file); 370 return nonseekable_open(inode, file);
361} 371}
@@ -367,10 +377,12 @@ static int wb_smsc_wdt_release(struct inode *inode, struct file *file)
367 /* Shut off the timer. */ 377 /* Shut off the timer. */
368 378
369 if (expect_close == 42) { 379 if (expect_close == 42) {
370 wb_smsc_wdt_disable(); 380 wb_smsc_wdt_disable();
371 printk(KERN_INFO MODNAME "Watchdog disabled, sleeping again...\n"); 381 printk(KERN_INFO MODNAME
382 "Watchdog disabled, sleeping again...\n");
372 } else { 383 } else {
373 printk(KERN_CRIT MODNAME "Unexpected close, not stopping watchdog!\n"); 384 printk(KERN_CRIT MODNAME
385 "Unexpected close, not stopping watchdog!\n");
374 wb_smsc_wdt_reset_timer(); 386 wb_smsc_wdt_reset_timer();
375 } 387 }
376 388
@@ -392,10 +404,11 @@ static ssize_t wb_smsc_wdt_write(struct file *file, const char __user *data,
392 /* reset expect flag */ 404 /* reset expect flag */
393 expect_close = 0; 405 expect_close = 0;
394 406
395 /* scan to see whether or not we got the magic character */ 407 /* scan to see whether or not we got the
408 magic character */
396 for (i = 0; i != len; i++) { 409 for (i = 0; i != len; i++) {
397 char c; 410 char c;
398 if (get_user(c, data+i)) 411 if (get_user(c, data + i))
399 return -EFAULT; 412 return -EFAULT;
400 if (c == 'V') 413 if (c == 'V')
401 expect_close = 42; 414 expect_close = 42;
@@ -410,8 +423,8 @@ static ssize_t wb_smsc_wdt_write(struct file *file, const char __user *data,
410 423
411/* ioctl => control interface */ 424/* ioctl => control interface */
412 425
413static int wb_smsc_wdt_ioctl(struct inode *inode, struct file *file, 426static long wb_smsc_wdt_ioctl(struct file *file,
414 unsigned int cmd, unsigned long arg) 427 unsigned int cmd, unsigned long arg)
415{ 428{
416 int new_timeout; 429 int new_timeout;
417 430
@@ -420,89 +433,73 @@ static int wb_smsc_wdt_ioctl(struct inode *inode, struct file *file,
420 int __user *i; 433 int __user *i;
421 } uarg; 434 } uarg;
422 435
423 static struct watchdog_info ident = { 436 static const struct watchdog_info ident = {
424 .options = WDIOF_KEEPALIVEPING | 437 .options = WDIOF_KEEPALIVEPING |
425 WDIOF_SETTIMEOUT | 438 WDIOF_SETTIMEOUT |
426 WDIOF_MAGICCLOSE, 439 WDIOF_MAGICCLOSE,
427 .firmware_version = 0, 440 .firmware_version = 0,
428 .identity = "SMsC 37B787 Watchdog" 441 .identity = "SMsC 37B787 Watchdog",
429 }; 442 };
430 443
431 uarg.i = (int __user *)arg; 444 uarg.i = (int __user *)arg;
432 445
433 switch (cmd) { 446 switch (cmd) {
434 default: 447 case WDIOC_GETSUPPORT:
435 return -ENOTTY; 448 return copy_to_user(uarg.ident, &ident, sizeof(ident))
436 449 ? -EFAULT : 0;
437 case WDIOC_GETSUPPORT: 450 case WDIOC_GETSTATUS:
438 return copy_to_user(uarg.ident, &ident, 451 return put_user(wb_smsc_wdt_status(), uarg.i);
439 sizeof(ident)) ? -EFAULT : 0; 452 case WDIOC_GETBOOTSTATUS:
440 453 return put_user(0, uarg.i);
441 case WDIOC_GETSTATUS: 454 case WDIOC_SETOPTIONS:
442 return put_user(wb_smsc_wdt_status(), uarg.i); 455 {
443 456 int options, retval = -EINVAL;
444 case WDIOC_GETBOOTSTATUS:
445 return put_user(0, uarg.i);
446
447 case WDIOC_KEEPALIVE:
448 wb_smsc_wdt_reset_timer();
449 return 0;
450
451 case WDIOC_SETTIMEOUT:
452 if (get_user(new_timeout, uarg.i))
453 return -EFAULT;
454
455 // the API states this is given in secs
456 if (unit == UNIT_MINUTE)
457 new_timeout /= 60;
458
459 if (new_timeout < 0 || new_timeout > MAX_TIMEOUT)
460 return -EINVAL;
461
462 timeout = new_timeout;
463 wb_smsc_wdt_set_timeout(timeout);
464
465 // fall through and return the new timeout...
466
467 case WDIOC_GETTIMEOUT:
468
469 new_timeout = timeout;
470
471 if (unit == UNIT_MINUTE)
472 new_timeout *= 60;
473
474 return put_user(new_timeout, uarg.i);
475
476 case WDIOC_SETOPTIONS:
477 {
478 int options, retval = -EINVAL;
479
480 if (get_user(options, uarg.i))
481 return -EFAULT;
482
483 if (options & WDIOS_DISABLECARD) {
484 wb_smsc_wdt_disable();
485 retval = 0;
486 }
487 457
488 if (options & WDIOS_ENABLECARD) { 458 if (get_user(options, uarg.i))
489 wb_smsc_wdt_enable(); 459 return -EFAULT;
490 retval = 0;
491 }
492 460
493 return retval; 461 if (options & WDIOS_DISABLECARD) {
462 wb_smsc_wdt_disable();
463 retval = 0;
464 }
465 if (options & WDIOS_ENABLECARD) {
466 wb_smsc_wdt_enable();
467 retval = 0;
494 } 468 }
469 return retval;
470 }
471 case WDIOC_KEEPALIVE:
472 wb_smsc_wdt_reset_timer();
473 return 0;
474 case WDIOC_SETTIMEOUT:
475 if (get_user(new_timeout, uarg.i))
476 return -EFAULT;
477 /* the API states this is given in secs */
478 if (unit == UNIT_MINUTE)
479 new_timeout /= 60;
480 if (new_timeout < 0 || new_timeout > MAX_TIMEOUT)
481 return -EINVAL;
482 timeout = new_timeout;
483 wb_smsc_wdt_set_timeout(timeout);
484 /* fall through and return the new timeout... */
485 case WDIOC_GETTIMEOUT:
486 new_timeout = timeout;
487 if (unit == UNIT_MINUTE)
488 new_timeout *= 60;
489 return put_user(new_timeout, uarg.i);
490 default:
491 return -ENOTTY;
495 } 492 }
496} 493}
497 494
498/* -- Notifier functions ----------------------------------------*/ 495/* -- Notifier functions ----------------------------------------*/
499 496
500static int wb_smsc_wdt_notify_sys(struct notifier_block *this, unsigned long code, void *unused) 497static int wb_smsc_wdt_notify_sys(struct notifier_block *this,
498 unsigned long code, void *unused)
501{ 499{
502 if (code == SYS_DOWN || code == SYS_HALT) 500 if (code == SYS_DOWN || code == SYS_HALT) {
503 { 501 /* set timeout to 0, to avoid possible race-condition */
504 // set timeout to 0, to avoid possible race-condition 502 timeout = 0;
505 timeout = 0;
506 wb_smsc_wdt_disable(); 503 wb_smsc_wdt_disable();
507 } 504 }
508 return NOTIFY_DONE; 505 return NOTIFY_DONE;
@@ -510,23 +507,20 @@ static int wb_smsc_wdt_notify_sys(struct notifier_block *this, unsigned long cod
510 507
511/* -- Module's structures ---------------------------------------*/ 508/* -- Module's structures ---------------------------------------*/
512 509
513static const struct file_operations wb_smsc_wdt_fops = 510static const struct file_operations wb_smsc_wdt_fops = {
514{ 511 .owner = THIS_MODULE,
515 .owner = THIS_MODULE,
516 .llseek = no_llseek, 512 .llseek = no_llseek,
517 .write = wb_smsc_wdt_write, 513 .write = wb_smsc_wdt_write,
518 .ioctl = wb_smsc_wdt_ioctl, 514 .unlocked_ioctl = wb_smsc_wdt_ioctl,
519 .open = wb_smsc_wdt_open, 515 .open = wb_smsc_wdt_open,
520 .release = wb_smsc_wdt_release, 516 .release = wb_smsc_wdt_release,
521}; 517};
522 518
523static struct notifier_block wb_smsc_wdt_notifier = 519static struct notifier_block wb_smsc_wdt_notifier = {
524{
525 .notifier_call = wb_smsc_wdt_notify_sys, 520 .notifier_call = wb_smsc_wdt_notify_sys,
526}; 521};
527 522
528static struct miscdevice wb_smsc_wdt_miscdev = 523static struct miscdevice wb_smsc_wdt_miscdev = {
529{
530 .minor = WATCHDOG_MINOR, 524 .minor = WATCHDOG_MINOR,
531 .name = "watchdog", 525 .name = "watchdog",
532 .fops = &wb_smsc_wdt_fops, 526 .fops = &wb_smsc_wdt_fops,
@@ -540,39 +534,44 @@ static int __init wb_smsc_wdt_init(void)
540{ 534{
541 int ret; 535 int ret;
542 536
543 printk("SMsC 37B787 watchdog component driver " VERSION " initialising...\n"); 537 printk(KERN_INFO "SMsC 37B787 watchdog component driver "
538 VERSION " initialising...\n");
544 539
545 if (!request_region(IOPORT, IOPORT_SIZE, "SMsC 37B787 watchdog")) { 540 if (!request_region(IOPORT, IOPORT_SIZE, "SMsC 37B787 watchdog")) {
546 printk(KERN_ERR MODNAME "Unable to register IO port %#x\n", IOPORT); 541 printk(KERN_ERR MODNAME "Unable to register IO port %#x\n",
542 IOPORT);
547 ret = -EBUSY; 543 ret = -EBUSY;
548 goto out_pnp; 544 goto out_pnp;
549 } 545 }
550 546
551 // set new maximum, if it's too big 547 /* set new maximum, if it's too big */
552 if (timeout > MAX_TIMEOUT) 548 if (timeout > MAX_TIMEOUT)
553 timeout = MAX_TIMEOUT; 549 timeout = MAX_TIMEOUT;
554 550
555 // init the watchdog timer 551 /* init the watchdog timer */
556 wb_smsc_wdt_initialize(); 552 wb_smsc_wdt_initialize();
557 553
558 ret = register_reboot_notifier(&wb_smsc_wdt_notifier); 554 ret = register_reboot_notifier(&wb_smsc_wdt_notifier);
559 if (ret) { 555 if (ret) {
560 printk(KERN_ERR MODNAME "Unable to register reboot notifier err = %d\n", ret); 556 printk(KERN_ERR MODNAME
557 "Unable to register reboot notifier err = %d\n", ret);
561 goto out_io; 558 goto out_io;
562 } 559 }
563 560
564 ret = misc_register(&wb_smsc_wdt_miscdev); 561 ret = misc_register(&wb_smsc_wdt_miscdev);
565 if (ret) { 562 if (ret) {
566 printk(KERN_ERR MODNAME "Unable to register miscdev on minor %d\n", WATCHDOG_MINOR); 563 printk(KERN_ERR MODNAME
564 "Unable to register miscdev on minor %d\n",
565 WATCHDOG_MINOR);
567 goto out_rbt; 566 goto out_rbt;
568 } 567 }
569 568
570 // output info 569 /* output info */
571 printk(KERN_INFO MODNAME "Timeout set to %d %s.\n", timeout, (unit == UNIT_SECOND) ? "second(s)" : "minute(s)"); 570 printk(KERN_INFO MODNAME "Timeout set to %d %s.\n",
572 printk(KERN_INFO MODNAME "Watchdog initialized and sleeping (nowayout=%d)...\n", nowayout); 571 timeout, (unit == UNIT_SECOND) ? "second(s)" : "minute(s)");
573 572 printk(KERN_INFO MODNAME
574 // ret = 0 573 "Watchdog initialized and sleeping (nowayout=%d)...\n",
575 574 nowayout);
576out_clean: 575out_clean:
577 return ret; 576 return ret;
578 577
@@ -591,8 +590,7 @@ out_pnp:
591static void __exit wb_smsc_wdt_exit(void) 590static void __exit wb_smsc_wdt_exit(void)
592{ 591{
593 /* Stop the timer before we leave */ 592 /* Stop the timer before we leave */
594 if (!nowayout) 593 if (!nowayout) {
595 {
596 wb_smsc_wdt_shutdown(); 594 wb_smsc_wdt_shutdown();
597 printk(KERN_INFO MODNAME "Watchdog disabled.\n"); 595 printk(KERN_INFO MODNAME "Watchdog disabled.\n");
598 } 596 }
@@ -601,25 +599,29 @@ static void __exit wb_smsc_wdt_exit(void)
601 unregister_reboot_notifier(&wb_smsc_wdt_notifier); 599 unregister_reboot_notifier(&wb_smsc_wdt_notifier);
602 release_region(IOPORT, IOPORT_SIZE); 600 release_region(IOPORT, IOPORT_SIZE);
603 601
604 printk("SMsC 37B787 watchdog component driver removed.\n"); 602 printk(KERN_INFO "SMsC 37B787 watchdog component driver removed.\n");
605} 603}
606 604
607module_init(wb_smsc_wdt_init); 605module_init(wb_smsc_wdt_init);
608module_exit(wb_smsc_wdt_exit); 606module_exit(wb_smsc_wdt_exit);
609 607
610MODULE_AUTHOR("Sven Anders <anders@anduras.de>"); 608MODULE_AUTHOR("Sven Anders <anders@anduras.de>");
611MODULE_DESCRIPTION("Driver for SMsC 37B787 watchdog component (Version " VERSION ")"); 609MODULE_DESCRIPTION("Driver for SMsC 37B787 watchdog component (Version "
610 VERSION ")");
612MODULE_LICENSE("GPL"); 611MODULE_LICENSE("GPL");
613 612
614MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR); 613MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR);
615 614
616#ifdef SMSC_SUPPORT_MINUTES 615#ifdef SMSC_SUPPORT_MINUTES
617module_param(unit, int, 0); 616module_param(unit, int, 0);
618MODULE_PARM_DESC(unit, "set unit to use, 0=seconds or 1=minutes, default is 0"); 617MODULE_PARM_DESC(unit,
618 "set unit to use, 0=seconds or 1=minutes, default is 0");
619#endif 619#endif
620 620
621module_param(timeout, int, 0); 621module_param(timeout, int, 0);
622MODULE_PARM_DESC(timeout, "range is 1-255 units, default is 60"); 622MODULE_PARM_DESC(timeout, "range is 1-255 units, default is 60");
623 623
624module_param(nowayout, int, 0); 624module_param(nowayout, int, 0);
625MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped once started (default=" __MODULE_STRING(WATCHDOG_NOWAYOUT) ")"); 625MODULE_PARM_DESC(nowayout,
626 "Watchdog cannot be stopped once started (default="
627 __MODULE_STRING(WATCHDOG_NOWAYOUT) ")");
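
Both the smsc37b787 write() path above and the w83627hf one further down scan the written buffer for the magic 'V' character, so that a well-behaved close can stop the timer while an unexpected close only pings it. A sketch of that shared pattern, assuming module-level nowayout and expect_close flags as in the drivers in this diff; bar_wdt_keepalive() is a placeholder for the driver's ping routine:

static ssize_t bar_wdt_write(struct file *file, const char __user *data,
			     size_t len, loff_t *ppos)
{
	if (len) {
		if (!nowayout) {
			size_t i;

			expect_close = 0;	/* reset expect flag */
			for (i = 0; i != len; i++) {
				char c;
				if (get_user(c, data + i))
					return -EFAULT;
				if (c == 'V')
					expect_close = 42;
			}
		}
		bar_wdt_keepalive();	/* any write pets the dog */
	}
	return len;
}
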
diff --git a/drivers/watchdog/softdog.c b/drivers/watchdog/softdog.c
index 9c3694909243..c650464c5c63 100644
--- a/drivers/watchdog/softdog.c
+++ b/drivers/watchdog/softdog.c
@@ -47,19 +47,22 @@
47#include <linux/reboot.h> 47#include <linux/reboot.h>
48#include <linux/init.h> 48#include <linux/init.h>
49#include <linux/jiffies.h> 49#include <linux/jiffies.h>
50 50#include <linux/uaccess.h>
51#include <asm/uaccess.h>
52 51
53#define PFX "SoftDog: " 52#define PFX "SoftDog: "
54 53
55#define TIMER_MARGIN 60 /* Default is 60 seconds */ 54#define TIMER_MARGIN 60 /* Default is 60 seconds */
56static int soft_margin = TIMER_MARGIN; /* in seconds */ 55static int soft_margin = TIMER_MARGIN; /* in seconds */
57module_param(soft_margin, int, 0); 56module_param(soft_margin, int, 0);
58MODULE_PARM_DESC(soft_margin, "Watchdog soft_margin in seconds. (0<soft_margin<65536, default=" __MODULE_STRING(TIMER_MARGIN) ")"); 57MODULE_PARM_DESC(soft_margin,
58 "Watchdog soft_margin in seconds. (0 < soft_margin < 65536, default="
59 __MODULE_STRING(TIMER_MARGIN) ")");
59 60
60static int nowayout = WATCHDOG_NOWAYOUT; 61static int nowayout = WATCHDOG_NOWAYOUT;
61module_param(nowayout, int, 0); 62module_param(nowayout, int, 0);
62MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped once started (default=" __MODULE_STRING(WATCHDOG_NOWAYOUT) ")"); 63MODULE_PARM_DESC(nowayout,
64 "Watchdog cannot be stopped once started (default="
65 __MODULE_STRING(WATCHDOG_NOWAYOUT) ")");
63 66
64#ifdef ONLY_TESTING 67#ifdef ONLY_TESTING
65static int soft_noboot = 1; 68static int soft_noboot = 1;
@@ -93,8 +96,7 @@ static void watchdog_fire(unsigned long data)
93 96
94 if (soft_noboot) 97 if (soft_noboot)
95 printk(KERN_CRIT PFX "Triggered - Reboot ignored.\n"); 98 printk(KERN_CRIT PFX "Triggered - Reboot ignored.\n");
96 else 99 else {
97 {
98 printk(KERN_CRIT PFX "Initiating system reboot.\n"); 100 printk(KERN_CRIT PFX "Initiating system reboot.\n");
99 emergency_restart(); 101 emergency_restart();
100 printk(KERN_CRIT PFX "Reboot didn't ?????\n"); 102 printk(KERN_CRIT PFX "Reboot didn't ?????\n");
@@ -153,7 +155,8 @@ static int softdog_release(struct inode *inode, struct file *file)
153 softdog_stop(); 155 softdog_stop();
154 module_put(THIS_MODULE); 156 module_put(THIS_MODULE);
155 } else { 157 } else {
156 printk(KERN_CRIT PFX "Unexpected close, not stopping watchdog!\n"); 158 printk(KERN_CRIT PFX
159 "Unexpected close, not stopping watchdog!\n");
157 set_bit(0, &orphan_timer); 160 set_bit(0, &orphan_timer);
158 softdog_keepalive(); 161 softdog_keepalive();
159 } 162 }
@@ -162,12 +165,13 @@ static int softdog_release(struct inode *inode, struct file *file)
162 return 0; 165 return 0;
163} 166}
164 167
165static ssize_t softdog_write(struct file *file, const char __user *data, size_t len, loff_t *ppos) 168static ssize_t softdog_write(struct file *file, const char __user *data,
169 size_t len, loff_t *ppos)
166{ 170{
167 /* 171 /*
168 * Refresh the timer. 172 * Refresh the timer.
169 */ 173 */
170 if(len) { 174 if (len) {
171 if (!nowayout) { 175 if (!nowayout) {
172 size_t i; 176 size_t i;
173 177
@@ -188,13 +192,13 @@ static ssize_t softdog_write(struct file *file, const char __user *data, size_t
188 return len; 192 return len;
189} 193}
190 194
191static int softdog_ioctl(struct inode *inode, struct file *file, 195static long softdog_ioctl(struct file *file, unsigned int cmd,
192 unsigned int cmd, unsigned long arg) 196 unsigned long arg)
193{ 197{
194 void __user *argp = (void __user *)arg; 198 void __user *argp = (void __user *)arg;
195 int __user *p = argp; 199 int __user *p = argp;
196 int new_margin; 200 int new_margin;
197 static struct watchdog_info ident = { 201 static const struct watchdog_info ident = {
198 .options = WDIOF_SETTIMEOUT | 202 .options = WDIOF_SETTIMEOUT |
199 WDIOF_KEEPALIVEPING | 203 WDIOF_KEEPALIVEPING |
200 WDIOF_MAGICCLOSE, 204 WDIOF_MAGICCLOSE,
@@ -202,26 +206,25 @@ static int softdog_ioctl(struct inode *inode, struct file *file,
202 .identity = "Software Watchdog", 206 .identity = "Software Watchdog",
203 }; 207 };
204 switch (cmd) { 208 switch (cmd) {
205 default: 209 case WDIOC_GETSUPPORT:
206 return -ENOTTY; 210 return copy_to_user(argp, &ident, sizeof(ident)) ? -EFAULT : 0;
207 case WDIOC_GETSUPPORT: 211 case WDIOC_GETSTATUS:
208 return copy_to_user(argp, &ident, 212 case WDIOC_GETBOOTSTATUS:
209 sizeof(ident)) ? -EFAULT : 0; 213 return put_user(0, p);
210 case WDIOC_GETSTATUS: 214 case WDIOC_KEEPALIVE:
211 case WDIOC_GETBOOTSTATUS: 215 softdog_keepalive();
212 return put_user(0, p); 216 return 0;
213 case WDIOC_KEEPALIVE: 217 case WDIOC_SETTIMEOUT:
214 softdog_keepalive(); 218 if (get_user(new_margin, p))
215 return 0; 219 return -EFAULT;
216 case WDIOC_SETTIMEOUT: 220 if (softdog_set_heartbeat(new_margin))
217 if (get_user(new_margin, p)) 221 return -EINVAL;
218 return -EFAULT; 222 softdog_keepalive();
219 if (softdog_set_heartbeat(new_margin)) 223 /* Fall */
220 return -EINVAL; 224 case WDIOC_GETTIMEOUT:
221 softdog_keepalive(); 225 return put_user(soft_margin, p);
222 /* Fall */ 226 default:
223 case WDIOC_GETTIMEOUT: 227 return -ENOTTY;
224 return put_user(soft_margin, p);
225 } 228 }
226} 229}
227 230
@@ -232,10 +235,9 @@ static int softdog_ioctl(struct inode *inode, struct file *file,
232static int softdog_notify_sys(struct notifier_block *this, unsigned long code, 235static int softdog_notify_sys(struct notifier_block *this, unsigned long code,
233 void *unused) 236 void *unused)
234{ 237{
235 if(code==SYS_DOWN || code==SYS_HALT) { 238 if (code == SYS_DOWN || code == SYS_HALT)
236 /* Turn the WDT off */ 239 /* Turn the WDT off */
237 softdog_stop(); 240 softdog_stop();
238 }
239 return NOTIFY_DONE; 241 return NOTIFY_DONE;
240} 242}
241 243
@@ -247,7 +249,7 @@ static const struct file_operations softdog_fops = {
247 .owner = THIS_MODULE, 249 .owner = THIS_MODULE,
248 .llseek = no_llseek, 250 .llseek = no_llseek,
249 .write = softdog_write, 251 .write = softdog_write,
250 .ioctl = softdog_ioctl, 252 .unlocked_ioctl = softdog_ioctl,
251 .open = softdog_open, 253 .open = softdog_open,
252 .release = softdog_release, 254 .release = softdog_release,
253}; 255};
@@ -268,24 +270,27 @@ static int __init watchdog_init(void)
268{ 270{
269 int ret; 271 int ret;
270 272
271 /* Check that the soft_margin value is within its range ; if not reset to the default */ 273 /* Check that the soft_margin value is within its range;
274 if not reset to the default */
272 if (softdog_set_heartbeat(soft_margin)) { 275 if (softdog_set_heartbeat(soft_margin)) {
273 softdog_set_heartbeat(TIMER_MARGIN); 276 softdog_set_heartbeat(TIMER_MARGIN);
274 printk(KERN_INFO PFX "soft_margin value must be 0<soft_margin<65536, using %d\n", 277 printk(KERN_INFO PFX
278 "soft_margin must be 0 < soft_margin < 65536, using %d\n",
275 TIMER_MARGIN); 279 TIMER_MARGIN);
276 } 280 }
277 281
278 ret = register_reboot_notifier(&softdog_notifier); 282 ret = register_reboot_notifier(&softdog_notifier);
279 if (ret) { 283 if (ret) {
280 printk (KERN_ERR PFX "cannot register reboot notifier (err=%d)\n", 284 printk(KERN_ERR PFX
281 ret); 285 "cannot register reboot notifier (err=%d)\n", ret);
282 return ret; 286 return ret;
283 } 287 }
284 288
285 ret = misc_register(&softdog_miscdev); 289 ret = misc_register(&softdog_miscdev);
286 if (ret) { 290 if (ret) {
287 printk (KERN_ERR PFX "cannot register miscdev on minor=%d (err=%d)\n", 291 printk(KERN_ERR PFX
288 WATCHDOG_MINOR, ret); 292 "cannot register miscdev on minor=%d (err=%d)\n",
293 WATCHDOG_MINOR, ret);
289 unregister_reboot_notifier(&softdog_notifier); 294 unregister_reboot_notifier(&softdog_notifier);
290 return ret; 295 return ret;
291 } 296 }
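
softdog's init path, like the other drivers in this series, registers the reboot notifier before the misc device and unwinds it if misc_register() fails. A sketch of that ordering, with baz_* names and the PFX prefix standing in for a concrete driver's symbols:

static int __init baz_wdt_init(void)
{
	int ret;

	ret = register_reboot_notifier(&baz_wdt_notifier);
	if (ret) {
		printk(KERN_ERR PFX
			"cannot register reboot notifier (err=%d)\n", ret);
		return ret;
	}

	ret = misc_register(&baz_wdt_miscdev);
	if (ret) {
		printk(KERN_ERR PFX
			"cannot register miscdev on minor=%d (err=%d)\n",
			WATCHDOG_MINOR, ret);
		unregister_reboot_notifier(&baz_wdt_notifier);
		return ret;
	}
	return 0;
}

Registering the notifier first means the watchdog can always be shut off on halt or reboot once the device node exists; the reverse order would leave a window where userspace can open the device before the shutdown hook is in place.
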
diff --git a/drivers/watchdog/txx9wdt.c b/drivers/watchdog/txx9wdt.c
index 57cefef27ce3..dbbc018a5f46 100644
--- a/drivers/watchdog/txx9wdt.c
+++ b/drivers/watchdog/txx9wdt.c
@@ -45,27 +45,34 @@ static unsigned long txx9wdt_alive;
45static int expect_close; 45static int expect_close;
46static struct txx9_tmr_reg __iomem *txx9wdt_reg; 46static struct txx9_tmr_reg __iomem *txx9wdt_reg;
47static struct clk *txx9_imclk; 47static struct clk *txx9_imclk;
48static DEFINE_SPINLOCK(txx9_lock);
48 49
49static void txx9wdt_ping(void) 50static void txx9wdt_ping(void)
50{ 51{
52 spin_lock(&txx9_lock);
51 __raw_writel(TXx9_TMWTMR_TWIE | TXx9_TMWTMR_TWC, &txx9wdt_reg->wtmr); 53 __raw_writel(TXx9_TMWTMR_TWIE | TXx9_TMWTMR_TWC, &txx9wdt_reg->wtmr);
54 spin_unlock(&txx9_lock);
52} 55}
53 56
54static void txx9wdt_start(void) 57static void txx9wdt_start(void)
55{ 58{
59 spin_lock(&txx9_lock);
56 __raw_writel(WD_TIMER_CLK * timeout, &txx9wdt_reg->cpra); 60 __raw_writel(WD_TIMER_CLK * timeout, &txx9wdt_reg->cpra);
57 __raw_writel(WD_TIMER_CCD, &txx9wdt_reg->ccdr); 61 __raw_writel(WD_TIMER_CCD, &txx9wdt_reg->ccdr);
58 __raw_writel(0, &txx9wdt_reg->tisr); /* clear pending interrupt */ 62 __raw_writel(0, &txx9wdt_reg->tisr); /* clear pending interrupt */
59 __raw_writel(TXx9_TMTCR_TCE | TXx9_TMTCR_CCDE | TXx9_TMTCR_TMODE_WDOG, 63 __raw_writel(TXx9_TMTCR_TCE | TXx9_TMTCR_CCDE | TXx9_TMTCR_TMODE_WDOG,
60 &txx9wdt_reg->tcr); 64 &txx9wdt_reg->tcr);
61 __raw_writel(TXx9_TMWTMR_TWIE | TXx9_TMWTMR_TWC, &txx9wdt_reg->wtmr); 65 __raw_writel(TXx9_TMWTMR_TWIE | TXx9_TMWTMR_TWC, &txx9wdt_reg->wtmr);
66 spin_unlock(&txx9_lock);
62} 67}
63 68
64static void txx9wdt_stop(void) 69static void txx9wdt_stop(void)
65{ 70{
71 spin_lock(&txx9_lock);
66 __raw_writel(TXx9_TMWTMR_WDIS, &txx9wdt_reg->wtmr); 72 __raw_writel(TXx9_TMWTMR_WDIS, &txx9wdt_reg->wtmr);
67 __raw_writel(__raw_readl(&txx9wdt_reg->tcr) & ~TXx9_TMTCR_TCE, 73 __raw_writel(__raw_readl(&txx9wdt_reg->tcr) & ~TXx9_TMTCR_TCE,
68 &txx9wdt_reg->tcr); 74 &txx9wdt_reg->tcr);
75 spin_unlock(&txx9_lock);
69} 76}
70 77
71static int txx9wdt_open(struct inode *inode, struct file *file) 78static int txx9wdt_open(struct inode *inode, struct file *file)
@@ -120,13 +127,13 @@ static ssize_t txx9wdt_write(struct file *file, const char __user *data,
120 return len; 127 return len;
121} 128}
122 129
123static int txx9wdt_ioctl(struct inode *inode, struct file *file, 130static long txx9wdt_ioctl(struct file *file, unsigned int cmd,
124 unsigned int cmd, unsigned long arg) 131 unsigned long arg)
125{ 132{
126 void __user *argp = (void __user *)arg; 133 void __user *argp = (void __user *)arg;
127 int __user *p = argp; 134 int __user *p = argp;
128 int new_timeout; 135 int new_timeout;
129 static struct watchdog_info ident = { 136 static const struct watchdog_info ident = {
130 .options = WDIOF_SETTIMEOUT | 137 .options = WDIOF_SETTIMEOUT |
131 WDIOF_KEEPALIVEPING | 138 WDIOF_KEEPALIVEPING |
132 WDIOF_MAGICCLOSE, 139 WDIOF_MAGICCLOSE,
@@ -135,8 +142,6 @@ static int txx9wdt_ioctl(struct inode *inode, struct file *file,
135 }; 142 };
136 143
137 switch (cmd) { 144 switch (cmd) {
138 default:
139 return -ENOTTY;
140 case WDIOC_GETSUPPORT: 145 case WDIOC_GETSUPPORT:
141 return copy_to_user(argp, &ident, sizeof(ident)) ? -EFAULT : 0; 146 return copy_to_user(argp, &ident, sizeof(ident)) ? -EFAULT : 0;
142 case WDIOC_GETSTATUS: 147 case WDIOC_GETSTATUS:
@@ -156,6 +161,8 @@ static int txx9wdt_ioctl(struct inode *inode, struct file *file,
156 /* Fall */ 161 /* Fall */
157 case WDIOC_GETTIMEOUT: 162 case WDIOC_GETTIMEOUT:
158 return put_user(timeout, p); 163 return put_user(timeout, p);
164 default:
165 return -ENOTTY;
159 } 166 }
160} 167}
161 168
@@ -168,22 +175,22 @@ static int txx9wdt_notify_sys(struct notifier_block *this, unsigned long code,
168} 175}
169 176
170static const struct file_operations txx9wdt_fops = { 177static const struct file_operations txx9wdt_fops = {
171 .owner = THIS_MODULE, 178 .owner = THIS_MODULE,
172 .llseek = no_llseek, 179 .llseek = no_llseek,
173 .write = txx9wdt_write, 180 .write = txx9wdt_write,
174 .ioctl = txx9wdt_ioctl, 181 .unlocked_ioctl = txx9wdt_ioctl,
175 .open = txx9wdt_open, 182 .open = txx9wdt_open,
176 .release = txx9wdt_release, 183 .release = txx9wdt_release,
177}; 184};
178 185
179static struct miscdevice txx9wdt_miscdev = { 186static struct miscdevice txx9wdt_miscdev = {
180 .minor = WATCHDOG_MINOR, 187 .minor = WATCHDOG_MINOR,
181 .name = "watchdog", 188 .name = "watchdog",
182 .fops = &txx9wdt_fops, 189 .fops = &txx9wdt_fops,
183}; 190};
184 191
185static struct notifier_block txx9wdt_notifier = { 192static struct notifier_block txx9wdt_notifier = {
186 .notifier_call = txx9wdt_notify_sys 193 .notifier_call = txx9wdt_notify_sys,
187}; 194};
188 195
189static int __init txx9wdt_probe(struct platform_device *dev) 196static int __init txx9wdt_probe(struct platform_device *dev)
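
The txx9wdt hunks add a spinlock around every register access because start, stop and ping all touch the same timer block, and stop performs a read-modify-write on the control register. A generic sketch of that locking, assuming <linux/spinlock.h> and <asm/io.h>; the example_* names are placeholders, not symbols from the driver:

static DEFINE_SPINLOCK(example_lock);

/* Clear an enable bit in a memory-mapped control register; the lock
 * keeps the read-modify-write atomic against the ping/start paths. */
static void example_wdt_stop(void __iomem *tcr, u32 enable_bit)
{
	spin_lock(&example_lock);
	__raw_writel(__raw_readl(tcr) & ~enable_bit, tcr);
	spin_unlock(&example_lock);
}
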
diff --git a/drivers/watchdog/w83627hf_wdt.c b/drivers/watchdog/w83627hf_wdt.c
index 386492821fc2..69396adaa5c3 100644
--- a/drivers/watchdog/w83627hf_wdt.c
+++ b/drivers/watchdog/w83627hf_wdt.c
@@ -37,9 +37,9 @@
37#include <linux/reboot.h> 37#include <linux/reboot.h>
38#include <linux/init.h> 38#include <linux/init.h>
39#include <linux/spinlock.h> 39#include <linux/spinlock.h>
40#include <linux/io.h>
41#include <linux/uaccess.h>
40 42
41#include <asm/io.h>
42#include <asm/uaccess.h>
43#include <asm/system.h> 43#include <asm/system.h>
44 44
45#define WATCHDOG_NAME "w83627hf/thf/hg WDT" 45#define WATCHDOG_NAME "w83627hf/thf/hg WDT"
@@ -57,22 +57,26 @@ MODULE_PARM_DESC(wdt_io, "w83627hf/thf WDT io port (default 0x2E)");
57 57
58static int timeout = WATCHDOG_TIMEOUT; /* in seconds */ 58static int timeout = WATCHDOG_TIMEOUT; /* in seconds */
59module_param(timeout, int, 0); 59module_param(timeout, int, 0);
60MODULE_PARM_DESC(timeout, "Watchdog timeout in seconds. 1<= timeout <=255, default=" __MODULE_STRING(WATCHDOG_TIMEOUT) "."); 60MODULE_PARM_DESC(timeout,
61 "Watchdog timeout in seconds. 1 <= timeout <= 255, default="
62 __MODULE_STRING(WATCHDOG_TIMEOUT) ".");
61 63
62static int nowayout = WATCHDOG_NOWAYOUT; 64static int nowayout = WATCHDOG_NOWAYOUT;
63module_param(nowayout, int, 0); 65module_param(nowayout, int, 0);
64MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped once started (default=" __MODULE_STRING(WATCHDOG_NOWAYOUT) ")"); 66MODULE_PARM_DESC(nowayout,
67 "Watchdog cannot be stopped once started (default="
68 __MODULE_STRING(WATCHDOG_NOWAYOUT) ")");
65 69
66/* 70/*
67 * Kernel methods. 71 * Kernel methods.
68 */ 72 */
69 73
70#define WDT_EFER (wdt_io+0) /* Extended Function Enable Registers */ 74#define WDT_EFER (wdt_io+0) /* Extended Function Enable Registers */
71#define WDT_EFIR (wdt_io+0) /* Extended Function Index Register (same as EFER) */ 75#define WDT_EFIR (wdt_io+0) /* Extended Function Index Register
76 (same as EFER) */
72#define WDT_EFDR (WDT_EFIR+1) /* Extended Function Data Register */ 77#define WDT_EFDR (WDT_EFIR+1) /* Extended Function Data Register */
73 78
74static void 79static void w83627hf_select_wd_register(void)
75w83627hf_select_wd_register(void)
76{ 80{
77 unsigned char c; 81 unsigned char c;
78 outb_p(0x87, WDT_EFER); /* Enter extended function mode */ 82 outb_p(0x87, WDT_EFER); /* Enter extended function mode */
@@ -93,43 +97,45 @@ w83627hf_select_wd_register(void)
93 outb_p(0x01, WDT_EFDR); /* set bit 0 to activate GPIO2 */ 97 outb_p(0x01, WDT_EFDR); /* set bit 0 to activate GPIO2 */
94} 98}
95 99
96static void 100static void w83627hf_unselect_wd_register(void)
97w83627hf_unselect_wd_register(void)
98{ 101{
99 outb_p(0xAA, WDT_EFER); /* Leave extended function mode */ 102 outb_p(0xAA, WDT_EFER); /* Leave extended function mode */
100} 103}
101 104
102/* tyan motherboards seem to set F5 to 0x4C ? 105/* tyan motherboards seem to set F5 to 0x4C ?
103 * So explicitly init to appropriate value. */ 106 * So explicitly init to appropriate value. */
104static void 107
105w83627hf_init(void) 108static void w83627hf_init(void)
106{ 109{
107 unsigned char t; 110 unsigned char t;
108 111
109 w83627hf_select_wd_register(); 112 w83627hf_select_wd_register();
110 113
111 outb_p(0xF6, WDT_EFER); /* Select CRF6 */ 114 outb_p(0xF6, WDT_EFER); /* Select CRF6 */
112 t=inb_p(WDT_EFDR); /* read CRF6 */ 115 t = inb_p(WDT_EFDR); /* read CRF6 */
113 if (t != 0) { 116 if (t != 0) {
114 printk (KERN_INFO PFX "Watchdog already running. Resetting timeout to %d sec\n", timeout); 117 printk(KERN_INFO PFX
118 "Watchdog already running. Resetting timeout to %d sec\n",
119 timeout);
115 outb_p(timeout, WDT_EFDR); /* Write back to CRF6 */ 120 outb_p(timeout, WDT_EFDR); /* Write back to CRF6 */
116 } 121 }
117 122
118 outb_p(0xF5, WDT_EFER); /* Select CRF5 */ 123 outb_p(0xF5, WDT_EFER); /* Select CRF5 */
119 t=inb_p(WDT_EFDR); /* read CRF5 */ 124 t = inb_p(WDT_EFDR); /* read CRF5 */
120 t&=~0x0C; /* set second mode & disable keyboard turning off watchdog */ 125 t &= ~0x0C; /* set second mode & disable keyboard
126 turning off watchdog */
121 outb_p(t, WDT_EFDR); /* Write back to CRF5 */ 127 outb_p(t, WDT_EFDR); /* Write back to CRF5 */
122 128
123 outb_p(0xF7, WDT_EFER); /* Select CRF7 */ 129 outb_p(0xF7, WDT_EFER); /* Select CRF7 */
124 t=inb_p(WDT_EFDR); /* read CRF7 */ 130 t = inb_p(WDT_EFDR); /* read CRF7 */
125 t&=~0xC0; /* disable keyboard & mouse turning off watchdog */ 131 t &= ~0xC0; /* disable keyboard & mouse turning off
132 watchdog */
126 outb_p(t, WDT_EFDR); /* Write back to CRF7 */ 133 outb_p(t, WDT_EFDR); /* Write back to CRF7 */
127 134
128 w83627hf_unselect_wd_register(); 135 w83627hf_unselect_wd_register();
129} 136}
130 137
131static void 138static void wdt_ctrl(int timeout)
132wdt_ctrl(int timeout)
133{ 139{
134 spin_lock(&io_lock); 140 spin_lock(&io_lock);
135 141
@@ -143,32 +149,28 @@ wdt_ctrl(int timeout)
143 spin_unlock(&io_lock); 149 spin_unlock(&io_lock);
144} 150}
145 151
146static int 152static int wdt_ping(void)
147wdt_ping(void)
148{ 153{
149 wdt_ctrl(timeout); 154 wdt_ctrl(timeout);
150 return 0; 155 return 0;
151} 156}
152 157
153static int 158static int wdt_disable(void)
154wdt_disable(void)
155{ 159{
156 wdt_ctrl(0); 160 wdt_ctrl(0);
157 return 0; 161 return 0;
158} 162}
159 163
160static int 164static int wdt_set_heartbeat(int t)
161wdt_set_heartbeat(int t)
162{ 165{
163 if ((t < 1) || (t > 255)) 166 if (t < 1 || t > 255)
164 return -EINVAL; 167 return -EINVAL;
165
166 timeout = t; 168 timeout = t;
167 return 0; 169 return 0;
168} 170}
169 171
170static ssize_t 172static ssize_t wdt_write(struct file *file, const char __user *buf,
171wdt_write(struct file *file, const char __user *buf, size_t count, loff_t *ppos) 173 size_t count, loff_t *ppos)
172{ 174{
173 if (count) { 175 if (count) {
174 if (!nowayout) { 176 if (!nowayout) {
@@ -178,7 +180,7 @@ wdt_write(struct file *file, const char __user *buf, size_t count, loff_t *ppos)
178 180
179 for (i = 0; i != count; i++) { 181 for (i = 0; i != count; i++) {
180 char c; 182 char c;
181 if (get_user(c, buf+i)) 183 if (get_user(c, buf + i))
182 return -EFAULT; 184 return -EFAULT;
183 if (c == 'V') 185 if (c == 'V')
184 expect_close = 42; 186 expect_close = 42;
@@ -189,72 +191,61 @@ wdt_write(struct file *file, const char __user *buf, size_t count, loff_t *ppos)
189 return count; 191 return count;
190} 192}
191 193
192static int 194static long wdt_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
193wdt_ioctl(struct inode *inode, struct file *file, unsigned int cmd,
194 unsigned long arg)
195{ 195{
196 void __user *argp = (void __user *)arg; 196 void __user *argp = (void __user *)arg;
197 int __user *p = argp; 197 int __user *p = argp;
198 int new_timeout; 198 int new_timeout;
199 static struct watchdog_info ident = { 199 static struct watchdog_info ident = {
200 .options = WDIOF_KEEPALIVEPING | WDIOF_SETTIMEOUT | WDIOF_MAGICCLOSE, 200 .options = WDIOF_KEEPALIVEPING | WDIOF_SETTIMEOUT |
201 WDIOF_MAGICCLOSE,
201 .firmware_version = 1, 202 .firmware_version = 1,
202 .identity = "W83627HF WDT", 203 .identity = "W83627HF WDT",
203 }; 204 };
204 205
205 switch (cmd) { 206 switch (cmd) {
206 case WDIOC_GETSUPPORT: 207 case WDIOC_GETSUPPORT:
207 if (copy_to_user(argp, &ident, sizeof(ident))) 208 if (copy_to_user(argp, &ident, sizeof(ident)))
208 return -EFAULT; 209 return -EFAULT;
209 break; 210 break;
210
211 case WDIOC_GETSTATUS: 211 case WDIOC_GETSTATUS:
212 case WDIOC_GETBOOTSTATUS: 212 case WDIOC_GETBOOTSTATUS:
213 return put_user(0, p); 213 return put_user(0, p);
214
215 case WDIOC_KEEPALIVE:
216 wdt_ping();
217 break;
218
219 case WDIOC_SETTIMEOUT:
220 if (get_user(new_timeout, p))
221 return -EFAULT;
222 if (wdt_set_heartbeat(new_timeout))
223 return -EINVAL;
224 wdt_ping();
225 /* Fall */
226
227 case WDIOC_GETTIMEOUT:
228 return put_user(timeout, p);
229
230 case WDIOC_SETOPTIONS: 214 case WDIOC_SETOPTIONS:
231 { 215 {
232 int options, retval = -EINVAL; 216 int options, retval = -EINVAL;
233
234 if (get_user(options, p))
235 return -EFAULT;
236
237 if (options & WDIOS_DISABLECARD) {
238 wdt_disable();
239 retval = 0;
240 }
241
242 if (options & WDIOS_ENABLECARD) {
243 wdt_ping();
244 retval = 0;
245 }
246 217
247 return retval; 218 if (get_user(options, p))
219 return -EFAULT;
220 if (options & WDIOS_DISABLECARD) {
221 wdt_disable();
222 retval = 0;
223 }
224 if (options & WDIOS_ENABLECARD) {
225 wdt_ping();
226 retval = 0;
227 }
228 return retval;
248 } 229 }
249 230 case WDIOC_KEEPALIVE:
231 wdt_ping();
232 break;
233 case WDIOC_SETTIMEOUT:
234 if (get_user(new_timeout, p))
235 return -EFAULT;
236 if (wdt_set_heartbeat(new_timeout))
237 return -EINVAL;
238 wdt_ping();
239 /* Fall */
240 case WDIOC_GETTIMEOUT:
241 return put_user(timeout, p);
250 default: 242 default:
251 return -ENOTTY; 243 return -ENOTTY;
252 } 244 }
253 return 0; 245 return 0;
254} 246}
255 247
256static int 248static int wdt_open(struct inode *inode, struct file *file)
257wdt_open(struct inode *inode, struct file *file)
258{ 249{
259 if (test_and_set_bit(0, &wdt_is_open)) 250 if (test_and_set_bit(0, &wdt_is_open))
260 return -EBUSY; 251 return -EBUSY;
@@ -266,13 +257,13 @@ wdt_open(struct inode *inode, struct file *file)
266 return nonseekable_open(inode, file); 257 return nonseekable_open(inode, file);
267} 258}
268 259
269static int 260static int wdt_close(struct inode *inode, struct file *file)
270wdt_close(struct inode *inode, struct file *file)
271{ 261{
272 if (expect_close == 42) { 262 if (expect_close == 42)
273 wdt_disable(); 263 wdt_disable();
274 } else { 264 else {
275 printk(KERN_CRIT PFX "Unexpected close, not stopping watchdog!\n"); 265 printk(KERN_CRIT PFX
266 "Unexpected close, not stopping watchdog!\n");
276 wdt_ping(); 267 wdt_ping();
277 } 268 }
278 expect_close = 0; 269 expect_close = 0;
@@ -284,14 +275,12 @@ wdt_close(struct inode *inode, struct file *file)
284 * Notifier for system down 275 * Notifier for system down
285 */ 276 */
286 277
287static int 278static int wdt_notify_sys(struct notifier_block *this, unsigned long code,
288wdt_notify_sys(struct notifier_block *this, unsigned long code,
289 void *unused) 279 void *unused)
290{ 280{
291 if (code == SYS_DOWN || code == SYS_HALT) { 281 if (code == SYS_DOWN || code == SYS_HALT)
292 /* Turn the WDT off */ 282 wdt_disable(); /* Turn the WDT off */
293 wdt_disable(); 283
294 }
295 return NOTIFY_DONE; 284 return NOTIFY_DONE;
296} 285}
297 286
@@ -303,7 +292,7 @@ static const struct file_operations wdt_fops = {
303 .owner = THIS_MODULE, 292 .owner = THIS_MODULE,
304 .llseek = no_llseek, 293 .llseek = no_llseek,
305 .write = wdt_write, 294 .write = wdt_write,
306 .ioctl = wdt_ioctl, 295 .unlocked_ioctl = wdt_ioctl,
307 .open = wdt_open, 296 .open = wdt_open,
308 .release = wdt_close, 297 .release = wdt_close,
309}; 298};
@@ -323,8 +312,7 @@ static struct notifier_block wdt_notifier = {
323 .notifier_call = wdt_notify_sys, 312 .notifier_call = wdt_notify_sys,
324}; 313};
325 314
326static int __init 315static int __init wdt_init(void)
327wdt_init(void)
328{ 316{
329 int ret; 317 int ret;
330 318
@@ -332,12 +320,13 @@ wdt_init(void)
332 320
333 if (wdt_set_heartbeat(timeout)) { 321 if (wdt_set_heartbeat(timeout)) {
334 wdt_set_heartbeat(WATCHDOG_TIMEOUT); 322 wdt_set_heartbeat(WATCHDOG_TIMEOUT);
335 printk (KERN_INFO PFX "timeout value must be 1<=timeout<=255, using %d\n", 323 printk(KERN_INFO PFX
336 WATCHDOG_TIMEOUT); 324 "timeout value must be 1 <= timeout <= 255, using %d\n",
325 WATCHDOG_TIMEOUT);
337 } 326 }
338 327
339 if (!request_region(wdt_io, 1, WATCHDOG_NAME)) { 328 if (!request_region(wdt_io, 1, WATCHDOG_NAME)) {
340 printk (KERN_ERR PFX "I/O address 0x%04x already in use\n", 329 printk(KERN_ERR PFX "I/O address 0x%04x already in use\n",
341 wdt_io); 330 wdt_io);
342 ret = -EIO; 331 ret = -EIO;
343 goto out; 332 goto out;
@@ -347,20 +336,22 @@ wdt_init(void)
347 336
348 ret = register_reboot_notifier(&wdt_notifier); 337 ret = register_reboot_notifier(&wdt_notifier);
349 if (ret != 0) { 338 if (ret != 0) {
350 printk (KERN_ERR PFX "cannot register reboot notifier (err=%d)\n", 339 printk(KERN_ERR PFX
351 ret); 340 "cannot register reboot notifier (err=%d)\n", ret);
352 goto unreg_regions; 341 goto unreg_regions;
353 } 342 }
354 343
355 ret = misc_register(&wdt_miscdev); 344 ret = misc_register(&wdt_miscdev);
356 if (ret != 0) { 345 if (ret != 0) {
357 printk (KERN_ERR PFX "cannot register miscdev on minor=%d (err=%d)\n", 346 printk(KERN_ERR PFX
358 WATCHDOG_MINOR, ret); 347 "cannot register miscdev on minor=%d (err=%d)\n",
348 WATCHDOG_MINOR, ret);
359 goto unreg_reboot; 349 goto unreg_reboot;
360 } 350 }
361 351
362 printk (KERN_INFO PFX "initialized. timeout=%d sec (nowayout=%d)\n", 352 printk(KERN_INFO PFX
363 timeout, nowayout); 353 "initialized. timeout=%d sec (nowayout=%d)\n",
354 timeout, nowayout);
364 355
365out: 356out:
366 return ret; 357 return ret;
@@ -371,12 +362,11 @@ unreg_regions:
371 goto out; 362 goto out;
372} 363}
373 364
374static void __exit 365static void __exit wdt_exit(void)
375wdt_exit(void)
376{ 366{
377 misc_deregister(&wdt_miscdev); 367 misc_deregister(&wdt_miscdev);
378 unregister_reboot_notifier(&wdt_notifier); 368 unregister_reboot_notifier(&wdt_notifier);
379 release_region(wdt_io,1); 369 release_region(wdt_io, 1);
380} 370}
381 371
382module_init(wdt_init); 372module_init(wdt_init);
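
The w83627hf hunks above and the w83697hf hunks below both drive a Super-I/O chip through an index/data register pair behind an enter/exit key sequence. A sketch of that access pattern, assuming <asm/io.h>; base stands in for the probed I/O port (wdt_io in the drivers), and the 0x87/0xAA keys follow the convention visible in the diff:

static inline void superio_enter(int base)
{
	outb_p(0x87, base);	/* enter extended function mode */
	outb_p(0x87, base);	/* the key must be written twice */
}

static inline void superio_exit(int base)
{
	outb_p(0xAA, base);	/* leave extended function mode */
}

static inline unsigned char superio_read(int base, unsigned char reg)
{
	outb_p(reg, base);		/* index register */
	return inb_p(base + 1);		/* data register */
}

static inline void superio_write(int base, unsigned char reg,
				 unsigned char data)
{
	outb_p(reg, base);
	outb_p(data, base + 1);
}
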
diff --git a/drivers/watchdog/w83697hf_wdt.c b/drivers/watchdog/w83697hf_wdt.c
index 528b882420b6..445d30a01ed3 100644
--- a/drivers/watchdog/w83697hf_wdt.c
+++ b/drivers/watchdog/w83697hf_wdt.c
@@ -36,9 +36,9 @@
36#include <linux/reboot.h> 36#include <linux/reboot.h>
37#include <linux/init.h> 37#include <linux/init.h>
38#include <linux/spinlock.h> 38#include <linux/spinlock.h>
39#include <linux/io.h>
40#include <linux/uaccess.h>
39 41
40#include <asm/io.h>
41#include <asm/uaccess.h>
42#include <asm/system.h> 42#include <asm/system.h>
43 43
44#define WATCHDOG_NAME "w83697hf/hg WDT" 44#define WATCHDOG_NAME "w83697hf/hg WDT"
@@ -53,37 +53,43 @@ static DEFINE_SPINLOCK(io_lock);
53/* You must set this - there is no sane way to probe for this board. */ 53/* You must set this - there is no sane way to probe for this board. */
54static int wdt_io = 0x2e; 54static int wdt_io = 0x2e;
55module_param(wdt_io, int, 0); 55module_param(wdt_io, int, 0);
56MODULE_PARM_DESC(wdt_io, "w83697hf/hg WDT io port (default 0x2e, 0 = autodetect)"); 56MODULE_PARM_DESC(wdt_io,
57 "w83697hf/hg WDT io port (default 0x2e, 0 = autodetect)");
57 58
58static int timeout = WATCHDOG_TIMEOUT; /* in seconds */ 59static int timeout = WATCHDOG_TIMEOUT; /* in seconds */
59module_param(timeout, int, 0); 60module_param(timeout, int, 0);
60MODULE_PARM_DESC(timeout, "Watchdog timeout in seconds. 1<= timeout <=255 (default=" __MODULE_STRING(WATCHDOG_TIMEOUT) ")"); 61MODULE_PARM_DESC(timeout,
62 "Watchdog timeout in seconds. 1<= timeout <=255 (default="
63 __MODULE_STRING(WATCHDOG_TIMEOUT) ")");
61 64
62static int nowayout = WATCHDOG_NOWAYOUT; 65static int nowayout = WATCHDOG_NOWAYOUT;
63module_param(nowayout, int, 0); 66module_param(nowayout, int, 0);
64MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped once started (default=" __MODULE_STRING(WATCHDOG_NOWAYOUT) ")"); 67MODULE_PARM_DESC(nowayout,
68 "Watchdog cannot be stopped once started (default="
69 __MODULE_STRING(WATCHDOG_NOWAYOUT) ")");
65 70
66static int early_disable = WATCHDOG_EARLY_DISABLE; 71static int early_disable = WATCHDOG_EARLY_DISABLE;
67module_param(early_disable, int, 0); 72module_param(early_disable, int, 0);
68MODULE_PARM_DESC(early_disable, "Watchdog gets disabled at boot time (default=" __MODULE_STRING(WATCHDOG_EARLY_DISABLE) ")"); 73MODULE_PARM_DESC(early_disable,
74 "Watchdog gets disabled at boot time (default="
75 __MODULE_STRING(WATCHDOG_EARLY_DISABLE) ")");
69 76
70/* 77/*
71 * Kernel methods. 78 * Kernel methods.
72 */ 79 */
73 80
74#define W83697HF_EFER (wdt_io+0) /* Extended Function Enable Register */ 81#define W83697HF_EFER (wdt_io + 0) /* Extended Function Enable Register */
75#define W83697HF_EFIR (wdt_io+0) /* Extended Function Index Register (same as EFER) */ 82#define W83697HF_EFIR (wdt_io + 0) /* Extended Function Index Register
76#define W83697HF_EFDR (wdt_io+1) /* Extended Function Data Register */ 83 (same as EFER) */
84#define W83697HF_EFDR (wdt_io + 1) /* Extended Function Data Register */
77 85
78static inline void 86static inline void w83697hf_unlock(void)
79w83697hf_unlock(void)
80{ 87{
81 outb_p(0x87, W83697HF_EFER); /* Enter extended function mode */ 88 outb_p(0x87, W83697HF_EFER); /* Enter extended function mode */
82 outb_p(0x87, W83697HF_EFER); /* Again according to manual */ 89 outb_p(0x87, W83697HF_EFER); /* Again according to manual */
83} 90}
84 91
85static inline void 92static inline void w83697hf_lock(void)
86w83697hf_lock(void)
87{ 93{
88 outb_p(0xAA, W83697HF_EFER); /* Leave extended function mode */ 94 outb_p(0xAA, W83697HF_EFER); /* Leave extended function mode */
89} 95}
@@ -93,41 +99,36 @@ w83697hf_lock(void)
93 * w83697hf_write_timeout() must be called with the device unlocked. 99 * w83697hf_write_timeout() must be called with the device unlocked.
94 */ 100 */
95 101
96static unsigned char 102static unsigned char w83697hf_get_reg(unsigned char reg)
97w83697hf_get_reg(unsigned char reg)
98{ 103{
99 outb_p(reg, W83697HF_EFIR); 104 outb_p(reg, W83697HF_EFIR);
100 return inb_p(W83697HF_EFDR); 105 return inb_p(W83697HF_EFDR);
101} 106}
102 107
103static void 108static void w83697hf_set_reg(unsigned char reg, unsigned char data)
104w83697hf_set_reg(unsigned char reg, unsigned char data)
105{ 109{
106 outb_p(reg, W83697HF_EFIR); 110 outb_p(reg, W83697HF_EFIR);
107 outb_p(data, W83697HF_EFDR); 111 outb_p(data, W83697HF_EFDR);
108} 112}
109 113
110static void 114static void w83697hf_write_timeout(int timeout)
111w83697hf_write_timeout(int timeout)
112{ 115{
113 w83697hf_set_reg(0xF4, timeout); /* Write Timeout counter to CRF4 */ 116 /* Write Timeout counter to CRF4 */
117 w83697hf_set_reg(0xF4, timeout);
114} 118}
115 119
116static void 120static void w83697hf_select_wdt(void)
117w83697hf_select_wdt(void)
118{ 121{
119 w83697hf_unlock(); 122 w83697hf_unlock();
120 w83697hf_set_reg(0x07, 0x08); /* Switch to logic device 8 (GPIO2) */ 123 w83697hf_set_reg(0x07, 0x08); /* Switch to logic device 8 (GPIO2) */
121} 124}
122 125
123static inline void 126static inline void w83697hf_deselect_wdt(void)
124w83697hf_deselect_wdt(void)
125{ 127{
126 w83697hf_lock(); 128 w83697hf_lock();
127} 129}
128 130
129static void 131static void w83697hf_init(void)
130w83697hf_init(void)
131{ 132{
132 unsigned char bbuf; 133 unsigned char bbuf;
133 134
@@ -136,7 +137,9 @@ w83697hf_init(void)
136 bbuf = w83697hf_get_reg(0x29); 137 bbuf = w83697hf_get_reg(0x29);
137 bbuf &= ~0x60; 138 bbuf &= ~0x60;
138 bbuf |= 0x20; 139 bbuf |= 0x20;
139 w83697hf_set_reg(0x29, bbuf); /* Set pin 119 to WDTO# mode (= CR29, WDT0) */ 140
141 /* Set pin 119 to WDTO# mode (= CR29, WDT0) */
142 w83697hf_set_reg(0x29, bbuf);
140 143
141 bbuf = w83697hf_get_reg(0xF3); 144 bbuf = w83697hf_get_reg(0xF3);
142 bbuf &= ~0x04; 145 bbuf &= ~0x04;
@@ -145,8 +148,7 @@ w83697hf_init(void)
145 w83697hf_deselect_wdt(); 148 w83697hf_deselect_wdt();
146} 149}
147 150
148static void 151static void wdt_ping(void)
149wdt_ping(void)
150{ 152{
151 spin_lock(&io_lock); 153 spin_lock(&io_lock);
152 w83697hf_select_wdt(); 154 w83697hf_select_wdt();
@@ -157,8 +159,7 @@ wdt_ping(void)
157 spin_unlock(&io_lock); 159 spin_unlock(&io_lock);
158} 160}
159 161
160static void 162static void wdt_enable(void)
161wdt_enable(void)
162{ 163{
163 spin_lock(&io_lock); 164 spin_lock(&io_lock);
164 w83697hf_select_wdt(); 165 w83697hf_select_wdt();
@@ -170,8 +171,7 @@ wdt_enable(void)
170 spin_unlock(&io_lock); 171 spin_unlock(&io_lock);
171} 172}
172 173
173static void 174static void wdt_disable(void)
174wdt_disable(void)
175{ 175{
176 spin_lock(&io_lock); 176 spin_lock(&io_lock);
177 w83697hf_select_wdt(); 177 w83697hf_select_wdt();
@@ -183,8 +183,7 @@ wdt_disable(void)
183 spin_unlock(&io_lock); 183 spin_unlock(&io_lock);
184} 184}
185 185
186static unsigned char 186static unsigned char wdt_running(void)
187wdt_running(void)
188{ 187{
189 unsigned char t; 188 unsigned char t;
190 189
@@ -199,18 +198,17 @@ wdt_running(void)
199 return t; 198 return t;
200} 199}
201 200
202static int 201static int wdt_set_heartbeat(int t)
203wdt_set_heartbeat(int t)
204{ 202{
205 if ((t < 1) || (t > 255)) 203 if (t < 1 || t > 255)
206 return -EINVAL; 204 return -EINVAL;
207 205
208 timeout = t; 206 timeout = t;
209 return 0; 207 return 0;
210} 208}
211 209
212static ssize_t 210static ssize_t wdt_write(struct file *file, const char __user *buf,
213wdt_write(struct file *file, const char __user *buf, size_t count, loff_t *ppos) 211 size_t count, loff_t *ppos)
214{ 212{
215 if (count) { 213 if (count) {
216 if (!nowayout) { 214 if (!nowayout) {
@@ -220,7 +218,7 @@ wdt_write(struct file *file, const char __user *buf, size_t count, loff_t *ppos)
220 218
221 for (i = 0; i != count; i++) { 219 for (i = 0; i != count; i++) {
222 char c; 220 char c;
223 if (get_user(c, buf+i)) 221 if (get_user(c, buf + i))
224 return -EFAULT; 222 return -EFAULT;
225 if (c == 'V') 223 if (c == 'V')
226 expect_close = 42; 224 expect_close = 42;
@@ -231,15 +229,14 @@ wdt_write(struct file *file, const char __user *buf, size_t count, loff_t *ppos)
231 return count; 229 return count;
232} 230}
233 231
234static int 232static long wdt_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
235wdt_ioctl(struct inode *inode, struct file *file, unsigned int cmd,
236 unsigned long arg)
237{ 233{
238 void __user *argp = (void __user *)arg; 234 void __user *argp = (void __user *)arg;
239 int __user *p = argp; 235 int __user *p = argp;
240 int new_timeout; 236 int new_timeout;
241 static struct watchdog_info ident = { 237 static const struct watchdog_info ident = {
242 .options = WDIOF_KEEPALIVEPING | WDIOF_SETTIMEOUT | WDIOF_MAGICCLOSE, 238 .options = WDIOF_KEEPALIVEPING | WDIOF_SETTIMEOUT
239 | WDIOF_MAGICCLOSE,
243 .firmware_version = 1, 240 .firmware_version = 1,
244 .identity = "W83697HF WDT", 241 .identity = "W83697HF WDT",
245 }; 242 };
@@ -254,21 +251,6 @@ wdt_ioctl(struct inode *inode, struct file *file, unsigned int cmd,
254 case WDIOC_GETBOOTSTATUS: 251 case WDIOC_GETBOOTSTATUS:
255 return put_user(0, p); 252 return put_user(0, p);
256 253
257 case WDIOC_KEEPALIVE:
258 wdt_ping();
259 break;
260
261 case WDIOC_SETTIMEOUT:
262 if (get_user(new_timeout, p))
263 return -EFAULT;
264 if (wdt_set_heartbeat(new_timeout))
265 return -EINVAL;
266 wdt_ping();
267 /* Fall */
268
269 case WDIOC_GETTIMEOUT:
270 return put_user(timeout, p);
271
272 case WDIOC_SETOPTIONS: 254 case WDIOC_SETOPTIONS:
273 { 255 {
274 int options, retval = -EINVAL; 256 int options, retval = -EINVAL;
@@ -289,14 +271,28 @@ wdt_ioctl(struct inode *inode, struct file *file, unsigned int cmd,
289 return retval; 271 return retval;
290 } 272 }
291 273
274 case WDIOC_KEEPALIVE:
275 wdt_ping();
276 break;
277
278 case WDIOC_SETTIMEOUT:
279 if (get_user(new_timeout, p))
280 return -EFAULT;
281 if (wdt_set_heartbeat(new_timeout))
282 return -EINVAL;
283 wdt_ping();
284 /* Fall */
285
286 case WDIOC_GETTIMEOUT:
287 return put_user(timeout, p);
288
292 default: 289 default:
293 return -ENOTTY; 290 return -ENOTTY;
294 } 291 }
295 return 0; 292 return 0;
296} 293}
297 294
298static int 295static int wdt_open(struct inode *inode, struct file *file)
299wdt_open(struct inode *inode, struct file *file)
300{ 296{
301 if (test_and_set_bit(0, &wdt_is_open)) 297 if (test_and_set_bit(0, &wdt_is_open))
302 return -EBUSY; 298 return -EBUSY;
@@ -308,13 +304,13 @@ wdt_open(struct inode *inode, struct file *file)
308 return nonseekable_open(inode, file); 304 return nonseekable_open(inode, file);
309} 305}
310 306
311static int 307static int wdt_close(struct inode *inode, struct file *file)
312wdt_close(struct inode *inode, struct file *file)
313{ 308{
314 if (expect_close == 42) { 309 if (expect_close == 42)
315 wdt_disable(); 310 wdt_disable();
316 } else { 311 else {
317 printk (KERN_CRIT PFX "Unexpected close, not stopping watchdog!\n"); 312 printk(KERN_CRIT PFX
313 "Unexpected close, not stopping watchdog!\n");
318 wdt_ping(); 314 wdt_ping();
319 } 315 }
320 expect_close = 0; 316 expect_close = 0;
@@ -326,14 +322,12 @@ wdt_close(struct inode *inode, struct file *file)
326 * Notifier for system down 322 * Notifier for system down
327 */ 323 */
328 324
329static int 325static int wdt_notify_sys(struct notifier_block *this, unsigned long code,
330wdt_notify_sys(struct notifier_block *this, unsigned long code,
331 void *unused) 326 void *unused)
332{ 327{
333 if (code == SYS_DOWN || code == SYS_HALT) { 328 if (code == SYS_DOWN || code == SYS_HALT)
334 /* Turn the WDT off */ 329 wdt_disable(); /* Turn the WDT off */
335 wdt_disable(); 330
336 }
337 return NOTIFY_DONE; 331 return NOTIFY_DONE;
338} 332}
339 333
@@ -345,7 +339,7 @@ static const struct file_operations wdt_fops = {
345 .owner = THIS_MODULE, 339 .owner = THIS_MODULE,
346 .llseek = no_llseek, 340 .llseek = no_llseek,
347 .write = wdt_write, 341 .write = wdt_write,
348 .ioctl = wdt_ioctl, 342 .unlocked_ioctl = wdt_ioctl,
349 .open = wdt_open, 343 .open = wdt_open,
350 .release = wdt_close, 344 .release = wdt_close,
351}; 345};
@@ -365,36 +359,38 @@ static struct notifier_block wdt_notifier = {
365 .notifier_call = wdt_notify_sys, 359 .notifier_call = wdt_notify_sys,
366}; 360};
367 361
368static int 362static int w83697hf_check_wdt(void)
369w83697hf_check_wdt(void)
370{ 363{
371 if (!request_region(wdt_io, 2, WATCHDOG_NAME)) { 364 if (!request_region(wdt_io, 2, WATCHDOG_NAME)) {
372 printk (KERN_ERR PFX "I/O address 0x%x already in use\n", wdt_io); 365 printk(KERN_ERR PFX
366 "I/O address 0x%x already in use\n", wdt_io);
373 return -EIO; 367 return -EIO;
374 } 368 }
375 369
376 printk (KERN_DEBUG PFX "Looking for watchdog at address 0x%x\n", wdt_io); 370 printk(KERN_DEBUG PFX
371 "Looking for watchdog at address 0x%x\n", wdt_io);
377 w83697hf_unlock(); 372 w83697hf_unlock();
378 if (w83697hf_get_reg(0x20) == 0x60) { 373 if (w83697hf_get_reg(0x20) == 0x60) {
379 printk (KERN_INFO PFX "watchdog found at address 0x%x\n", wdt_io); 374 printk(KERN_INFO PFX
375 "watchdog found at address 0x%x\n", wdt_io);
380 w83697hf_lock(); 376 w83697hf_lock();
381 return 0; 377 return 0;
382 } 378 }
383 w83697hf_lock(); /* Reprotect in case it was a compatible device */ 379 /* Reprotect in case it was a compatible device */
380 w83697hf_lock();
384 381
385 printk (KERN_INFO PFX "watchdog not found at address 0x%x\n", wdt_io); 382 printk(KERN_INFO PFX "watchdog not found at address 0x%x\n", wdt_io);
386 release_region(wdt_io, 2); 383 release_region(wdt_io, 2);
387 return -EIO; 384 return -EIO;
388} 385}
389 386
390static int w83697hf_ioports[] = { 0x2e, 0x4e, 0x00 }; 387static int w83697hf_ioports[] = { 0x2e, 0x4e, 0x00 };
391 388
392static int __init 389static int __init wdt_init(void)
393wdt_init(void)
394{ 390{
395 int ret, i, found = 0; 391 int ret, i, found = 0;
396 392
397 printk (KERN_INFO PFX "WDT driver for W83697HF/HG initializing\n"); 393 printk(KERN_INFO PFX "WDT driver for W83697HF/HG initializing\n");
398 394
399 if (wdt_io == 0) { 395 if (wdt_io == 0) {
400 /* we will autodetect the W83697HF/HG watchdog */ 396 /* we will autodetect the W83697HF/HG watchdog */
@@ -409,7 +405,7 @@ wdt_init(void)
409 } 405 }
410 406
411 if (!found) { 407 if (!found) {
412 printk (KERN_ERR PFX "No W83697HF/HG could be found\n"); 408 printk(KERN_ERR PFX "No W83697HF/HG could be found\n");
413 ret = -EIO; 409 ret = -EIO;
414 goto out; 410 goto out;
415 } 411 }
@@ -417,31 +413,33 @@ wdt_init(void)
417 w83697hf_init(); 413 w83697hf_init();
418 if (early_disable) { 414 if (early_disable) {
419 if (wdt_running()) 415 if (wdt_running())
420 printk (KERN_WARNING PFX "Stopping previously enabled watchdog until userland kicks in\n"); 416 printk(KERN_WARNING PFX "Stopping previously enabled watchdog until userland kicks in\n");
421 wdt_disable(); 417 wdt_disable();
422 } 418 }
423 419
424 if (wdt_set_heartbeat(timeout)) { 420 if (wdt_set_heartbeat(timeout)) {
425 wdt_set_heartbeat(WATCHDOG_TIMEOUT); 421 wdt_set_heartbeat(WATCHDOG_TIMEOUT);
426 printk (KERN_INFO PFX "timeout value must be 1<=timeout<=255, using %d\n", 422 printk(KERN_INFO PFX
427 WATCHDOG_TIMEOUT); 423 "timeout value must be 1 <= timeout <= 255, using %d\n",
424 WATCHDOG_TIMEOUT);
428 } 425 }
429 426
430 ret = register_reboot_notifier(&wdt_notifier); 427 ret = register_reboot_notifier(&wdt_notifier);
431 if (ret != 0) { 428 if (ret != 0) {
432 printk (KERN_ERR PFX "cannot register reboot notifier (err=%d)\n", 429 printk(KERN_ERR PFX
433 ret); 430 "cannot register reboot notifier (err=%d)\n", ret);
434 goto unreg_regions; 431 goto unreg_regions;
435 } 432 }
436 433
437 ret = misc_register(&wdt_miscdev); 434 ret = misc_register(&wdt_miscdev);
438 if (ret != 0) { 435 if (ret != 0) {
439 printk (KERN_ERR PFX "cannot register miscdev on minor=%d (err=%d)\n", 436 printk(KERN_ERR PFX
440 WATCHDOG_MINOR, ret); 437 "cannot register miscdev on minor=%d (err=%d)\n",
438 WATCHDOG_MINOR, ret);
441 goto unreg_reboot; 439 goto unreg_reboot;
442 } 440 }
443 441
444 printk (KERN_INFO PFX "initialized. timeout=%d sec (nowayout=%d)\n", 442 printk(KERN_INFO PFX "initialized. timeout=%d sec (nowayout=%d)\n",
445 timeout, nowayout); 443 timeout, nowayout);
446 444
447out: 445out:
@@ -453,8 +451,7 @@ unreg_regions:
453 goto out; 451 goto out;
454} 452}
455 453
456static void __exit 454static void __exit wdt_exit(void)
457wdt_exit(void)
458{ 455{
459 misc_deregister(&wdt_miscdev); 456 misc_deregister(&wdt_miscdev);
460 unregister_reboot_notifier(&wdt_notifier); 457 unregister_reboot_notifier(&wdt_notifier);
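Besides the style fixes, the w83697hf hunks above switch the driver from the legacy .ioctl file operation to .unlocked_ioctl. A hedged sketch of what that conversion amounts to in general (handler names are illustrative; any serialization the old BKL path provided must now be taken explicitly inside the handler):

/* Legacy handler: takes the inode, runs under the Big Kernel Lock. */
static int foo_ioctl(struct inode *inode, struct file *file,
		     unsigned int cmd, unsigned long arg);

/* Replacement: no inode argument, no BKL, returns long. */
static long foo_unlocked_ioctl(struct file *file,
			       unsigned int cmd, unsigned long arg);

static const struct file_operations foo_fops = {
	.owner		= THIS_MODULE,
	.unlocked_ioctl	= foo_unlocked_ioctl,
};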
diff --git a/drivers/watchdog/w83877f_wdt.c b/drivers/watchdog/w83877f_wdt.c
index f510a3a595e6..24587d2060c4 100644
--- a/drivers/watchdog/w83877f_wdt.c
+++ b/drivers/watchdog/w83877f_wdt.c
@@ -23,13 +23,16 @@
23 * Added KERN_* tags to printks 23 * Added KERN_* tags to printks
24 * add CONFIG_WATCHDOG_NOWAYOUT support 24 * add CONFIG_WATCHDOG_NOWAYOUT support
25 * fix possible wdt_is_open race 25 * fix possible wdt_is_open race
26 * changed watchdog_info to correctly reflect what the driver offers 26 * changed watchdog_info to correctly reflect what
27 * added WDIOC_GETSTATUS, WDIOC_GETBOOTSTATUS, WDIOC_SETTIMEOUT, 27 * the driver offers
28 * added WDIOC_GETSTATUS, WDIOC_GETBOOTSTATUS,
29 * WDIOC_SETTIMEOUT,
28 * WDIOC_GETTIMEOUT, and WDIOC_SETOPTIONS ioctls 30 * WDIOC_GETTIMEOUT, and WDIOC_SETOPTIONS ioctls
29 * 09/8 - 2003 [wim@iguana.be] cleanup of trailing spaces 31 * 09/8 - 2003 [wim@iguana.be] cleanup of trailing spaces
30 * added extra printk's for startup problems 32 * added extra printk's for startup problems
31 * use module_param 33 * use module_param
32 * made timeout (the emulated heartbeat) a module_param 34 * made timeout (the emulated heartbeat) a
35 * module_param
33 * made the keepalive ping an internal subroutine 36 * made the keepalive ping an internal subroutine
34 * 37 *
35 * This WDT driver is different from most other Linux WDT 38 * This WDT driver is different from most other Linux WDT
@@ -51,8 +54,8 @@
51#include <linux/notifier.h> 54#include <linux/notifier.h>
52#include <linux/reboot.h> 55#include <linux/reboot.h>
53#include <linux/init.h> 56#include <linux/init.h>
54#include <asm/io.h> 57#include <linux/io.h>
55#include <asm/uaccess.h> 58#include <linux/uaccess.h>
56#include <asm/system.h> 59#include <asm/system.h>
57 60
58#define OUR_NAME "w83877f_wdt" 61#define OUR_NAME "w83877f_wdt"
@@ -80,14 +83,19 @@
80 */ 83 */
81 84
82#define WATCHDOG_TIMEOUT 30 /* 30 sec default timeout */ 85#define WATCHDOG_TIMEOUT 30 /* 30 sec default timeout */
83static int timeout = WATCHDOG_TIMEOUT; /* in seconds, will be multiplied by HZ to get seconds to wait for a ping */ 86/* in seconds, will be multiplied by HZ to get seconds to wait for a ping */
87static int timeout = WATCHDOG_TIMEOUT;
84module_param(timeout, int, 0); 88module_param(timeout, int, 0);
85MODULE_PARM_DESC(timeout, "Watchdog timeout in seconds. (1<=timeout<=3600, default=" __MODULE_STRING(WATCHDOG_TIMEOUT) ")"); 89MODULE_PARM_DESC(timeout,
90 "Watchdog timeout in seconds. (1<=timeout<=3600, default="
91 __MODULE_STRING(WATCHDOG_TIMEOUT) ")");
86 92
87 93
88static int nowayout = WATCHDOG_NOWAYOUT; 94static int nowayout = WATCHDOG_NOWAYOUT;
89module_param(nowayout, int, 0); 95module_param(nowayout, int, 0);
90MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped once started (default=" __MODULE_STRING(WATCHDOG_NOWAYOUT) ")"); 96MODULE_PARM_DESC(nowayout,
97 "Watchdog cannot be stopped once started (default="
98 __MODULE_STRING(WATCHDOG_NOWAYOUT) ")");
91 99
92static void wdt_timer_ping(unsigned long); 100static void wdt_timer_ping(unsigned long);
93static DEFINE_TIMER(timer, wdt_timer_ping, 0, 0); 101static DEFINE_TIMER(timer, wdt_timer_ping, 0, 0);
@@ -105,8 +113,7 @@ static void wdt_timer_ping(unsigned long data)
105 /* If we got a heartbeat pulse within the WDT_US_INTERVAL 113 /* If we got a heartbeat pulse within the WDT_US_INTERVAL
106 * we agree to ping the WDT 114 * we agree to ping the WDT
107 */ 115 */
108 if(time_before(jiffies, next_heartbeat)) 116 if (time_before(jiffies, next_heartbeat)) {
109 {
110 /* Ping the WDT */ 117 /* Ping the WDT */
111 spin_lock(&wdt_spinlock); 118 spin_lock(&wdt_spinlock);
112 119
@@ -118,9 +125,9 @@ static void wdt_timer_ping(unsigned long data)
118 125
119 spin_unlock(&wdt_spinlock); 126 spin_unlock(&wdt_spinlock);
120 127
121 } else { 128 } else
122 printk(KERN_WARNING PFX "Heartbeat lost! Will not ping the watchdog\n"); 129 printk(KERN_WARNING PFX
123 } 130 "Heartbeat lost! Will not ping the watchdog\n");
124} 131}
125 132
126/* 133/*
@@ -181,22 +188,21 @@ static void wdt_keepalive(void)
181 * /dev/watchdog handling 188 * /dev/watchdog handling
182 */ 189 */
183 190
184static ssize_t fop_write(struct file * file, const char __user * buf, size_t count, loff_t * ppos) 191static ssize_t fop_write(struct file *file, const char __user *buf,
192 size_t count, loff_t *ppos)
185{ 193{
186 /* See if we got the magic character 'V' and reload the timer */ 194 /* See if we got the magic character 'V' and reload the timer */
187 if(count) 195 if (count) {
188 { 196 if (!nowayout) {
189 if (!nowayout)
190 {
191 size_t ofs; 197 size_t ofs;
192 198
193 /* note: just in case someone wrote the magic character 199 /* note: just in case someone wrote the magic
194 * five months ago... */ 200 character five months ago... */
195 wdt_expect_close = 0; 201 wdt_expect_close = 0;
196 202
197 /* scan to see whether or not we got the magic character */ 203 /* scan to see whether or not we got the
198 for(ofs = 0; ofs != count; ofs++) 204 magic character */
199 { 205 for (ofs = 0; ofs != count; ofs++) {
200 char c; 206 char c;
201 if (get_user(c, buf + ofs)) 207 if (get_user(c, buf + ofs))
202 return -EFAULT; 208 return -EFAULT;
@@ -211,10 +217,10 @@ static ssize_t fop_write(struct file * file, const char __user * buf, size_t cou
211 return count; 217 return count;
212} 218}
213 219
214static int fop_open(struct inode * inode, struct file * file) 220static int fop_open(struct inode *inode, struct file *file)
215{ 221{
216 /* Just in case we're already talking to someone... */ 222 /* Just in case we're already talking to someone... */
217 if(test_and_set_bit(0, &wdt_is_open)) 223 if (test_and_set_bit(0, &wdt_is_open))
218 return -EBUSY; 224 return -EBUSY;
219 225
220 /* Good, fire up the show */ 226 /* Good, fire up the show */
@@ -222,78 +228,78 @@ static int fop_open(struct inode * inode, struct file * file)
222 return nonseekable_open(inode, file); 228 return nonseekable_open(inode, file);
223} 229}
224 230
225static int fop_close(struct inode * inode, struct file * file) 231static int fop_close(struct inode *inode, struct file *file)
226{ 232{
227 if(wdt_expect_close == 42) 233 if (wdt_expect_close == 42)
228 wdt_turnoff(); 234 wdt_turnoff();
229 else { 235 else {
230 del_timer(&timer); 236 del_timer(&timer);
231 printk(KERN_CRIT PFX "device file closed unexpectedly. Will not stop the WDT!\n"); 237 printk(KERN_CRIT PFX
238 "device file closed unexpectedly. Will not stop the WDT!\n");
232 } 239 }
233 clear_bit(0, &wdt_is_open); 240 clear_bit(0, &wdt_is_open);
234 wdt_expect_close = 0; 241 wdt_expect_close = 0;
235 return 0; 242 return 0;
236} 243}
237 244
238static int fop_ioctl(struct inode *inode, struct file *file, unsigned int cmd, 245static long fop_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
239 unsigned long arg)
240{ 246{
241 void __user *argp = (void __user *)arg; 247 void __user *argp = (void __user *)arg;
242 int __user *p = argp; 248 int __user *p = argp;
243 static struct watchdog_info ident= 249 static const struct watchdog_info ident = {
244 { 250 .options = WDIOF_KEEPALIVEPING | WDIOF_SETTIMEOUT
245 .options = WDIOF_KEEPALIVEPING | WDIOF_SETTIMEOUT | WDIOF_MAGICCLOSE, 251 | WDIOF_MAGICCLOSE,
246 .firmware_version = 1, 252 .firmware_version = 1,
247 .identity = "W83877F", 253 .identity = "W83877F",
248 }; 254 };
249 255
250 switch(cmd) 256 switch (cmd) {
257 case WDIOC_GETSUPPORT:
258 return copy_to_user(argp, &ident, sizeof(ident)) ? -EFAULT : 0;
259 case WDIOC_GETSTATUS:
260 case WDIOC_GETBOOTSTATUS:
261 return put_user(0, p);
262 case WDIOC_SETOPTIONS:
251 { 263 {
252 default: 264 int new_options, retval = -EINVAL;
253 return -ENOTTY;
254 case WDIOC_GETSUPPORT:
255 return copy_to_user(argp, &ident, sizeof(ident))?-EFAULT:0;
256 case WDIOC_GETSTATUS:
257 case WDIOC_GETBOOTSTATUS:
258 return put_user(0, p);
259 case WDIOC_KEEPALIVE:
260 wdt_keepalive();
261 return 0;
262 case WDIOC_SETOPTIONS:
263 {
264 int new_options, retval = -EINVAL;
265
266 if(get_user(new_options, p))
267 return -EFAULT;
268
269 if(new_options & WDIOS_DISABLECARD) {
270 wdt_turnoff();
271 retval = 0;
272 }
273 265
274 if(new_options & WDIOS_ENABLECARD) { 266 if (get_user(new_options, p))
275 wdt_startup(); 267 return -EFAULT;
276 retval = 0;
277 }
278 268
279 return retval; 269 if (new_options & WDIOS_DISABLECARD) {
270 wdt_turnoff();
271 retval = 0;
280 } 272 }
281 case WDIOC_SETTIMEOUT:
282 {
283 int new_timeout;
284 273
285 if(get_user(new_timeout, p)) 274 if (new_options & WDIOS_ENABLECARD) {
286 return -EFAULT; 275 wdt_startup();
276 retval = 0;
277 }
287 278
288 if(new_timeout < 1 || new_timeout > 3600) /* arbitrary upper limit */ 279 return retval;
289 return -EINVAL; 280 }
281 case WDIOC_KEEPALIVE:
282 wdt_keepalive();
283 return 0;
284 case WDIOC_SETTIMEOUT:
285 {
286 int new_timeout;
290 287
291 timeout = new_timeout; 288 if (get_user(new_timeout, p))
292 wdt_keepalive(); 289 return -EFAULT;
293 /* Fall through */ 290
294 } 291 /* arbitrary upper limit */
295 case WDIOC_GETTIMEOUT: 292 if (new_timeout < 1 || new_timeout > 3600)
296 return put_user(timeout, p); 293 return -EINVAL;
294
295 timeout = new_timeout;
296 wdt_keepalive();
297 /* Fall through */
298 }
299 case WDIOC_GETTIMEOUT:
300 return put_user(timeout, p);
301 default:
302 return -ENOTTY;
297 } 303 }
298} 304}
299 305
@@ -303,7 +309,7 @@ static const struct file_operations wdt_fops = {
303 .write = fop_write, 309 .write = fop_write,
304 .open = fop_open, 310 .open = fop_open,
305 .release = fop_close, 311 .release = fop_close,
306 .ioctl = fop_ioctl, 312 .unlocked_ioctl = fop_ioctl,
307}; 313};
308 314
309static struct miscdevice wdt_miscdev = { 315static struct miscdevice wdt_miscdev = {
@@ -319,7 +325,7 @@ static struct miscdevice wdt_miscdev = {
319static int wdt_notify_sys(struct notifier_block *this, unsigned long code, 325static int wdt_notify_sys(struct notifier_block *this, unsigned long code,
320 void *unused) 326 void *unused)
321{ 327{
322 if(code==SYS_DOWN || code==SYS_HALT) 328 if (code == SYS_DOWN || code == SYS_HALT)
323 wdt_turnoff(); 329 wdt_turnoff();
324 return NOTIFY_DONE; 330 return NOTIFY_DONE;
325} 331}
@@ -329,8 +335,7 @@ static int wdt_notify_sys(struct notifier_block *this, unsigned long code,
329 * turn the timebomb registers off. 335 * turn the timebomb registers off.
330 */ 336 */
331 337
332static struct notifier_block wdt_notifier= 338static struct notifier_block wdt_notifier = {
333{
334 .notifier_call = wdt_notify_sys, 339 .notifier_call = wdt_notify_sys,
335}; 340};
336 341
@@ -342,31 +347,29 @@ static void __exit w83877f_wdt_unload(void)
342 misc_deregister(&wdt_miscdev); 347 misc_deregister(&wdt_miscdev);
343 348
344 unregister_reboot_notifier(&wdt_notifier); 349 unregister_reboot_notifier(&wdt_notifier);
345 release_region(WDT_PING,1); 350 release_region(WDT_PING, 1);
346 release_region(ENABLE_W83877F_PORT,2); 351 release_region(ENABLE_W83877F_PORT, 2);
347} 352}
348 353
349static int __init w83877f_wdt_init(void) 354static int __init w83877f_wdt_init(void)
350{ 355{
351 int rc = -EBUSY; 356 int rc = -EBUSY;
352 357
353 if(timeout < 1 || timeout > 3600) /* arbitrary upper limit */ 358 if (timeout < 1 || timeout > 3600) { /* arbitrary upper limit */
354 {
355 timeout = WATCHDOG_TIMEOUT; 359 timeout = WATCHDOG_TIMEOUT;
356 printk(KERN_INFO PFX "timeout value must be 1<=x<=3600, using %d\n", 360 printk(KERN_INFO PFX
357 timeout); 361 "timeout value must be 1 <= x <= 3600, using %d\n",
362 timeout);
358 } 363 }
359 364
360 if (!request_region(ENABLE_W83877F_PORT, 2, "W83877F WDT")) 365 if (!request_region(ENABLE_W83877F_PORT, 2, "W83877F WDT")) {
361 {
362 printk(KERN_ERR PFX "I/O address 0x%04x already in use\n", 366 printk(KERN_ERR PFX "I/O address 0x%04x already in use\n",
363 ENABLE_W83877F_PORT); 367 ENABLE_W83877F_PORT);
364 rc = -EIO; 368 rc = -EIO;
365 goto err_out; 369 goto err_out;
366 } 370 }
367 371
368 if (!request_region(WDT_PING, 1, "W8387FF WDT")) 372 if (!request_region(WDT_PING, 1, "W8387FF WDT")) {
369 {
370 printk(KERN_ERR PFX "I/O address 0x%04x already in use\n", 373 printk(KERN_ERR PFX "I/O address 0x%04x already in use\n",
371 WDT_PING); 374 WDT_PING);
372 rc = -EIO; 375 rc = -EIO;
@@ -374,22 +377,22 @@ static int __init w83877f_wdt_init(void)
374 } 377 }
375 378
376 rc = register_reboot_notifier(&wdt_notifier); 379 rc = register_reboot_notifier(&wdt_notifier);
377 if (rc) 380 if (rc) {
378 { 381 printk(KERN_ERR PFX
379 printk(KERN_ERR PFX "cannot register reboot notifier (err=%d)\n", 382 "cannot register reboot notifier (err=%d)\n", rc);
380 rc);
381 goto err_out_region2; 383 goto err_out_region2;
382 } 384 }
383 385
384 rc = misc_register(&wdt_miscdev); 386 rc = misc_register(&wdt_miscdev);
385 if (rc) 387 if (rc) {
386 { 388 printk(KERN_ERR PFX
387 printk(KERN_ERR PFX "cannot register miscdev on minor=%d (err=%d)\n", 389 "cannot register miscdev on minor=%d (err=%d)\n",
388 wdt_miscdev.minor, rc); 390 wdt_miscdev.minor, rc);
389 goto err_out_reboot; 391 goto err_out_reboot;
390 } 392 }
391 393
392 printk(KERN_INFO PFX "WDT driver for W83877F initialised. timeout=%d sec (nowayout=%d)\n", 394 printk(KERN_INFO PFX
395 "WDT driver for W83877F initialised. timeout=%d sec (nowayout=%d)\n",
393 timeout, nowayout); 396 timeout, nowayout);
394 397
395 return 0; 398 return 0;
@@ -397,9 +400,9 @@ static int __init w83877f_wdt_init(void)
397err_out_reboot: 400err_out_reboot:
398 unregister_reboot_notifier(&wdt_notifier); 401 unregister_reboot_notifier(&wdt_notifier);
399err_out_region2: 402err_out_region2:
400 release_region(WDT_PING,1); 403 release_region(WDT_PING, 1);
401err_out_region1: 404err_out_region1:
402 release_region(ENABLE_W83877F_PORT,2); 405 release_region(ENABLE_W83877F_PORT, 2);
403err_out: 406err_out:
404 return rc; 407 return rc;
405} 408}
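The w83877f write-path hunks above reflow the usual magic-close scan without changing its behaviour. For reference, a generic sketch of that convention (a 'V' written before close arms a clean shutdown; nowayout, expect_close and foo_keepalive() stand in for the driver's own symbols):

static ssize_t foo_write(struct file *file, const char __user *buf,
			 size_t count, loff_t *ppos)
{
	if (count) {
		if (!nowayout) {
			size_t i;

			expect_close = 0;	/* forget any stale 'V' */
			for (i = 0; i != count; i++) {
				char c;

				if (get_user(c, buf + i))
					return -EFAULT;
				if (c == 'V')
					expect_close = 42;
			}
		}
		foo_keepalive();		/* any write pats the watchdog */
	}
	return count;
}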
diff --git a/drivers/watchdog/w83977f_wdt.c b/drivers/watchdog/w83977f_wdt.c
index b209bcd7f789..2525da5080ca 100644
--- a/drivers/watchdog/w83977f_wdt.c
+++ b/drivers/watchdog/w83977f_wdt.c
@@ -26,10 +26,10 @@
26#include <linux/watchdog.h> 26#include <linux/watchdog.h>
27#include <linux/notifier.h> 27#include <linux/notifier.h>
28#include <linux/reboot.h> 28#include <linux/reboot.h>
29#include <linux/uaccess.h>
30#include <linux/io.h>
29 31
30#include <asm/io.h>
31#include <asm/system.h> 32#include <asm/system.h>
32#include <asm/uaccess.h>
33 33
34#define WATCHDOG_VERSION "1.00" 34#define WATCHDOG_VERSION "1.00"
35#define WATCHDOG_NAME "W83977F WDT" 35#define WATCHDOG_NAME "W83977F WDT"
@@ -53,13 +53,17 @@ static char expect_close;
53static DEFINE_SPINLOCK(spinlock); 53static DEFINE_SPINLOCK(spinlock);
54 54
55module_param(timeout, int, 0); 55module_param(timeout, int, 0);
56MODULE_PARM_DESC(timeout,"Watchdog timeout in seconds (15..7635), default=" __MODULE_STRING(DEFAULT_TIMEOUT) ")"); 56MODULE_PARM_DESC(timeout,
57 "Watchdog timeout in seconds (15..7635), default="
58 __MODULE_STRING(DEFAULT_TIMEOUT) ")");
57module_param(testmode, int, 0); 59module_param(testmode, int, 0);
58MODULE_PARM_DESC(testmode,"Watchdog testmode (1 = no reboot), default=0"); 60MODULE_PARM_DESC(testmode, "Watchdog testmode (1 = no reboot), default=0");
59 61
60static int nowayout = WATCHDOG_NOWAYOUT; 62static int nowayout = WATCHDOG_NOWAYOUT;
61module_param(nowayout, int, 0); 63module_param(nowayout, int, 0);
62MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped once started (default=" __MODULE_STRING(WATCHDOG_NOWAYOUT) ")"); 64MODULE_PARM_DESC(nowayout,
65 "Watchdog cannot be stopped once started (default="
66 __MODULE_STRING(WATCHDOG_NOWAYOUT) ")");
63 67
64/* 68/*
65 * Start the watchdog 69 * Start the watchdog
@@ -72,8 +76,8 @@ static int wdt_start(void)
72 spin_lock_irqsave(&spinlock, flags); 76 spin_lock_irqsave(&spinlock, flags);
73 77
74 /* Unlock the SuperIO chip */ 78 /* Unlock the SuperIO chip */
75 outb_p(UNLOCK_DATA,IO_INDEX_PORT); 79 outb_p(UNLOCK_DATA, IO_INDEX_PORT);
76 outb_p(UNLOCK_DATA,IO_INDEX_PORT); 80 outb_p(UNLOCK_DATA, IO_INDEX_PORT);
77 81
78 /* 82 /*
79 * Select device Aux2 (device=8) to set watchdog regs F2, F3 and F4. 83 * Select device Aux2 (device=8) to set watchdog regs F2, F3 and F4.
@@ -81,50 +85,49 @@ static int wdt_start(void)
81 * F3 is set to enable watchdog LED blink at timeout. 85 * F3 is set to enable watchdog LED blink at timeout.
82 * F4 is used to just clear the TIMEOUT'ed state (bit 0). 86 * F4 is used to just clear the TIMEOUT'ed state (bit 0).
83 */ 87 */
84 outb_p(DEVICE_REGISTER,IO_INDEX_PORT); 88 outb_p(DEVICE_REGISTER, IO_INDEX_PORT);
85 outb_p(0x08,IO_DATA_PORT); 89 outb_p(0x08, IO_DATA_PORT);
86 outb_p(0xF2,IO_INDEX_PORT); 90 outb_p(0xF2, IO_INDEX_PORT);
87 outb_p(timeoutW,IO_DATA_PORT); 91 outb_p(timeoutW, IO_DATA_PORT);
88 outb_p(0xF3,IO_INDEX_PORT); 92 outb_p(0xF3, IO_INDEX_PORT);
89 outb_p(0x08,IO_DATA_PORT); 93 outb_p(0x08, IO_DATA_PORT);
90 outb_p(0xF4,IO_INDEX_PORT); 94 outb_p(0xF4, IO_INDEX_PORT);
91 outb_p(0x00,IO_DATA_PORT); 95 outb_p(0x00, IO_DATA_PORT);
92 96
93 /* Set device Aux2 active */ 97 /* Set device Aux2 active */
94 outb_p(0x30,IO_INDEX_PORT); 98 outb_p(0x30, IO_INDEX_PORT);
95 outb_p(0x01,IO_DATA_PORT); 99 outb_p(0x01, IO_DATA_PORT);
96 100
97 /* 101 /*
98 * Select device Aux1 (dev=7) to set GP16 as the watchdog output 102 * Select device Aux1 (dev=7) to set GP16 as the watchdog output
99 * (in reg E6) and GP13 as the watchdog LED output (in reg E3). 103 * (in reg E6) and GP13 as the watchdog LED output (in reg E3).
100 * Map GP16 at pin 119. 104 * Map GP16 at pin 119.
101 * In test mode watch the bit 0 on F4 to indicate "triggered" or 105 * In test mode watch the bit 0 on F4 to indicate "triggered" or
102 * check watchdog LED on SBC. 106 * check watchdog LED on SBC.
103 */ 107 */
104 outb_p(DEVICE_REGISTER,IO_INDEX_PORT); 108 outb_p(DEVICE_REGISTER, IO_INDEX_PORT);
105 outb_p(0x07,IO_DATA_PORT); 109 outb_p(0x07, IO_DATA_PORT);
106 if (!testmode) 110 if (!testmode) {
107 {
108 unsigned pin_map; 111 unsigned pin_map;
109 112
110 outb_p(0xE6,IO_INDEX_PORT); 113 outb_p(0xE6, IO_INDEX_PORT);
111 outb_p(0x0A,IO_DATA_PORT); 114 outb_p(0x0A, IO_DATA_PORT);
112 outb_p(0x2C,IO_INDEX_PORT); 115 outb_p(0x2C, IO_INDEX_PORT);
113 pin_map = inb_p(IO_DATA_PORT); 116 pin_map = inb_p(IO_DATA_PORT);
114 pin_map |= 0x10; 117 pin_map |= 0x10;
115 pin_map &= ~(0x20); 118 pin_map &= ~(0x20);
116 outb_p(0x2C,IO_INDEX_PORT); 119 outb_p(0x2C, IO_INDEX_PORT);
117 outb_p(pin_map,IO_DATA_PORT); 120 outb_p(pin_map, IO_DATA_PORT);
118 } 121 }
119 outb_p(0xE3,IO_INDEX_PORT); 122 outb_p(0xE3, IO_INDEX_PORT);
120 outb_p(0x08,IO_DATA_PORT); 123 outb_p(0x08, IO_DATA_PORT);
121 124
122 /* Set device Aux1 active */ 125 /* Set device Aux1 active */
123 outb_p(0x30,IO_INDEX_PORT); 126 outb_p(0x30, IO_INDEX_PORT);
124 outb_p(0x01,IO_DATA_PORT); 127 outb_p(0x01, IO_DATA_PORT);
125 128
126 /* Lock the SuperIO chip */ 129 /* Lock the SuperIO chip */
127 outb_p(LOCK_DATA,IO_INDEX_PORT); 130 outb_p(LOCK_DATA, IO_INDEX_PORT);
128 131
129 spin_unlock_irqrestore(&spinlock, flags); 132 spin_unlock_irqrestore(&spinlock, flags);
130 133
@@ -144,42 +147,41 @@ static int wdt_stop(void)
144 spin_lock_irqsave(&spinlock, flags); 147 spin_lock_irqsave(&spinlock, flags);
145 148
146 /* Unlock the SuperIO chip */ 149 /* Unlock the SuperIO chip */
147 outb_p(UNLOCK_DATA,IO_INDEX_PORT); 150 outb_p(UNLOCK_DATA, IO_INDEX_PORT);
148 outb_p(UNLOCK_DATA,IO_INDEX_PORT); 151 outb_p(UNLOCK_DATA, IO_INDEX_PORT);
149 152
150 /* 153 /*
151 * Select device Aux2 (device=8) to set watchdog regs F2, F3 and F4. 154 * Select device Aux2 (device=8) to set watchdog regs F2, F3 and F4.
152 * F2 is reset to its default value (watchdog timer disabled). 155 * F2 is reset to its default value (watchdog timer disabled).
153 * F3 is reset to its default state. 156 * F3 is reset to its default state.
154 * F4 clears the TIMEOUT'ed state (bit 0) - back to default. 157 * F4 clears the TIMEOUT'ed state (bit 0) - back to default.
155 */ 158 */
156 outb_p(DEVICE_REGISTER,IO_INDEX_PORT); 159 outb_p(DEVICE_REGISTER, IO_INDEX_PORT);
157 outb_p(0x08,IO_DATA_PORT); 160 outb_p(0x08, IO_DATA_PORT);
158 outb_p(0xF2,IO_INDEX_PORT); 161 outb_p(0xF2, IO_INDEX_PORT);
159 outb_p(0xFF,IO_DATA_PORT); 162 outb_p(0xFF, IO_DATA_PORT);
160 outb_p(0xF3,IO_INDEX_PORT); 163 outb_p(0xF3, IO_INDEX_PORT);
161 outb_p(0x00,IO_DATA_PORT); 164 outb_p(0x00, IO_DATA_PORT);
162 outb_p(0xF4,IO_INDEX_PORT); 165 outb_p(0xF4, IO_INDEX_PORT);
163 outb_p(0x00,IO_DATA_PORT); 166 outb_p(0x00, IO_DATA_PORT);
164 outb_p(0xF2,IO_INDEX_PORT); 167 outb_p(0xF2, IO_INDEX_PORT);
165 outb_p(0x00,IO_DATA_PORT); 168 outb_p(0x00, IO_DATA_PORT);
166 169
167 /* 170 /*
168 * Select device Aux1 (dev=7) to set GP16 (in reg E6) and 171 * Select device Aux1 (dev=7) to set GP16 (in reg E6) and
169 * Gp13 (in reg E3) as inputs. 172 * Gp13 (in reg E3) as inputs.
170 */ 173 */
171 outb_p(DEVICE_REGISTER,IO_INDEX_PORT); 174 outb_p(DEVICE_REGISTER, IO_INDEX_PORT);
172 outb_p(0x07,IO_DATA_PORT); 175 outb_p(0x07, IO_DATA_PORT);
173 if (!testmode) 176 if (!testmode) {
174 { 177 outb_p(0xE6, IO_INDEX_PORT);
175 outb_p(0xE6,IO_INDEX_PORT); 178 outb_p(0x01, IO_DATA_PORT);
176 outb_p(0x01,IO_DATA_PORT);
177 } 179 }
178 outb_p(0xE3,IO_INDEX_PORT); 180 outb_p(0xE3, IO_INDEX_PORT);
179 outb_p(0x01,IO_DATA_PORT); 181 outb_p(0x01, IO_DATA_PORT);
180 182
181 /* Lock the SuperIO chip */ 183 /* Lock the SuperIO chip */
182 outb_p(LOCK_DATA,IO_INDEX_PORT); 184 outb_p(LOCK_DATA, IO_INDEX_PORT);
183 185
184 spin_unlock_irqrestore(&spinlock, flags); 186 spin_unlock_irqrestore(&spinlock, flags);
185 187
@@ -200,17 +202,17 @@ static int wdt_keepalive(void)
200 spin_lock_irqsave(&spinlock, flags); 202 spin_lock_irqsave(&spinlock, flags);
201 203
202 /* Unlock the SuperIO chip */ 204 /* Unlock the SuperIO chip */
203 outb_p(UNLOCK_DATA,IO_INDEX_PORT); 205 outb_p(UNLOCK_DATA, IO_INDEX_PORT);
204 outb_p(UNLOCK_DATA,IO_INDEX_PORT); 206 outb_p(UNLOCK_DATA, IO_INDEX_PORT);
205 207
206 /* Select device Aux2 (device=8) to kick watchdog reg F2 */ 208 /* Select device Aux2 (device=8) to kick watchdog reg F2 */
207 outb_p(DEVICE_REGISTER,IO_INDEX_PORT); 209 outb_p(DEVICE_REGISTER, IO_INDEX_PORT);
208 outb_p(0x08,IO_DATA_PORT); 210 outb_p(0x08, IO_DATA_PORT);
209 outb_p(0xF2,IO_INDEX_PORT); 211 outb_p(0xF2, IO_INDEX_PORT);
210 outb_p(timeoutW,IO_DATA_PORT); 212 outb_p(timeoutW, IO_DATA_PORT);
211 213
212 /* Lock the SuperIO chip */ 214 /* Lock the SuperIO chip */
213 outb_p(LOCK_DATA,IO_INDEX_PORT); 215 outb_p(LOCK_DATA, IO_INDEX_PORT);
214 216
215 spin_unlock_irqrestore(&spinlock, flags); 217 spin_unlock_irqrestore(&spinlock, flags);
216 218
@@ -227,7 +229,7 @@ static int wdt_set_timeout(int t)
227 229
228 /* 230 /*
229 * Convert seconds to watchdog counter time units, rounding up. 231 * Convert seconds to watchdog counter time units, rounding up.
230 * On PCM-5335 watchdog units are 30 seconds/step with 15 sec startup 232 * On PCM-5335 watchdog units are 30 seconds/step with 15 sec startup
231 * value. This information is supplied in the PCM-5335 manual and was 233 * value. This information is supplied in the PCM-5335 manual and was
232 * checked by me on a real board. This is a bit strange because W83977f 234 * checked by me on a real board. This is a bit strange because W83977f
233 * datasheet says counter unit is in minutes! 235 * datasheet says counter unit is in minutes!
@@ -241,7 +243,7 @@ static int wdt_set_timeout(int t)
241 return -EINVAL; 243 return -EINVAL;
242 244
243 /* 245 /*
244 * timeout is the timeout in seconds, 246 * timeout is the timeout in seconds,
245 * timeoutW is the timeout in watchdog counter units. 247 * timeoutW is the timeout in watchdog counter units.
246 */ 248 */
247 timeoutW = tmrval; 249 timeoutW = tmrval;
@@ -261,17 +263,17 @@ static int wdt_get_status(int *status)
261 spin_lock_irqsave(&spinlock, flags); 263 spin_lock_irqsave(&spinlock, flags);
262 264
263 /* Unlock the SuperIO chip */ 265 /* Unlock the SuperIO chip */
264 outb_p(UNLOCK_DATA,IO_INDEX_PORT); 266 outb_p(UNLOCK_DATA, IO_INDEX_PORT);
265 outb_p(UNLOCK_DATA,IO_INDEX_PORT); 267 outb_p(UNLOCK_DATA, IO_INDEX_PORT);
266 268
267 /* Select device Aux2 (device=8) to read watchdog reg F4 */ 269 /* Select device Aux2 (device=8) to read watchdog reg F4 */
268 outb_p(DEVICE_REGISTER,IO_INDEX_PORT); 270 outb_p(DEVICE_REGISTER, IO_INDEX_PORT);
269 outb_p(0x08,IO_DATA_PORT); 271 outb_p(0x08, IO_DATA_PORT);
270 outb_p(0xF4,IO_INDEX_PORT); 272 outb_p(0xF4, IO_INDEX_PORT);
271 new_status = inb_p(IO_DATA_PORT); 273 new_status = inb_p(IO_DATA_PORT);
272 274
273 /* Lock the SuperIO chip */ 275 /* Lock the SuperIO chip */
274 outb_p(LOCK_DATA,IO_INDEX_PORT); 276 outb_p(LOCK_DATA, IO_INDEX_PORT);
275 277
276 spin_unlock_irqrestore(&spinlock, flags); 278 spin_unlock_irqrestore(&spinlock, flags);
277 279
@@ -290,7 +292,7 @@ static int wdt_get_status(int *status)
290static int wdt_open(struct inode *inode, struct file *file) 292static int wdt_open(struct inode *inode, struct file *file)
291{ 293{
292 /* If the watchdog is alive we don't need to start it again */ 294 /* If the watchdog is alive we don't need to start it again */
293 if( test_and_set_bit(0, &timer_alive) ) 295 if (test_and_set_bit(0, &timer_alive))
294 return -EBUSY; 296 return -EBUSY;
295 297
296 if (nowayout) 298 if (nowayout)
@@ -306,13 +308,13 @@ static int wdt_release(struct inode *inode, struct file *file)
306 * Shut off the timer. 308 * Shut off the timer.
307 * Lock it in if it's a module and we set nowayout 309 * Lock it in if it's a module and we set nowayout
308 */ 310 */
309 if (expect_close == 42) 311 if (expect_close == 42) {
310 {
311 wdt_stop(); 312 wdt_stop();
312 clear_bit(0, &timer_alive); 313 clear_bit(0, &timer_alive);
313 } else { 314 } else {
314 wdt_keepalive(); 315 wdt_keepalive();
315 printk(KERN_CRIT PFX "unexpected close, not stopping watchdog!\n"); 316 printk(KERN_CRIT PFX
317 "unexpected close, not stopping watchdog!\n");
316 } 318 }
317 expect_close = 0; 319 expect_close = 0;
318 return 0; 320 return 0;
@@ -333,24 +335,22 @@ static ssize_t wdt_write(struct file *file, const char __user *buf,
333 size_t count, loff_t *ppos) 335 size_t count, loff_t *ppos)
334{ 336{
335 /* See if we got the magic character 'V' and reload the timer */ 337 /* See if we got the magic character 'V' and reload the timer */
336 if(count) 338 if (count) {
337 { 339 if (!nowayout) {
338 if (!nowayout)
339 {
340 size_t ofs; 340 size_t ofs;
341 341
342 /* note: just in case someone wrote the magic character long ago */ 342 /* note: just in case someone wrote the
343 magic character long ago */
343 expect_close = 0; 344 expect_close = 0;
344 345
345 /* scan to see whether or not we got the magic character */ 346 /* scan to see whether or not we got the
346 for(ofs = 0; ofs != count; ofs++) 347 magic character */
347 { 348 for (ofs = 0; ofs != count; ofs++) {
348 char c; 349 char c;
349 if (get_user(c, buf + ofs)) 350 if (get_user(c, buf + ofs))
350 return -EFAULT; 351 return -EFAULT;
351 if (c == 'V') { 352 if (c == 'V')
352 expect_close = 42; 353 expect_close = 42;
353 }
354 } 354 }
355 } 355 }
356 356
@@ -377,8 +377,7 @@ static struct watchdog_info ident = {
377 .identity = WATCHDOG_NAME, 377 .identity = WATCHDOG_NAME,
378}; 378};
379 379
380static int wdt_ioctl(struct inode *inode, struct file *file, 380static long wdt_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
381 unsigned int cmd, unsigned long arg)
382{ 381{
383 int status; 382 int status;
384 int new_options, retval = -EINVAL; 383 int new_options, retval = -EINVAL;
@@ -390,13 +389,10 @@ static int wdt_ioctl(struct inode *inode, struct file *file,
390 389
391 uarg.i = (int __user *)arg; 390 uarg.i = (int __user *)arg;
392 391
393 switch(cmd) 392 switch (cmd) {
394 {
395 default:
396 return -ENOTTY;
397
398 case WDIOC_GETSUPPORT: 393 case WDIOC_GETSUPPORT:
399 return copy_to_user(uarg.ident, &ident, sizeof(ident)) ? -EFAULT : 0; 394 return copy_to_user(uarg.ident, &ident,
395 sizeof(ident)) ? -EFAULT : 0;
400 396
401 case WDIOC_GETSTATUS: 397 case WDIOC_GETSTATUS:
402 wdt_get_status(&status); 398 wdt_get_status(&status);
@@ -405,12 +401,8 @@ static int wdt_ioctl(struct inode *inode, struct file *file,
405 case WDIOC_GETBOOTSTATUS: 401 case WDIOC_GETBOOTSTATUS:
406 return put_user(0, uarg.i); 402 return put_user(0, uarg.i);
407 403
408 case WDIOC_KEEPALIVE:
409 wdt_keepalive();
410 return 0;
411
412 case WDIOC_SETOPTIONS: 404 case WDIOC_SETOPTIONS:
413 if (get_user (new_options, uarg.i)) 405 if (get_user(new_options, uarg.i))
414 return -EFAULT; 406 return -EFAULT;
415 407
416 if (new_options & WDIOS_DISABLECARD) { 408 if (new_options & WDIOS_DISABLECARD) {
@@ -425,6 +417,10 @@ static int wdt_ioctl(struct inode *inode, struct file *file,
425 417
426 return retval; 418 return retval;
427 419
420 case WDIOC_KEEPALIVE:
421 wdt_keepalive();
422 return 0;
423
428 case WDIOC_SETTIMEOUT: 424 case WDIOC_SETTIMEOUT:
429 if (get_user(new_timeout, uarg.i)) 425 if (get_user(new_timeout, uarg.i))
430 return -EFAULT; 426 return -EFAULT;
@@ -438,29 +434,30 @@ static int wdt_ioctl(struct inode *inode, struct file *file,
438 case WDIOC_GETTIMEOUT: 434 case WDIOC_GETTIMEOUT:
439 return put_user(timeout, uarg.i); 435 return put_user(timeout, uarg.i);
440 436
437 default:
438 return -ENOTTY;
439
441 } 440 }
442} 441}
443 442
444static int wdt_notify_sys(struct notifier_block *this, unsigned long code, 443static int wdt_notify_sys(struct notifier_block *this, unsigned long code,
445 void *unused) 444 void *unused)
446{ 445{
447 if (code==SYS_DOWN || code==SYS_HALT) 446 if (code == SYS_DOWN || code == SYS_HALT)
448 wdt_stop(); 447 wdt_stop();
449 return NOTIFY_DONE; 448 return NOTIFY_DONE;
450} 449}
451 450
452static const struct file_operations wdt_fops= 451static const struct file_operations wdt_fops = {
453{
454 .owner = THIS_MODULE, 452 .owner = THIS_MODULE,
455 .llseek = no_llseek, 453 .llseek = no_llseek,
456 .write = wdt_write, 454 .write = wdt_write,
457 .ioctl = wdt_ioctl, 455 .unlocked_ioctl = wdt_ioctl,
458 .open = wdt_open, 456 .open = wdt_open,
459 .release = wdt_release, 457 .release = wdt_release,
460}; 458};
461 459
462static struct miscdevice wdt_miscdev= 460static struct miscdevice wdt_miscdev = {
463{
464 .minor = WATCHDOG_MINOR, 461 .minor = WATCHDOG_MINOR,
465 .name = "watchdog", 462 .name = "watchdog",
466 .fops = &wdt_fops, 463 .fops = &wdt_fops,
@@ -474,20 +471,20 @@ static int __init w83977f_wdt_init(void)
474{ 471{
475 int rc; 472 int rc;
476 473
477 printk(KERN_INFO PFX DRIVER_VERSION); 474 printk(KERN_INFO PFX DRIVER_VERSION);
478 475
479 /* 476 /*
480 * Check that the timeout value is within it's range ; 477 * Check that the timeout value is within it's range;
481 * if not reset to the default 478 * if not reset to the default
482 */ 479 */
483 if (wdt_set_timeout(timeout)) { 480 if (wdt_set_timeout(timeout)) {
484 wdt_set_timeout(DEFAULT_TIMEOUT); 481 wdt_set_timeout(DEFAULT_TIMEOUT);
485 printk(KERN_INFO PFX "timeout value must be 15<=timeout<=7635, using %d\n", 482 printk(KERN_INFO PFX
486 DEFAULT_TIMEOUT); 483 "timeout value must be 15 <= timeout <= 7635, using %d\n",
484 DEFAULT_TIMEOUT);
487 } 485 }
488 486
489 if (!request_region(IO_INDEX_PORT, 2, WATCHDOG_NAME)) 487 if (!request_region(IO_INDEX_PORT, 2, WATCHDOG_NAME)) {
490 {
491 printk(KERN_ERR PFX "I/O address 0x%04x already in use\n", 488 printk(KERN_ERR PFX "I/O address 0x%04x already in use\n",
492 IO_INDEX_PORT); 489 IO_INDEX_PORT);
493 rc = -EIO; 490 rc = -EIO;
@@ -495,30 +492,30 @@ static int __init w83977f_wdt_init(void)
495 } 492 }
496 493
497 rc = register_reboot_notifier(&wdt_notifier); 494 rc = register_reboot_notifier(&wdt_notifier);
498 if (rc) 495 if (rc) {
499 { 496 printk(KERN_ERR PFX
500 printk(KERN_ERR PFX "cannot register reboot notifier (err=%d)\n", 497 "cannot register reboot notifier (err=%d)\n", rc);
501 rc);
502 goto err_out_region; 498 goto err_out_region;
503 } 499 }
504 500
505 rc = misc_register(&wdt_miscdev); 501 rc = misc_register(&wdt_miscdev);
506 if (rc) 502 if (rc) {
507 { 503 printk(KERN_ERR PFX
508 printk(KERN_ERR PFX "cannot register miscdev on minor=%d (err=%d)\n", 504 "cannot register miscdev on minor=%d (err=%d)\n",
509 wdt_miscdev.minor, rc); 505 wdt_miscdev.minor, rc);
510 goto err_out_reboot; 506 goto err_out_reboot;
511 } 507 }
512 508
513 printk(KERN_INFO PFX "initialized. timeout=%d sec (nowayout=%d testmode=%d)\n", 509 printk(KERN_INFO PFX
514 timeout, nowayout, testmode); 510 "initialized. timeout=%d sec (nowayout=%d testmode=%d)\n",
511 timeout, nowayout, testmode);
515 512
516 return 0; 513 return 0;
517 514
518err_out_reboot: 515err_out_reboot:
519 unregister_reboot_notifier(&wdt_notifier); 516 unregister_reboot_notifier(&wdt_notifier);
520err_out_region: 517err_out_region:
521 release_region(IO_INDEX_PORT,2); 518 release_region(IO_INDEX_PORT, 2);
522err_out: 519err_out:
523 return rc; 520 return rc;
524} 521}
@@ -528,7 +525,7 @@ static void __exit w83977f_wdt_exit(void)
528 wdt_stop(); 525 wdt_stop();
529 misc_deregister(&wdt_miscdev); 526 misc_deregister(&wdt_miscdev);
530 unregister_reboot_notifier(&wdt_notifier); 527 unregister_reboot_notifier(&wdt_notifier);
531 release_region(IO_INDEX_PORT,2); 528 release_region(IO_INDEX_PORT, 2);
532} 529}
533 530
534module_init(w83977f_wdt_init); 531module_init(w83977f_wdt_init);
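The w83977f hunks are almost entirely whitespace fixes (a space after each comma in the outb_p() calls). For readers skimming the diff, the sequence being reformatted is the chip's usual SuperIO access pattern, paraphrased here from the wdt_keepalive() hunk above (register indices and values as in that hunk, not independently verified):

unsigned long flags;

spin_lock_irqsave(&spinlock, flags);

outb_p(UNLOCK_DATA, IO_INDEX_PORT);	/* unlock the SuperIO chip ... */
outb_p(UNLOCK_DATA, IO_INDEX_PORT);	/* ... the magic byte is written twice */

outb_p(DEVICE_REGISTER, IO_INDEX_PORT);	/* select logical device Aux2 (8) */
outb_p(0x08, IO_DATA_PORT);

outb_p(0xF2, IO_INDEX_PORT);		/* index the watchdog counter register */
outb_p(timeoutW, IO_DATA_PORT);		/* reload it to pat the dog */

outb_p(LOCK_DATA, IO_INDEX_PORT);	/* re-lock the chip */

spin_unlock_irqrestore(&spinlock, flags);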
diff --git a/drivers/watchdog/wafer5823wdt.c b/drivers/watchdog/wafer5823wdt.c
index 9e368091f799..68377ae171ff 100644
--- a/drivers/watchdog/wafer5823wdt.c
+++ b/drivers/watchdog/wafer5823wdt.c
@@ -1,11 +1,11 @@
1/* 1/*
2 * ICP Wafer 5823 Single Board Computer WDT driver 2 * ICP Wafer 5823 Single Board Computer WDT driver
3 * http://www.icpamerica.com/wafer_5823.php 3 * http://www.icpamerica.com/wafer_5823.php
4 * May also work on other similar models 4 * May also work on other similar models
5 * 5 *
6 * (c) Copyright 2002 Justin Cormack <justin@street-vision.com> 6 * (c) Copyright 2002 Justin Cormack <justin@street-vision.com>
7 * 7 *
8 * Release 0.02 8 * Release 0.02
9 * 9 *
10 * Based on advantechwdt.c which is based on wdt.c. 10 * Based on advantechwdt.c which is based on wdt.c.
11 * Original copyright messages: 11 * Original copyright messages:
@@ -36,8 +36,8 @@
36#include <linux/reboot.h> 36#include <linux/reboot.h>
37#include <linux/init.h> 37#include <linux/init.h>
38#include <linux/spinlock.h> 38#include <linux/spinlock.h>
39#include <asm/io.h> 39#include <linux/io.h>
40#include <asm/uaccess.h> 40#include <linux/uaccess.h>
41 41
42#define WATCHDOG_NAME "Wafer 5823 WDT" 42#define WATCHDOG_NAME "Wafer 5823 WDT"
43#define PFX WATCHDOG_NAME ": " 43#define PFX WATCHDOG_NAME ": "
@@ -50,10 +50,10 @@ static DEFINE_SPINLOCK(wafwdt_lock);
50/* 50/*
51 * You must set these - there is no sane way to probe for this board. 51 * You must set these - there is no sane way to probe for this board.
52 * 52 *
53 * To enable, write the timeout value in seconds (1 to 255) to I/O 53 * To enable, write the timeout value in seconds (1 to 255) to I/O
54 * port WDT_START, then read the port to start the watchdog. To pat 54 * port WDT_START, then read the port to start the watchdog. To pat
55 * the dog, read port WDT_STOP to stop the timer, then read WDT_START 55 * the dog, read port WDT_STOP to stop the timer, then read WDT_START
56 * to restart it again. 56 * to restart it again.
57 */ 57 */
58 58
59static int wdt_stop = 0x843; 59static int wdt_stop = 0x843;
@@ -61,11 +61,15 @@ static int wdt_start = 0x443;
61 61
62static int timeout = WD_TIMO; /* in seconds */ 62static int timeout = WD_TIMO; /* in seconds */
63module_param(timeout, int, 0); 63module_param(timeout, int, 0);
64MODULE_PARM_DESC(timeout, "Watchdog timeout in seconds. 1<= timeout <=255, default=" __MODULE_STRING(WD_TIMO) "."); 64MODULE_PARM_DESC(timeout,
65 "Watchdog timeout in seconds. 1 <= timeout <= 255, default="
66 __MODULE_STRING(WD_TIMO) ".");
65 67
66static int nowayout = WATCHDOG_NOWAYOUT; 68static int nowayout = WATCHDOG_NOWAYOUT;
67module_param(nowayout, int, 0); 69module_param(nowayout, int, 0);
68MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped once started (default=" __MODULE_STRING(WATCHDOG_NOWAYOUT) ")"); 70MODULE_PARM_DESC(nowayout,
71 "Watchdog cannot be stopped once started (default="
72 __MODULE_STRING(WATCHDOG_NOWAYOUT) ")");
69 73
70static void wafwdt_ping(void) 74static void wafwdt_ping(void)
71{ 75{
@@ -83,14 +87,14 @@ static void wafwdt_start(void)
83 inb_p(wdt_start); 87 inb_p(wdt_start);
84} 88}
85 89
86static void 90static void wafwdt_stop(void)
87wafwdt_stop(void)
88{ 91{
89 /* stop watchdog */ 92 /* stop watchdog */
90 inb_p(wdt_stop); 93 inb_p(wdt_stop);
91} 94}
92 95
93static ssize_t wafwdt_write(struct file *file, const char __user *buf, size_t count, loff_t * ppos) 96static ssize_t wafwdt_write(struct file *file, const char __user *buf,
97 size_t count, loff_t *ppos)
94{ 98{
95 /* See if we got the magic character 'V' and reload the timer */ 99 /* See if we got the magic character 'V' and reload the timer */
96 if (count) { 100 if (count) {
@@ -100,7 +104,8 @@ static ssize_t wafwdt_write(struct file *file, const char __user *buf, size_t co
100 /* In case it was set long ago */ 104 /* In case it was set long ago */
101 expect_close = 0; 105 expect_close = 0;
102 106
103 /* scan to see whether or not we got the magic character */ 107 /* scan to see whether or not we got the magic
108 character */
104 for (i = 0; i != count; i++) { 109 for (i = 0; i != count; i++) {
105 char c; 110 char c;
106 if (get_user(c, buf + i)) 111 if (get_user(c, buf + i))
@@ -109,27 +114,29 @@ static ssize_t wafwdt_write(struct file *file, const char __user *buf, size_t co
109 expect_close = 42; 114 expect_close = 42;
110 } 115 }
111 } 116 }
112 /* Well, anyhow someone wrote to us, we should return that favour */ 117 /* Well, anyhow someone wrote to us, we should
118 return that favour */
113 wafwdt_ping(); 119 wafwdt_ping();
114 } 120 }
115 return count; 121 return count;
116} 122}
117 123
118static int wafwdt_ioctl(struct inode *inode, struct file *file, unsigned int cmd, 124static long wafwdt_ioctl(struct file *file, unsigned int cmd,
119 unsigned long arg) 125 unsigned long arg)
120{ 126{
121 int new_timeout; 127 int new_timeout;
122 void __user *argp = (void __user *)arg; 128 void __user *argp = (void __user *)arg;
123 int __user *p = argp; 129 int __user *p = argp;
124 static struct watchdog_info ident = { 130 static const struct watchdog_info ident = {
125 .options = WDIOF_KEEPALIVEPING | WDIOF_SETTIMEOUT | WDIOF_MAGICCLOSE, 131 .options = WDIOF_KEEPALIVEPING | WDIOF_SETTIMEOUT |
132 WDIOF_MAGICCLOSE,
126 .firmware_version = 1, 133 .firmware_version = 1,
127 .identity = "Wafer 5823 WDT", 134 .identity = "Wafer 5823 WDT",
128 }; 135 };
129 136
130 switch (cmd) { 137 switch (cmd) {
131 case WDIOC_GETSUPPORT: 138 case WDIOC_GETSUPPORT:
132 if (copy_to_user(argp, &ident, sizeof (ident))) 139 if (copy_to_user(argp, &ident, sizeof(ident)))
133 return -EFAULT; 140 return -EFAULT;
134 break; 141 break;
135 142
@@ -137,22 +144,6 @@ static int wafwdt_ioctl(struct inode *inode, struct file *file, unsigned int cmd
137 case WDIOC_GETBOOTSTATUS: 144 case WDIOC_GETBOOTSTATUS:
138 return put_user(0, p); 145 return put_user(0, p);
139 146
140 case WDIOC_KEEPALIVE:
141 wafwdt_ping();
142 break;
143
144 case WDIOC_SETTIMEOUT:
145 if (get_user(new_timeout, p))
146 return -EFAULT;
147 if ((new_timeout < 1) || (new_timeout > 255))
148 return -EINVAL;
149 timeout = new_timeout;
150 wafwdt_stop();
151 wafwdt_start();
152 /* Fall */
153 case WDIOC_GETTIMEOUT:
154 return put_user(timeout, p);
155
156 case WDIOC_SETOPTIONS: 147 case WDIOC_SETOPTIONS:
157 { 148 {
158 int options, retval = -EINVAL; 149 int options, retval = -EINVAL;
@@ -173,6 +164,22 @@ static int wafwdt_ioctl(struct inode *inode, struct file *file, unsigned int cmd
173 return retval; 164 return retval;
174 } 165 }
175 166
167 case WDIOC_KEEPALIVE:
168 wafwdt_ping();
169 break;
170
171 case WDIOC_SETTIMEOUT:
172 if (get_user(new_timeout, p))
173 return -EFAULT;
174 if ((new_timeout < 1) || (new_timeout > 255))
175 return -EINVAL;
176 timeout = new_timeout;
177 wafwdt_stop();
178 wafwdt_start();
179 /* Fall */
180 case WDIOC_GETTIMEOUT:
181 return put_user(timeout, p);
182
176 default: 183 default:
177 return -ENOTTY; 184 return -ENOTTY;
178 } 185 }
@@ -191,13 +198,13 @@ static int wafwdt_open(struct inode *inode, struct file *file)
191 return nonseekable_open(inode, file); 198 return nonseekable_open(inode, file);
192} 199}
193 200
194static int 201static int wafwdt_close(struct inode *inode, struct file *file)
195wafwdt_close(struct inode *inode, struct file *file)
196{ 202{
197 if (expect_close == 42) { 203 if (expect_close == 42)
198 wafwdt_stop(); 204 wafwdt_stop();
199 } else { 205 else {
200 printk(KERN_CRIT PFX "WDT device closed unexpectedly. WDT will not stop!\n"); 206 printk(KERN_CRIT PFX
207 "WDT device closed unexpectedly. WDT will not stop!\n");
201 wafwdt_ping(); 208 wafwdt_ping();
202 } 209 }
203 clear_bit(0, &wafwdt_is_open); 210 clear_bit(0, &wafwdt_is_open);
@@ -209,12 +216,11 @@ wafwdt_close(struct inode *inode, struct file *file)
209 * Notifier for system down 216 * Notifier for system down
210 */ 217 */
211 218
212static int wafwdt_notify_sys(struct notifier_block *this, unsigned long code, void *unused) 219static int wafwdt_notify_sys(struct notifier_block *this, unsigned long code,
220 void *unused)
213{ 221{
214 if (code == SYS_DOWN || code == SYS_HALT) { 222 if (code == SYS_DOWN || code == SYS_HALT)
215 /* Turn the WDT off */
216 wafwdt_stop(); 223 wafwdt_stop();
217 }
218 return NOTIFY_DONE; 224 return NOTIFY_DONE;
219} 225}
220 226
@@ -226,7 +232,7 @@ static const struct file_operations wafwdt_fops = {
226 .owner = THIS_MODULE, 232 .owner = THIS_MODULE,
227 .llseek = no_llseek, 233 .llseek = no_llseek,
228 .write = wafwdt_write, 234 .write = wafwdt_write,
229 .ioctl = wafwdt_ioctl, 235 .unlocked_ioctl = wafwdt_ioctl,
230 .open = wafwdt_open, 236 .open = wafwdt_open,
231 .release = wafwdt_close, 237 .release = wafwdt_close,
232}; 238};
@@ -250,25 +256,28 @@ static int __init wafwdt_init(void)
250{ 256{
251 int ret; 257 int ret;
252 258
253 printk(KERN_INFO "WDT driver for Wafer 5823 single board computer initialising.\n"); 259 printk(KERN_INFO
260 "WDT driver for Wafer 5823 single board computer initialising.\n");
254 261
255 if (timeout < 1 || timeout > 255) { 262 if (timeout < 1 || timeout > 255) {
256 timeout = WD_TIMO; 263 timeout = WD_TIMO;
257 printk (KERN_INFO PFX "timeout value must be 1<=x<=255, using %d\n", 264 printk(KERN_INFO PFX
258 timeout); 265 "timeout value must be 1 <= x <= 255, using %d\n",
266 timeout);
259 } 267 }
260 268
261 if (wdt_stop != wdt_start) { 269 if (wdt_stop != wdt_start) {
262 if(!request_region(wdt_stop, 1, "Wafer 5823 WDT")) { 270 if (!request_region(wdt_stop, 1, "Wafer 5823 WDT")) {
263 printk (KERN_ERR PFX "I/O address 0x%04x already in use\n", 271 printk(KERN_ERR PFX
264 wdt_stop); 272 "I/O address 0x%04x already in use\n",
273 wdt_stop);
265 ret = -EIO; 274 ret = -EIO;
266 goto error; 275 goto error;
267 } 276 }
268 } 277 }
269 278
270 if(!request_region(wdt_start, 1, "Wafer 5823 WDT")) { 279 if (!request_region(wdt_start, 1, "Wafer 5823 WDT")) {
271 printk (KERN_ERR PFX "I/O address 0x%04x already in use\n", 280 printk(KERN_ERR PFX "I/O address 0x%04x already in use\n",
272 wdt_start); 281 wdt_start);
273 ret = -EIO; 282 ret = -EIO;
274 goto error2; 283 goto error2;
@@ -276,19 +285,20 @@ static int __init wafwdt_init(void)
276 285
277 ret = register_reboot_notifier(&wafwdt_notifier); 286 ret = register_reboot_notifier(&wafwdt_notifier);
278 if (ret != 0) { 287 if (ret != 0) {
279 printk (KERN_ERR PFX "cannot register reboot notifier (err=%d)\n", 288 printk(KERN_ERR PFX
280 ret); 289 "cannot register reboot notifier (err=%d)\n", ret);
281 goto error3; 290 goto error3;
282 } 291 }
283 292
284 ret = misc_register(&wafwdt_miscdev); 293 ret = misc_register(&wafwdt_miscdev);
285 if (ret != 0) { 294 if (ret != 0) {
286 printk (KERN_ERR PFX "cannot register miscdev on minor=%d (err=%d)\n", 295 printk(KERN_ERR PFX
287 WATCHDOG_MINOR, ret); 296 "cannot register miscdev on minor=%d (err=%d)\n",
297 WATCHDOG_MINOR, ret);
288 goto error4; 298 goto error4;
289 } 299 }
290 300
291 printk (KERN_INFO PFX "initialized. timeout=%d sec (nowayout=%d)\n", 301 printk(KERN_INFO PFX "initialized. timeout=%d sec (nowayout=%d)\n",
292 timeout, nowayout); 302 timeout, nowayout);
293 303
294 return ret; 304 return ret;
@@ -307,7 +317,7 @@ static void __exit wafwdt_exit(void)
307{ 317{
308 misc_deregister(&wafwdt_miscdev); 318 misc_deregister(&wafwdt_miscdev);
309 unregister_reboot_notifier(&wafwdt_notifier); 319 unregister_reboot_notifier(&wafwdt_notifier);
310 if(wdt_stop != wdt_start) 320 if (wdt_stop != wdt_start)
311 release_region(wdt_stop, 1); 321 release_region(wdt_stop, 1);
312 release_region(wdt_start, 1); 322 release_region(wdt_start, 1);
313} 323}
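
The wafwdt.c hunks above show the conversion that repeats through the rest of these watchdog patches: the handler loses its struct inode argument, returns long instead of int, and is wired up through .unlocked_ioctl instead of .ioctl. A minimal sketch of the resulting shape; the mywdt_* names and the timeout variable are illustrative stand-ins, not taken from wafwdt.c:

#include <linux/fs.h>
#include <linux/module.h>
#include <linux/uaccess.h>
#include <linux/watchdog.h>

static int timeout = 60;			/* stand-in for the module parameter */

static void mywdt_ping(void) { /* hardware keepalive elided */ }

static long mywdt_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
	void __user *argp = (void __user *)arg;
	int __user *p = argp;

	switch (cmd) {
	case WDIOC_KEEPALIVE:
		mywdt_ping();
		return 0;
	case WDIOC_GETTIMEOUT:
		return put_user(timeout, p);
	default:
		return -ENOTTY;
	}
}

static const struct file_operations mywdt_fops = {
	.owner		= THIS_MODULE,
	.unlocked_ioctl	= mywdt_ioctl,		/* was .ioctl before the conversion */
};

Unlike the old .ioctl entry point, .unlocked_ioctl is entered without the big kernel lock, so a handler that touches shared driver state is expected to bring its own locking; the drivers in this series rely on their existing spinlocks or on the operations being single get_user/put_user and port accesses.
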
diff --git a/drivers/watchdog/wd501p.h b/drivers/watchdog/wd501p.h
index a4504f40394d..db34853c28ae 100644
--- a/drivers/watchdog/wd501p.h
+++ b/drivers/watchdog/wd501p.h
@@ -12,7 +12,7 @@
12 * http://www.cymru.net 12 * http://www.cymru.net
13 * 13 *
14 * This driver is provided under the GNU General Public License, incorporated 14 * This driver is provided under the GNU General Public License, incorporated
15 * herein by reference. The driver is provided without warranty or 15 * herein by reference. The driver is provided without warranty or
16 * support. 16 * support.
17 * 17 *
18 * Release 0.04. 18 * Release 0.04.
diff --git a/drivers/watchdog/wdrtas.c b/drivers/watchdog/wdrtas.c
index 1d64e277567d..5d3b1a8e28b0 100644
--- a/drivers/watchdog/wdrtas.c
+++ b/drivers/watchdog/wdrtas.c
@@ -35,9 +35,9 @@
35#include <linux/reboot.h> 35#include <linux/reboot.h>
36#include <linux/types.h> 36#include <linux/types.h>
37#include <linux/watchdog.h> 37#include <linux/watchdog.h>
38#include <linux/uaccess.h>
38 39
39#include <asm/rtas.h> 40#include <asm/rtas.h>
40#include <asm/uaccess.h>
41 41
42#define WDRTAS_MAGIC_CHAR 42 42#define WDRTAS_MAGIC_CHAR 42
43#define WDRTAS_SUPPORTED_MASK (WDIOF_SETTIMEOUT | \ 43#define WDRTAS_SUPPORTED_MASK (WDIOF_SETTIMEOUT | \
@@ -56,7 +56,7 @@ static int wdrtas_nowayout = 0;
56#endif 56#endif
57 57
58static atomic_t wdrtas_miscdev_open = ATOMIC_INIT(0); 58static atomic_t wdrtas_miscdev_open = ATOMIC_INIT(0);
59static char wdrtas_expect_close = 0; 59static char wdrtas_expect_close;
60 60
61static int wdrtas_interval; 61static int wdrtas_interval;
62 62
@@ -86,8 +86,8 @@ static char wdrtas_logbuffer[WDRTAS_LOGBUFFER_LEN];
86 * RTAS function set-indicator (surveillance). The unit of interval is 86 * RTAS function set-indicator (surveillance). The unit of interval is
87 * seconds. 87 * seconds.
88 */ 88 */
89static int 89
90wdrtas_set_interval(int interval) 90static int wdrtas_set_interval(int interval)
91{ 91{
92 long result; 92 long result;
93 static int print_msg = 10; 93 static int print_msg = 10;
@@ -97,7 +97,7 @@ wdrtas_set_interval(int interval)
97 97
98 result = rtas_call(wdrtas_token_set_indicator, 3, 1, NULL, 98 result = rtas_call(wdrtas_token_set_indicator, 3, 1, NULL,
99 WDRTAS_SURVEILLANCE_IND, 0, interval); 99 WDRTAS_SURVEILLANCE_IND, 0, interval);
100 if ( (result < 0) && (print_msg) ) { 100 if (result < 0 && print_msg) {
101 printk(KERN_ERR "wdrtas: setting the watchdog to %i " 101 printk(KERN_ERR "wdrtas: setting the watchdog to %i "
102 "timeout failed: %li\n", interval, result); 102 "timeout failed: %li\n", interval, result);
103 print_msg--; 103 print_msg--;
@@ -116,16 +116,14 @@ wdrtas_set_interval(int interval)
116 * as reported by the RTAS function ibm,get-system-parameter. The unit 116 * as reported by the RTAS function ibm,get-system-parameter. The unit
117 * of the return value is seconds. 117 * of the return value is seconds.
118 */ 118 */
119static int 119static int wdrtas_get_interval(int fallback_value)
120wdrtas_get_interval(int fallback_value)
121{ 120{
122 long result; 121 long result;
123 char value[4]; 122 char value[4];
124 123
125 result = rtas_call(wdrtas_token_get_sp, 3, 1, NULL, 124 result = rtas_call(wdrtas_token_get_sp, 3, 1, NULL,
126 WDRTAS_SP_SPI, (void *)__pa(&value), 4); 125 WDRTAS_SP_SPI, (void *)__pa(&value), 4);
127 if ( (value[0] != 0) || (value[1] != 2) || (value[3] != 0) || 126 if (value[0] != 0 || value[1] != 2 || value[3] != 0 || result < 0) {
128 (result < 0) ) {
129 printk(KERN_WARNING "wdrtas: could not get sp_spi watchdog " 127 printk(KERN_WARNING "wdrtas: could not get sp_spi watchdog "
130 "timeout (%li). Continuing\n", result); 128 "timeout (%li). Continuing\n", result);
131 return fallback_value; 129 return fallback_value;
@@ -141,8 +139,7 @@ wdrtas_get_interval(int fallback_value)
141 * wdrtas_timer_start starts the watchdog by calling the RTAS function 139 * wdrtas_timer_start starts the watchdog by calling the RTAS function
142 * set-interval (surveillance) 140 * set-interval (surveillance)
143 */ 141 */
144static void 142static void wdrtas_timer_start(void)
145wdrtas_timer_start(void)
146{ 143{
147 wdrtas_set_interval(wdrtas_interval); 144 wdrtas_set_interval(wdrtas_interval);
148} 145}
@@ -153,8 +150,7 @@ wdrtas_timer_start(void)
153 * wdrtas_timer_stop stops the watchdog timer by calling the RTAS function 150 * wdrtas_timer_stop stops the watchdog timer by calling the RTAS function
154 * set-interval (surveillance) 151 * set-interval (surveillance)
155 */ 152 */
156static void 153static void wdrtas_timer_stop(void)
157wdrtas_timer_stop(void)
158{ 154{
159 wdrtas_set_interval(0); 155 wdrtas_set_interval(0);
160} 156}
@@ -165,8 +161,7 @@ wdrtas_timer_stop(void)
165 * wdrtas_log_scanned_event prints a message to the log buffer dumping 161 * wdrtas_log_scanned_event prints a message to the log buffer dumping
166 * the results of the last event-scan call 162 * the results of the last event-scan call
167 */ 163 */
168static void 164static void wdrtas_log_scanned_event(void)
169wdrtas_log_scanned_event(void)
170{ 165{
171 int i; 166 int i;
172 167
@@ -175,13 +170,13 @@ wdrtas_log_scanned_event(void)
175 "%02x %02x %02x %02x %02x %02x %02x %02x " 170 "%02x %02x %02x %02x %02x %02x %02x %02x "
176 "%02x %02x %02x %02x %02x %02x %02x %02x\n", 171 "%02x %02x %02x %02x %02x %02x %02x %02x\n",
177 (i / 16) + 1, (WDRTAS_LOGBUFFER_LEN / 16), 172 (i / 16) + 1, (WDRTAS_LOGBUFFER_LEN / 16),
178 wdrtas_logbuffer[i + 0], wdrtas_logbuffer[i + 1], 173 wdrtas_logbuffer[i + 0], wdrtas_logbuffer[i + 1],
179 wdrtas_logbuffer[i + 2], wdrtas_logbuffer[i + 3], 174 wdrtas_logbuffer[i + 2], wdrtas_logbuffer[i + 3],
180 wdrtas_logbuffer[i + 4], wdrtas_logbuffer[i + 5], 175 wdrtas_logbuffer[i + 4], wdrtas_logbuffer[i + 5],
181 wdrtas_logbuffer[i + 6], wdrtas_logbuffer[i + 7], 176 wdrtas_logbuffer[i + 6], wdrtas_logbuffer[i + 7],
182 wdrtas_logbuffer[i + 8], wdrtas_logbuffer[i + 9], 177 wdrtas_logbuffer[i + 8], wdrtas_logbuffer[i + 9],
183 wdrtas_logbuffer[i + 10], wdrtas_logbuffer[i + 11], 178 wdrtas_logbuffer[i + 10], wdrtas_logbuffer[i + 11],
184 wdrtas_logbuffer[i + 12], wdrtas_logbuffer[i + 13], 179 wdrtas_logbuffer[i + 12], wdrtas_logbuffer[i + 13],
185 wdrtas_logbuffer[i + 14], wdrtas_logbuffer[i + 15]); 180 wdrtas_logbuffer[i + 14], wdrtas_logbuffer[i + 15]);
186} 181}
187 182
@@ -192,8 +187,7 @@ wdrtas_log_scanned_event(void)
192 * RTAS function event-scan and repeats these calls as long as there are 187 * RTAS function event-scan and repeats these calls as long as there are
193 * events available. All events will be dumped. 188 * events available. All events will be dumped.
194 */ 189 */
195static void 190static void wdrtas_timer_keepalive(void)
196wdrtas_timer_keepalive(void)
197{ 191{
198 long result; 192 long result;
199 193
@@ -218,8 +212,7 @@ wdrtas_timer_keepalive(void)
218 * wdrtas_get_temperature returns the current temperature in Fahrenheit. It 212 * wdrtas_get_temperature returns the current temperature in Fahrenheit. It
219 * uses the RTAS call get-sensor-state, token 3 to do so 213 * uses the RTAS call get-sensor-state, token 3 to do so
220 */ 214 */
221static int 215static int wdrtas_get_temperature(void)
222wdrtas_get_temperature(void)
223{ 216{
224 long result; 217 long result;
225 int temperature = 0; 218 int temperature = 0;
@@ -243,8 +236,7 @@ wdrtas_get_temperature(void)
243 * returns a bitmask of defines WDIOF_... as defined in 236 * returns a bitmask of defines WDIOF_... as defined in
244 * include/linux/watchdog.h 237 * include/linux/watchdog.h
245 */ 238 */
246static int 239static int wdrtas_get_status(void)
247wdrtas_get_status(void)
248{ 240{
249 return 0; /* TODO */ 241 return 0; /* TODO */
250} 242}
@@ -255,8 +247,7 @@ wdrtas_get_status(void)
255 * returns a bitmask of defines WDIOF_... as defined in 247 * returns a bitmask of defines WDIOF_... as defined in
256 * include/linux/watchdog.h, indicating why the watchdog rebooted the system 248 * include/linux/watchdog.h, indicating why the watchdog rebooted the system
257 */ 249 */
258static int 250static int wdrtas_get_boot_status(void)
259wdrtas_get_boot_status(void)
260{ 251{
261 return 0; /* TODO */ 252 return 0; /* TODO */
262} 253}
@@ -276,8 +267,7 @@ wdrtas_get_boot_status(void)
276 * character 'V'. This character allows the watchdog device to be closed 267 * character 'V'. This character allows the watchdog device to be closed
277 * properly. 268 * properly.
278 */ 269 */
279static ssize_t 270static ssize_t wdrtas_write(struct file *file, const char __user *buf,
280wdrtas_write(struct file *file, const char __user *buf,
281 size_t len, loff_t *ppos) 271 size_t len, loff_t *ppos)
282{ 272{
283 int i; 273 int i;
@@ -306,7 +296,6 @@ out:
306 296
307/** 297/**
308 * wdrtas_ioctl - ioctl function for the watchdog device 298 * wdrtas_ioctl - ioctl function for the watchdog device
309 * @inode: inode structure
310 * @file: file structure 299 * @file: file structure
311 * @cmd: command for ioctl 300 * @cmd: command for ioctl
312 * @arg: argument pointer 301 * @arg: argument pointer
@@ -315,16 +304,16 @@ out:
315 * 304 *
316 * wdrtas_ioctl implements the watchdog API ioctls 305 * wdrtas_ioctl implements the watchdog API ioctls
317 */ 306 */
318static int 307
319wdrtas_ioctl(struct inode *inode, struct file *file, 308static long wdrtas_ioctl(struct file *file, unsigned int cmd,
320 unsigned int cmd, unsigned long arg) 309 unsigned long arg)
321{ 310{
322 int __user *argp = (void __user *)arg; 311 int __user *argp = (void __user *)arg;
323 int i; 312 int i;
324 static struct watchdog_info wdinfo = { 313 static struct watchdog_info wdinfo = {
325 .options = WDRTAS_SUPPORTED_MASK, 314 .options = WDRTAS_SUPPORTED_MASK,
326 .firmware_version = 0, 315 .firmware_version = 0,
327 .identity = "wdrtas" 316 .identity = "wdrtas",
328 }; 317 };
329 318
330 switch (cmd) { 319 switch (cmd) {
@@ -357,9 +346,9 @@ wdrtas_ioctl(struct inode *inode, struct file *file,
357 wdrtas_timer_keepalive(); 346 wdrtas_timer_keepalive();
358 wdrtas_timer_start(); 347 wdrtas_timer_start();
359 } 348 }
349 /* not implemented. Done by H8
360 if (i & WDIOS_TEMPPANIC) { 350 if (i & WDIOS_TEMPPANIC) {
361 /* not implemented. Done by H8 */ 351 } */
362 }
363 return 0; 352 return 0;
364 353
365 case WDIOC_KEEPALIVE: 354 case WDIOC_KEEPALIVE:
@@ -399,8 +388,7 @@ wdrtas_ioctl(struct inode *inode, struct file *file,
399 * 388 *
400 * function called when watchdog device is opened 389 * function called when watchdog device is opened
401 */ 390 */
402static int 391static int wdrtas_open(struct inode *inode, struct file *file)
403wdrtas_open(struct inode *inode, struct file *file)
404{ 392{
405 /* only open once */ 393 /* only open once */
406 if (atomic_inc_return(&wdrtas_miscdev_open) > 1) { 394 if (atomic_inc_return(&wdrtas_miscdev_open) > 1) {
@@ -423,8 +411,7 @@ wdrtas_open(struct inode *inode, struct file *file)
423 * 411 *
424 * close function. Always succeeds 412 * close function. Always succeeds
425 */ 413 */
426static int 414static int wdrtas_close(struct inode *inode, struct file *file)
427wdrtas_close(struct inode *inode, struct file *file)
428{ 415{
429 /* only stop watchdog, if this was announced using 'V' before */ 416 /* only stop watchdog, if this was announced using 'V' before */
430 if (wdrtas_expect_close == WDRTAS_MAGIC_CHAR) 417 if (wdrtas_expect_close == WDRTAS_MAGIC_CHAR)
@@ -453,8 +440,7 @@ wdrtas_close(struct inode *inode, struct file *file)
453 * wdrtas_temp_read gives the temperature to the users by copying this 440 * wdrtas_temp_read gives the temperature to the users by copying this
454 * value as one byte into the user space buffer. The unit is Fahrenheit... 441 * value as one byte into the user space buffer. The unit is Fahrenheit...
455 */ 442 */
456static ssize_t 443static ssize_t wdrtas_temp_read(struct file *file, char __user *buf,
457wdrtas_temp_read(struct file *file, char __user *buf,
458 size_t count, loff_t *ppos) 444 size_t count, loff_t *ppos)
459{ 445{
460 int temperature = 0; 446 int temperature = 0;
@@ -478,8 +464,7 @@ wdrtas_temp_read(struct file *file, char __user *buf,
478 * 464 *
479 * function called when temperature device is opened 465 * function called when temperature device is opened
480 */ 466 */
481static int 467static int wdrtas_temp_open(struct inode *inode, struct file *file)
482wdrtas_temp_open(struct inode *inode, struct file *file)
483{ 468{
484 return nonseekable_open(inode, file); 469 return nonseekable_open(inode, file);
485} 470}
@@ -493,8 +478,7 @@ wdrtas_temp_open(struct inode *inode, struct file *file)
493 * 478 *
494 * close function. Always succeeds 479 * close function. Always succeeds
495 */ 480 */
496static int 481static int wdrtas_temp_close(struct inode *inode, struct file *file)
497wdrtas_temp_close(struct inode *inode, struct file *file)
498{ 482{
499 return 0; 483 return 0;
500} 484}
@@ -509,10 +493,10 @@ wdrtas_temp_close(struct inode *inode, struct file *file)
509 * 493 *
510 * wdrtas_reboot stops the watchdog in case of a reboot 494 * wdrtas_reboot stops the watchdog in case of a reboot
511 */ 495 */
512static int 496static int wdrtas_reboot(struct notifier_block *this,
513wdrtas_reboot(struct notifier_block *this, unsigned long code, void *ptr) 497 unsigned long code, void *ptr)
514{ 498{
515 if ( (code==SYS_DOWN) || (code==SYS_HALT) ) 499 if (code == SYS_DOWN || code == SYS_HALT)
516 wdrtas_timer_stop(); 500 wdrtas_timer_stop();
517 501
518 return NOTIFY_DONE; 502 return NOTIFY_DONE;
@@ -524,7 +508,7 @@ static const struct file_operations wdrtas_fops = {
524 .owner = THIS_MODULE, 508 .owner = THIS_MODULE,
525 .llseek = no_llseek, 509 .llseek = no_llseek,
526 .write = wdrtas_write, 510 .write = wdrtas_write,
527 .ioctl = wdrtas_ioctl, 511 .unlocked_ioctl = wdrtas_ioctl,
528 .open = wdrtas_open, 512 .open = wdrtas_open,
529 .release = wdrtas_close, 513 .release = wdrtas_close,
530}; 514};
@@ -562,8 +546,7 @@ static struct notifier_block wdrtas_notifier = {
562 * this watchdog driver. It tolerates, if "get-sensor-state" and 546 * this watchdog driver. It tolerates, if "get-sensor-state" and
563 * "ibm,get-system-parameter" are not available. 547 * "ibm,get-system-parameter" are not available.
564 */ 548 */
565static int 549static int wdrtas_get_tokens(void)
566wdrtas_get_tokens(void)
567{ 550{
568 wdrtas_token_get_sensor_state = rtas_token("get-sensor-state"); 551 wdrtas_token_get_sensor_state = rtas_token("get-sensor-state");
569 if (wdrtas_token_get_sensor_state == RTAS_UNKNOWN_SERVICE) { 552 if (wdrtas_token_get_sensor_state == RTAS_UNKNOWN_SERVICE) {
@@ -603,8 +586,7 @@ wdrtas_get_tokens(void)
603 * wdrtas_register_devs unregisters the watchdog and temperature watchdog 586 * wdrtas_register_devs unregisters the watchdog and temperature watchdog
604 * misc devs 587 * misc devs
605 */ 588 */
606static void 589static void wdrtas_unregister_devs(void)
607wdrtas_unregister_devs(void)
608{ 590{
609 misc_deregister(&wdrtas_miscdev); 591 misc_deregister(&wdrtas_miscdev);
610 if (wdrtas_token_get_sensor_state != RTAS_UNKNOWN_SERVICE) 592 if (wdrtas_token_get_sensor_state != RTAS_UNKNOWN_SERVICE)
@@ -619,8 +601,7 @@ wdrtas_unregister_devs(void)
619 * wdrtas_register_devs registers the watchdog and temperature watchdog 601 * wdrtas_register_devs registers the watchdog and temperature watchdog
620 * misc devs 602 * misc devs
621 */ 603 */
622static int 604static int wdrtas_register_devs(void)
623wdrtas_register_devs(void)
624{ 605{
625 int result; 606 int result;
626 607
@@ -651,8 +632,7 @@ wdrtas_register_devs(void)
651 * 632 *
652 * registers the file handlers and the reboot notifier 633 * registers the file handlers and the reboot notifier
653 */ 634 */
654static int __init 635static int __init wdrtas_init(void)
655wdrtas_init(void)
656{ 636{
657 if (wdrtas_get_tokens()) 637 if (wdrtas_get_tokens())
658 return -ENODEV; 638 return -ENODEV;
@@ -680,8 +660,7 @@ wdrtas_init(void)
680 * 660 *
681 * unregisters the file handlers and the reboot notifier 661 * unregisters the file handlers and the reboot notifier
682 */ 662 */
683static void __exit 663static void __exit wdrtas_exit(void)
684wdrtas_exit(void)
685{ 664{
686 if (!wdrtas_nowayout) 665 if (!wdrtas_nowayout)
687 wdrtas_timer_stop(); 666 wdrtas_timer_stop();
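
wdrtas keeps the magic-close convention the other drivers here use: a write containing 'V' arms expect_close (WDRTAS_MAGIC_CHAR is 42), and only then does the final close stop the timer; otherwise the driver pings once more and leaves the watchdog running, which is what WDIOF_MAGICCLOSE advertises. A sketch of that convention with illustrative mywdt_* names:

#include <linux/fs.h>
#include <linux/uaccess.h>

static char expect_close;

static void mywdt_ping(void) { /* hardware keepalive elided */ }
static void mywdt_stop(void) { /* hardware stop elided */ }

static ssize_t mywdt_write(struct file *file, const char __user *buf,
			   size_t len, loff_t *ppos)
{
	size_t i;

	if (!len)
		return 0;

	expect_close = 0;			/* in case it was set long ago */
	for (i = 0; i != len; i++) {
		char c;

		if (get_user(c, buf + i))
			return -EFAULT;
		if (c == 'V')
			expect_close = 42;
	}
	mywdt_ping();
	return len;
}

static int mywdt_release(struct inode *inode, struct file *file)
{
	if (expect_close == 42)
		mywdt_stop();			/* orderly close was announced */
	else
		mywdt_ping();			/* unexpected close: keep it running */
	expect_close = 0;
	return 0;
}
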
diff --git a/drivers/watchdog/wdt.c b/drivers/watchdog/wdt.c
index 756fb15fdce7..deeebb2b13ea 100644
--- a/drivers/watchdog/wdt.c
+++ b/drivers/watchdog/wdt.c
@@ -24,9 +24,10 @@
24 * Matt Crocker). 24 * Matt Crocker).
25 * Alan Cox : Added wdt= boot option 25 * Alan Cox : Added wdt= boot option
26 * Alan Cox : Cleaned up copy/user stuff 26 * Alan Cox : Cleaned up copy/user stuff
27 * Tim Hockin : Added insmod parameters, comment cleanup 27 * Tim Hockin : Added insmod parameters, comment
28 * Parameterized timeout 28 * cleanup, parameterized timeout
29 * Tigran Aivazian : Restructured wdt_init() to handle failures 29 * Tigran Aivazian : Restructured wdt_init() to handle
30 * failures
30 * Joel Becker : Added WDIOC_GET/SETTIMEOUT 31 * Joel Becker : Added WDIOC_GET/SETTIMEOUT
31 * Matt Domsch : Added nowayout module option 32 * Matt Domsch : Added nowayout module option
32 */ 33 */
@@ -42,9 +43,9 @@
42#include <linux/notifier.h> 43#include <linux/notifier.h>
43#include <linux/reboot.h> 44#include <linux/reboot.h>
44#include <linux/init.h> 45#include <linux/init.h>
46#include <linux/io.h>
47#include <linux/uaccess.h>
45 48
46#include <asm/io.h>
47#include <asm/uaccess.h>
48#include <asm/system.h> 49#include <asm/system.h>
49#include "wd501p.h" 50#include "wd501p.h"
50 51
@@ -60,15 +61,19 @@ static char expect_close;
60static int heartbeat = WD_TIMO; 61static int heartbeat = WD_TIMO;
61static int wd_heartbeat; 62static int wd_heartbeat;
62module_param(heartbeat, int, 0); 63module_param(heartbeat, int, 0);
63MODULE_PARM_DESC(heartbeat, "Watchdog heartbeat in seconds. (0<heartbeat<65536, default=" __MODULE_STRING(WD_TIMO) ")"); 64MODULE_PARM_DESC(heartbeat,
65 "Watchdog heartbeat in seconds. (0 < heartbeat < 65536, default="
66 __MODULE_STRING(WD_TIMO) ")");
64 67
65static int nowayout = WATCHDOG_NOWAYOUT; 68static int nowayout = WATCHDOG_NOWAYOUT;
66module_param(nowayout, int, 0); 69module_param(nowayout, int, 0);
67MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped once started (default=" __MODULE_STRING(WATCHDOG_NOWAYOUT) ")"); 70MODULE_PARM_DESC(nowayout,
71 "Watchdog cannot be stopped once started (default="
72 __MODULE_STRING(WATCHDOG_NOWAYOUT) ")");
68 73
69/* You must set these - there is no sane way to probe for this board. */ 74/* You must set these - there is no sane way to probe for this board. */
70static int io=0x240; 75static int io = 0x240;
71static int irq=11; 76static int irq = 11;
72 77
73static DEFINE_SPINLOCK(wdt_lock); 78static DEFINE_SPINLOCK(wdt_lock);
74 79
@@ -82,7 +87,8 @@ MODULE_PARM_DESC(irq, "WDT irq (default=11)");
82static int tachometer; 87static int tachometer;
83 88
84module_param(tachometer, int, 0); 89module_param(tachometer, int, 0);
85MODULE_PARM_DESC(tachometer, "WDT501-P Fan Tachometer support (0=disable, default=0)"); 90MODULE_PARM_DESC(tachometer,
91 "WDT501-P Fan Tachometer support (0=disable, default=0)");
86#endif /* CONFIG_WDT_501 */ 92#endif /* CONFIG_WDT_501 */
87 93
88/* 94/*
@@ -91,9 +97,9 @@ MODULE_PARM_DESC(tachometer, "WDT501-P Fan Tachometer support (0=disable, defaul
91 97
92static void wdt_ctr_mode(int ctr, int mode) 98static void wdt_ctr_mode(int ctr, int mode)
93{ 99{
94 ctr<<=6; 100 ctr <<= 6;
95 ctr|=0x30; 101 ctr |= 0x30;
96 ctr|=(mode<<1); 102 ctr |= (mode << 1);
97 outb_p(ctr, WDT_CR); 103 outb_p(ctr, WDT_CR);
98} 104}
99 105
@@ -114,12 +120,15 @@ static int wdt_start(void)
114 unsigned long flags; 120 unsigned long flags;
115 spin_lock_irqsave(&wdt_lock, flags); 121 spin_lock_irqsave(&wdt_lock, flags);
116 inb_p(WDT_DC); /* Disable watchdog */ 122 inb_p(WDT_DC); /* Disable watchdog */
117 wdt_ctr_mode(0,3); /* Program CTR0 for Mode 3: Square Wave Generator */ 123 wdt_ctr_mode(0, 3); /* Program CTR0 for Mode 3:
118 wdt_ctr_mode(1,2); /* Program CTR1 for Mode 2: Rate Generator */ 124 Square Wave Generator */
119 wdt_ctr_mode(2,0); /* Program CTR2 for Mode 0: Pulse on Terminal Count */ 125 wdt_ctr_mode(1, 2); /* Program CTR1 for Mode 2:
126 Rate Generator */
127 wdt_ctr_mode(2, 0); /* Program CTR2 for Mode 0:
128 Pulse on Terminal Count */
120 wdt_ctr_load(0, 8948); /* Count at 100Hz */ 129 wdt_ctr_load(0, 8948); /* Count at 100Hz */
121 wdt_ctr_load(1,wd_heartbeat); /* Heartbeat */ 130 wdt_ctr_load(1, wd_heartbeat); /* Heartbeat */
122 wdt_ctr_load(2,65535); /* Length of reset pulse */ 131 wdt_ctr_load(2, 65535); /* Length of reset pulse */
123 outb_p(0, WDT_DC); /* Enable watchdog */ 132 outb_p(0, WDT_DC); /* Enable watchdog */
124 spin_unlock_irqrestore(&wdt_lock, flags); 133 spin_unlock_irqrestore(&wdt_lock, flags);
125 return 0; 134 return 0;
@@ -131,13 +140,13 @@ static int wdt_start(void)
131 * Stop the watchdog driver. 140 * Stop the watchdog driver.
132 */ 141 */
133 142
134static int wdt_stop (void) 143static int wdt_stop(void)
135{ 144{
136 unsigned long flags; 145 unsigned long flags;
137 spin_lock_irqsave(&wdt_lock, flags); 146 spin_lock_irqsave(&wdt_lock, flags);
138 /* Turn the card off */ 147 /* Turn the card off */
139 inb_p(WDT_DC); /* Disable watchdog */ 148 inb_p(WDT_DC); /* Disable watchdog */
140 wdt_ctr_load(2,0); /* 0 length reset pulses now */ 149 wdt_ctr_load(2, 0); /* 0 length reset pulses now */
141 spin_unlock_irqrestore(&wdt_lock, flags); 150 spin_unlock_irqrestore(&wdt_lock, flags);
142 return 0; 151 return 0;
143} 152}
@@ -145,8 +154,8 @@ static int wdt_stop (void)
145/** 154/**
146 * wdt_ping: 155 * wdt_ping:
147 * 156 *
148 * Reload counter one with the watchdog heartbeat. We don't bother reloading 157 * Reload counter one with the watchdog heartbeat. We don't bother
149 * the cascade counter. 158 * reloading the cascade counter.
150 */ 159 */
151 160
152static int wdt_ping(void) 161static int wdt_ping(void)
@@ -155,8 +164,9 @@ static int wdt_ping(void)
155 spin_lock_irqsave(&wdt_lock, flags); 164 spin_lock_irqsave(&wdt_lock, flags);
156 /* Write a watchdog value */ 165 /* Write a watchdog value */
157 inb_p(WDT_DC); /* Disable watchdog */ 166 inb_p(WDT_DC); /* Disable watchdog */
158 wdt_ctr_mode(1,2); /* Re-Program CTR1 for Mode 2: Rate Generator */ 167 wdt_ctr_mode(1, 2); /* Re-Program CTR1 for Mode 2:
159 wdt_ctr_load(1,wd_heartbeat); /* Heartbeat */ 168 Rate Generator */
169 wdt_ctr_load(1, wd_heartbeat); /* Heartbeat */
160 outb_p(0, WDT_DC); /* Enable watchdog */ 170 outb_p(0, WDT_DC); /* Enable watchdog */
161 spin_unlock_irqrestore(&wdt_lock, flags); 171 spin_unlock_irqrestore(&wdt_lock, flags);
162 return 0; 172 return 0;
@@ -166,13 +176,14 @@ static int wdt_ping(void)
166 * wdt_set_heartbeat: 176 * wdt_set_heartbeat:
167 * @t: the new heartbeat value that needs to be set. 177 * @t: the new heartbeat value that needs to be set.
168 * 178 *
169 * Set a new heartbeat value for the watchdog device. If the heartbeat value is 179 * Set a new heartbeat value for the watchdog device. If the heartbeat
170 * incorrect we keep the old value and return -EINVAL. If successfull we 180 * value is incorrect we keep the old value and return -EINVAL. If
171 * return 0. 181 * successful we return 0.
172 */ 182 */
183
173static int wdt_set_heartbeat(int t) 184static int wdt_set_heartbeat(int t)
174{ 185{
175 if ((t < 1) || (t > 65535)) 186 if (t < 1 || t > 65535)
176 return -EINVAL; 187 return -EINVAL;
177 188
178 heartbeat = t; 189 heartbeat = t;
@@ -200,7 +211,7 @@ static int wdt_get_status(int *status)
200 new_status = inb_p(WDT_SR); 211 new_status = inb_p(WDT_SR);
201 spin_unlock_irqrestore(&wdt_lock, flags); 212 spin_unlock_irqrestore(&wdt_lock, flags);
202 213
203 *status=0; 214 *status = 0;
204 if (new_status & WDC_SR_ISOI0) 215 if (new_status & WDC_SR_ISOI0)
205 *status |= WDIOF_EXTERN1; 216 *status |= WDIOF_EXTERN1;
206 if (new_status & WDC_SR_ISII1) 217 if (new_status & WDC_SR_ISII1)
@@ -266,7 +277,7 @@ static irqreturn_t wdt_interrupt(int irq, void *dev_id)
266 277
267#ifdef CONFIG_WDT_501 278#ifdef CONFIG_WDT_501
268 if (!(status & WDC_SR_TGOOD)) 279 if (!(status & WDC_SR_TGOOD))
269 printk(KERN_CRIT "Overheat alarm.(%d)\n",inb_p(WDT_RT)); 280 printk(KERN_CRIT "Overheat alarm.(%d)\n", inb_p(WDT_RT));
270 if (!(status & WDC_SR_PSUOVER)) 281 if (!(status & WDC_SR_PSUOVER))
271 printk(KERN_CRIT "PSU over voltage.\n"); 282 printk(KERN_CRIT "PSU over voltage.\n");
272 if (!(status & WDC_SR_PSUUNDR)) 283 if (!(status & WDC_SR_PSUUNDR))
@@ -304,9 +315,10 @@ static irqreturn_t wdt_interrupt(int irq, void *dev_id)
304 * write of data will do, as we don't define content meaning. 315
305 */ 316 */
306 317
307static ssize_t wdt_write(struct file *file, const char __user *buf, size_t count, loff_t *ppos) 318static ssize_t wdt_write(struct file *file, const char __user *buf,
319 size_t count, loff_t *ppos)
308{ 320{
309 if(count) { 321 if (count) {
310 if (!nowayout) { 322 if (!nowayout) {
311 size_t i; 323 size_t i;
312 324
@@ -328,7 +340,6 @@ static ssize_t wdt_write(struct file *file, const char __user *buf, size_t count
328 340
329/** 341/**
330 * wdt_ioctl: 342 * wdt_ioctl:
331 * @inode: inode of the device
332 * @file: file handle to the device 343 * @file: file handle to the device
333 * @cmd: watchdog command 344 * @cmd: watchdog command
334 * @arg: argument pointer 345 * @arg: argument pointer
@@ -338,8 +349,7 @@ static ssize_t wdt_write(struct file *file, const char __user *buf, size_t count
338 * querying capabilities and current status. 349 * querying capabilities and current status.
339 */ 350 */
340 351
341static int wdt_ioctl(struct inode *inode, struct file *file, unsigned int cmd, 352static long wdt_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
342 unsigned long arg)
343{ 353{
344 void __user *argp = (void __user *)arg; 354 void __user *argp = (void __user *)arg;
345 int __user *p = argp; 355 int __user *p = argp;
@@ -362,32 +372,28 @@ static int wdt_ioctl(struct inode *inode, struct file *file, unsigned int cmd,
362 ident.options |= WDIOF_FANFAULT; 372 ident.options |= WDIOF_FANFAULT;
363#endif /* CONFIG_WDT_501 */ 373#endif /* CONFIG_WDT_501 */
364 374
365 switch(cmd) 375 switch (cmd) {
366 { 376 case WDIOC_GETSUPPORT:
367 default: 377 return copy_to_user(argp, &ident, sizeof(ident)) ? -EFAULT : 0;
368 return -ENOTTY; 378 case WDIOC_GETSTATUS:
369 case WDIOC_GETSUPPORT: 379 wdt_get_status(&status);
370 return copy_to_user(argp, &ident, sizeof(ident))?-EFAULT:0; 380 return put_user(status, p);
371 381 case WDIOC_GETBOOTSTATUS:
372 case WDIOC_GETSTATUS: 382 return put_user(0, p);
373 wdt_get_status(&status); 383 case WDIOC_KEEPALIVE:
374 return put_user(status, p); 384 wdt_ping();
375 case WDIOC_GETBOOTSTATUS: 385 return 0;
376 return put_user(0, p); 386 case WDIOC_SETTIMEOUT:
377 case WDIOC_KEEPALIVE: 387 if (get_user(new_heartbeat, p))
378 wdt_ping(); 388 return -EFAULT;
379 return 0; 389 if (wdt_set_heartbeat(new_heartbeat))
380 case WDIOC_SETTIMEOUT: 390 return -EINVAL;
381 if (get_user(new_heartbeat, p)) 391 wdt_ping();
382 return -EFAULT; 392 /* Fall */
383 393 case WDIOC_GETTIMEOUT:
384 if (wdt_set_heartbeat(new_heartbeat)) 394 return put_user(heartbeat, p);
385 return -EINVAL; 395 default:
386 396 return -ENOTTY;
387 wdt_ping();
388 /* Fall */
389 case WDIOC_GETTIMEOUT:
390 return put_user(heartbeat, p);
391 } 397 }
392} 398}
393 399
@@ -405,7 +411,7 @@ static int wdt_ioctl(struct inode *inode, struct file *file, unsigned int cmd,
405 411
406static int wdt_open(struct inode *inode, struct file *file) 412static int wdt_open(struct inode *inode, struct file *file)
407{ 413{
408 if(test_and_set_bit(0, &wdt_is_open)) 414 if (test_and_set_bit(0, &wdt_is_open))
409 return -EBUSY; 415 return -EBUSY;
410 /* 416 /*
411 * Activate 417 * Activate
@@ -432,7 +438,8 @@ static int wdt_release(struct inode *inode, struct file *file)
432 wdt_stop(); 438 wdt_stop();
433 clear_bit(0, &wdt_is_open); 439 clear_bit(0, &wdt_is_open);
434 } else { 440 } else {
435 printk(KERN_CRIT "wdt: WDT device closed unexpectedly. WDT will not stop!\n"); 441 printk(KERN_CRIT
442 "wdt: WDT device closed unexpectedly. WDT will not stop!\n");
436 wdt_ping(); 443 wdt_ping();
437 } 444 }
438 expect_close = 0; 445 expect_close = 0;
@@ -451,14 +458,15 @@ static int wdt_release(struct inode *inode, struct file *file)
451 * fahrenheit. It was designed by an imperial measurement luddite. 458
452 */ 459 */
453 460
454static ssize_t wdt_temp_read(struct file *file, char __user *buf, size_t count, loff_t *ptr) 461static ssize_t wdt_temp_read(struct file *file, char __user *buf,
462 size_t count, loff_t *ptr)
455{ 463{
456 int temperature; 464 int temperature;
457 465
458 if (wdt_get_temperature(&temperature)) 466 if (wdt_get_temperature(&temperature))
459 return -EFAULT; 467 return -EFAULT;
460 468
461 if (copy_to_user (buf, &temperature, 1)) 469 if (copy_to_user(buf, &temperature, 1))
462 return -EFAULT; 470 return -EFAULT;
463 471
464 return 1; 472 return 1;
@@ -506,10 +514,8 @@ static int wdt_temp_release(struct inode *inode, struct file *file)
506static int wdt_notify_sys(struct notifier_block *this, unsigned long code, 514static int wdt_notify_sys(struct notifier_block *this, unsigned long code,
507 void *unused) 515 void *unused)
508{ 516{
509 if(code==SYS_DOWN || code==SYS_HALT) { 517 if (code == SYS_DOWN || code == SYS_HALT)
510 /* Turn the card off */
511 wdt_stop(); 518 wdt_stop();
512 }
513 return NOTIFY_DONE; 519 return NOTIFY_DONE;
514} 520}
515 521
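
The wdt_notify_sys() change above only simplifies the body, but the surrounding pattern is shared by every driver in this series: a notifier_block registered at module init stops the hardware on SYS_DOWN or SYS_HALT so a pending reset cannot fire while the machine is going down. A sketch with illustrative names:

#include <linux/notifier.h>
#include <linux/reboot.h>

static void mywdt_stop(void) { /* hardware stop elided */ }

static int mywdt_notify_sys(struct notifier_block *this, unsigned long code,
			    void *unused)
{
	if (code == SYS_DOWN || code == SYS_HALT)
		mywdt_stop();
	return NOTIFY_DONE;
}

static struct notifier_block mywdt_notifier = {
	.notifier_call = mywdt_notify_sys,
};

/* module init:  register_reboot_notifier(&mywdt_notifier);
 * module exit:  unregister_reboot_notifier(&mywdt_notifier); */
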
@@ -522,7 +528,7 @@ static const struct file_operations wdt_fops = {
522 .owner = THIS_MODULE, 528 .owner = THIS_MODULE,
523 .llseek = no_llseek, 529 .llseek = no_llseek,
524 .write = wdt_write, 530 .write = wdt_write,
525 .ioctl = wdt_ioctl, 531 .unlocked_ioctl = wdt_ioctl,
526 .open = wdt_open, 532 .open = wdt_open,
527 .release = wdt_release, 533 .release = wdt_release,
528}; 534};
@@ -576,7 +582,7 @@ static void __exit wdt_exit(void)
576#endif /* CONFIG_WDT_501 */ 582#endif /* CONFIG_WDT_501 */
577 unregister_reboot_notifier(&wdt_notifier); 583 unregister_reboot_notifier(&wdt_notifier);
578 free_irq(irq, NULL); 584 free_irq(irq, NULL);
579 release_region(io,8); 585 release_region(io, 8);
580} 586}
581 587
582/** 588/**
@@ -591,44 +597,49 @@ static int __init wdt_init(void)
591{ 597{
592 int ret; 598 int ret;
593 599
594 /* Check that the heartbeat value is within it's range ; if not reset to the default */ 600 /* Check that the heartbeat value is within its range;
601 if not reset to the default */
595 if (wdt_set_heartbeat(heartbeat)) { 602 if (wdt_set_heartbeat(heartbeat)) {
596 wdt_set_heartbeat(WD_TIMO); 603 wdt_set_heartbeat(WD_TIMO);
597 printk(KERN_INFO "wdt: heartbeat value must be 0<heartbeat<65536, using %d\n", 604 printk(KERN_INFO "wdt: heartbeat value must be 0 < heartbeat < 65536, using %d\n",
598 WD_TIMO); 605 WD_TIMO);
599 } 606 }
600 607
601 if (!request_region(io, 8, "wdt501p")) { 608 if (!request_region(io, 8, "wdt501p")) {
602 printk(KERN_ERR "wdt: I/O address 0x%04x already in use\n", io); 609 printk(KERN_ERR
610 "wdt: I/O address 0x%04x already in use\n", io);
603 ret = -EBUSY; 611 ret = -EBUSY;
604 goto out; 612 goto out;
605 } 613 }
606 614
607 ret = request_irq(irq, wdt_interrupt, IRQF_DISABLED, "wdt501p", NULL); 615 ret = request_irq(irq, wdt_interrupt, IRQF_DISABLED, "wdt501p", NULL);
608 if(ret) { 616 if (ret) {
609 printk(KERN_ERR "wdt: IRQ %d is not free.\n", irq); 617 printk(KERN_ERR "wdt: IRQ %d is not free.\n", irq);
610 goto outreg; 618 goto outreg;
611 } 619 }
612 620
613 ret = register_reboot_notifier(&wdt_notifier); 621 ret = register_reboot_notifier(&wdt_notifier);
614 if(ret) { 622 if (ret) {
615 printk(KERN_ERR "wdt: cannot register reboot notifier (err=%d)\n", ret); 623 printk(KERN_ERR
624 "wdt: cannot register reboot notifier (err=%d)\n", ret);
616 goto outirq; 625 goto outirq;
617 } 626 }
618 627
619#ifdef CONFIG_WDT_501 628#ifdef CONFIG_WDT_501
620 ret = misc_register(&temp_miscdev); 629 ret = misc_register(&temp_miscdev);
621 if (ret) { 630 if (ret) {
622 printk(KERN_ERR "wdt: cannot register miscdev on minor=%d (err=%d)\n", 631 printk(KERN_ERR
623 TEMP_MINOR, ret); 632 "wdt: cannot register miscdev on minor=%d (err=%d)\n",
633 TEMP_MINOR, ret);
624 goto outrbt; 634 goto outrbt;
625 } 635 }
626#endif /* CONFIG_WDT_501 */ 636#endif /* CONFIG_WDT_501 */
627 637
628 ret = misc_register(&wdt_miscdev); 638 ret = misc_register(&wdt_miscdev);
629 if (ret) { 639 if (ret) {
630 printk(KERN_ERR "wdt: cannot register miscdev on minor=%d (err=%d)\n", 640 printk(KERN_ERR
631 WATCHDOG_MINOR, ret); 641 "wdt: cannot register miscdev on minor=%d (err=%d)\n",
642 WATCHDOG_MINOR, ret);
632 goto outmisc; 643 goto outmisc;
633 } 644 }
634 645
@@ -636,7 +647,8 @@ static int __init wdt_init(void)
636 printk(KERN_INFO "WDT500/501-P driver 0.10 at 0x%04x (Interrupt %d). heartbeat=%d sec (nowayout=%d)\n", 647 printk(KERN_INFO "WDT500/501-P driver 0.10 at 0x%04x (Interrupt %d). heartbeat=%d sec (nowayout=%d)\n",
637 io, irq, heartbeat, nowayout); 648 io, irq, heartbeat, nowayout);
638#ifdef CONFIG_WDT_501 649#ifdef CONFIG_WDT_501
639 printk(KERN_INFO "wdt: Fan Tachometer is %s\n", (tachometer ? "Enabled" : "Disabled")); 650 printk(KERN_INFO "wdt: Fan Tachometer is %s\n",
651 (tachometer ? "Enabled" : "Disabled"));
640#endif /* CONFIG_WDT_501 */ 652#endif /* CONFIG_WDT_501 */
641 653
642out: 654out:
@@ -651,7 +663,7 @@ outrbt:
651outirq: 663outirq:
652 free_irq(irq, NULL); 664 free_irq(irq, NULL);
653outreg: 665outreg:
654 release_region(io,8); 666 release_region(io, 8);
655 goto out; 667 goto out;
656} 668}
657 669
diff --git a/drivers/watchdog/wdt285.c b/drivers/watchdog/wdt285.c
index e4cf661dc890..c8d7f1b2df02 100644
--- a/drivers/watchdog/wdt285.c
+++ b/drivers/watchdog/wdt285.c
@@ -26,10 +26,10 @@
26#include <linux/reboot.h> 26#include <linux/reboot.h>
27#include <linux/init.h> 27#include <linux/init.h>
28#include <linux/interrupt.h> 28#include <linux/interrupt.h>
29#include <linux/uaccess.h>
30#include <linux/irq.h>
31#include <mach/hardware.h>
29 32
30#include <asm/irq.h>
31#include <asm/uaccess.h>
32#include <asm/hardware.h>
33#include <asm/mach-types.h> 33#include <asm/mach-types.h>
34#include <asm/hardware/dec21285.h> 34#include <asm/hardware/dec21285.h>
35 35
@@ -115,8 +115,8 @@ static int watchdog_release(struct inode *inode, struct file *file)
115 return 0; 115 return 0;
116} 116}
117 117
118static ssize_t 118static ssize_t watchdog_write(struct file *file, const char *data,
119watchdog_write(struct file *file, const char *data, size_t len, loff_t *ppos) 119 size_t len, loff_t *ppos)
120{ 120{
121 /* 121 /*
122 * Refresh the timer. 122 * Refresh the timer.
@@ -127,19 +127,18 @@ watchdog_write(struct file *file, const char *data, size_t len, loff_t *ppos)
127 return len; 127 return len;
128} 128}
129 129
130static struct watchdog_info ident = { 130static const struct watchdog_info ident = {
131 .options = WDIOF_SETTIMEOUT, 131 .options = WDIOF_SETTIMEOUT,
132 .identity = "Footbridge Watchdog", 132 .identity = "Footbridge Watchdog",
133}; 133};
134 134
135static int 135static long watchdog_ioctl(struct file *file, unsigned int cmd,
136watchdog_ioctl(struct inode *inode, struct file *file, unsigned int cmd, 136 unsigned long arg)
137 unsigned long arg)
138{ 137{
139 unsigned int new_margin; 138 unsigned int new_margin;
140 int ret = -ENOTTY; 139 int ret = -ENOTTY;
141 140
142 switch(cmd) { 141 switch (cmd) {
143 case WDIOC_GETSUPPORT: 142 case WDIOC_GETSUPPORT:
144 ret = 0; 143 ret = 0;
145 if (copy_to_user((void *)arg, &ident, sizeof(ident))) 144 if (copy_to_user((void *)arg, &ident, sizeof(ident)))
@@ -148,7 +147,7 @@ watchdog_ioctl(struct inode *inode, struct file *file, unsigned int cmd,
148 147
149 case WDIOC_GETSTATUS: 148 case WDIOC_GETSTATUS:
150 case WDIOC_GETBOOTSTATUS: 149 case WDIOC_GETBOOTSTATUS:
151 ret = put_user(0,(int *)arg); 150 ret = put_user(0, (int *)arg);
152 break; 151 break;
153 152
154 case WDIOC_KEEPALIVE: 153 case WDIOC_KEEPALIVE:
@@ -182,7 +181,7 @@ static const struct file_operations watchdog_fops = {
182 .owner = THIS_MODULE, 181 .owner = THIS_MODULE,
183 .llseek = no_llseek, 182 .llseek = no_llseek,
184 .write = watchdog_write, 183 .write = watchdog_write,
185 .ioctl = watchdog_ioctl, 184 .unlocked_ioctl = watchdog_ioctl,
186 .open = watchdog_open, 185 .open = watchdog_open,
187 .release = watchdog_release, 186 .release = watchdog_release,
188}; 187};
@@ -204,11 +203,13 @@ static int __init footbridge_watchdog_init(void)
204 if (retval < 0) 203 if (retval < 0)
205 return retval; 204 return retval;
206 205
207 printk("Footbridge Watchdog Timer: 0.01, timer margin: %d sec\n", 206 printk(KERN_INFO
208 soft_margin); 207 "Footbridge Watchdog Timer: 0.01, timer margin: %d sec\n",
208 soft_margin);
209 209
210 if (machine_is_cats()) 210 if (machine_is_cats())
211 printk("Warning: Watchdog reset may not work on this machine.\n"); 211 printk(KERN_WARNING
 212 "Warning: Watchdog reset may not work on this machine.\n");
212 return 0; 213 return 0;
213} 214}
214 215
@@ -223,7 +224,7 @@ MODULE_LICENSE("GPL");
223MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR); 224MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR);
224 225
225module_param(soft_margin, int, 0); 226module_param(soft_margin, int, 0);
226MODULE_PARM_DESC(soft_margin,"Watchdog timeout in seconds"); 227MODULE_PARM_DESC(soft_margin, "Watchdog timeout in seconds");
227 228
228module_init(footbridge_watchdog_init); 229module_init(footbridge_watchdog_init);
229module_exit(footbridge_watchdog_exit); 230module_exit(footbridge_watchdog_exit);
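
wdt285.c now declares its struct watchdog_info as const, matching wafwdt.c above and wdt977.c below; WDIOC_GETSUPPORT simply copies that descriptor to user space. A sketch of the pattern, with an illustrative identity string and option mask:

#include <linux/fs.h>
#include <linux/uaccess.h>
#include <linux/watchdog.h>

static const struct watchdog_info ident = {
	.options	  = WDIOF_SETTIMEOUT | WDIOF_KEEPALIVEPING |
			    WDIOF_MAGICCLOSE,
	.firmware_version = 1,
	.identity	  = "Example WDT",	/* illustrative only */
};

static long mywdt_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
	void __user *argp = (void __user *)arg;

	switch (cmd) {
	case WDIOC_GETSUPPORT:
		return copy_to_user(argp, &ident, sizeof(ident)) ? -EFAULT : 0;
	default:
		return -ENOTTY;
	}
}
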
diff --git a/drivers/watchdog/wdt977.c b/drivers/watchdog/wdt977.c
index fb4b876c9fda..60e28d49ff52 100644
--- a/drivers/watchdog/wdt977.c
+++ b/drivers/watchdog/wdt977.c
@@ -19,7 +19,8 @@
19 * 07-Jul-2003 Daniele Bellucci: Audit return code of misc_register in 19 * 07-Jul-2003 Daniele Bellucci: Audit return code of misc_register in
20 * nwwatchdog_init. 20 * nwwatchdog_init.
21 * 25-Oct-2005 Woody Suwalski: Convert addresses to #defs, add spinlocks 21 * 25-Oct-2005 Woody Suwalski: Convert addresses to #defs, add spinlocks
22 * remove limitiation to be used on Netwinders only 22 * remove limitiation to be used on
23 * Netwinders only
23 */ 24 */
24 25
25#include <linux/module.h> 26#include <linux/module.h>
@@ -33,11 +34,11 @@
33#include <linux/watchdog.h> 34#include <linux/watchdog.h>
34#include <linux/notifier.h> 35#include <linux/notifier.h>
35#include <linux/reboot.h> 36#include <linux/reboot.h>
37#include <linux/io.h>
38#include <linux/uaccess.h>
36 39
37#include <asm/io.h>
38#include <asm/system.h> 40#include <asm/system.h>
39#include <asm/mach-types.h> 41#include <asm/mach-types.h>
40#include <asm/uaccess.h>
41 42
42#define WATCHDOG_VERSION "0.04" 43#define WATCHDOG_VERSION "0.04"
43#define WATCHDOG_NAME "Wdt977" 44#define WATCHDOG_NAME "Wdt977"
@@ -45,7 +46,7 @@
45#define DRIVER_VERSION WATCHDOG_NAME " driver, v" WATCHDOG_VERSION "\n" 46#define DRIVER_VERSION WATCHDOG_NAME " driver, v" WATCHDOG_VERSION "\n"
46 47
47#define IO_INDEX_PORT 0x370 /* on some systems it can be 0x3F0 */ 48#define IO_INDEX_PORT 0x370 /* on some systems it can be 0x3F0 */
48#define IO_DATA_PORT (IO_INDEX_PORT+1) 49#define IO_DATA_PORT (IO_INDEX_PORT + 1)
49 50
50#define UNLOCK_DATA 0x87 51#define UNLOCK_DATA 0x87
51#define LOCK_DATA 0xAA 52#define LOCK_DATA 0xAA
@@ -62,13 +63,16 @@ static char expect_close;
62static DEFINE_SPINLOCK(spinlock); 63static DEFINE_SPINLOCK(spinlock);
63 64
64module_param(timeout, int, 0); 65module_param(timeout, int, 0);
65MODULE_PARM_DESC(timeout,"Watchdog timeout in seconds (60..15300), default=" __MODULE_STRING(DEFAULT_TIMEOUT) ")"); 66MODULE_PARM_DESC(timeout, "Watchdog timeout in seconds (60..15300), default="
67 __MODULE_STRING(DEFAULT_TIMEOUT) ")");
66module_param(testmode, int, 0); 68module_param(testmode, int, 0);
67MODULE_PARM_DESC(testmode,"Watchdog testmode (1 = no reboot), default=0"); 69MODULE_PARM_DESC(testmode, "Watchdog testmode (1 = no reboot), default=0");
68 70
69static int nowayout = WATCHDOG_NOWAYOUT; 71static int nowayout = WATCHDOG_NOWAYOUT;
70module_param(nowayout, int, 0); 72module_param(nowayout, int, 0);
71MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped once started (default=" __MODULE_STRING(WATCHDOG_NOWAYOUT) ")"); 73MODULE_PARM_DESC(nowayout,
74 "Watchdog cannot be stopped once started (default="
75 __MODULE_STRING(WATCHDOG_NOWAYOUT) ")");
72 76
73/* 77/*
74 * Start the watchdog 78 * Start the watchdog
@@ -95,14 +99,16 @@ static int wdt977_start(void)
95 outb_p(0xF2, IO_INDEX_PORT); 99 outb_p(0xF2, IO_INDEX_PORT);
96 outb_p(timeoutM, IO_DATA_PORT); 100 outb_p(timeoutM, IO_DATA_PORT);
97 outb_p(0xF3, IO_INDEX_PORT); 101 outb_p(0xF3, IO_INDEX_PORT);
98 outb_p(0x00, IO_DATA_PORT); /* another setting is 0E for kbd/mouse/LED */ 102 outb_p(0x00, IO_DATA_PORT); /* another setting is 0E for
103 kbd/mouse/LED */
99 outb_p(0xF4, IO_INDEX_PORT); 104 outb_p(0xF4, IO_INDEX_PORT);
100 outb_p(0x00, IO_DATA_PORT); 105 outb_p(0x00, IO_DATA_PORT);
101 106
102 /* at last select device Aux1 (dev=7) and set GP16 as a watchdog output */ 107 /* At last select device Aux1 (dev=7) and set GP16 as a
103 /* in test mode watch the bit 1 on F4 to indicate "triggered" */ 108 * watchdog output. In test mode watch the bit 1 on F4 to
104 if (!testmode) 109 * indicate "triggered"
105 { 110 */
111 if (!testmode) {
106 outb_p(DEVICE_REGISTER, IO_INDEX_PORT); 112 outb_p(DEVICE_REGISTER, IO_INDEX_PORT);
107 outb_p(0x07, IO_DATA_PORT); 113 outb_p(0x07, IO_DATA_PORT);
108 outb_p(0xE6, IO_INDEX_PORT); 114 outb_p(0xE6, IO_INDEX_PORT);
@@ -147,7 +153,8 @@ static int wdt977_stop(void)
147 outb_p(0xF2, IO_INDEX_PORT); 153 outb_p(0xF2, IO_INDEX_PORT);
148 outb_p(0x00, IO_DATA_PORT); 154 outb_p(0x00, IO_DATA_PORT);
149 155
150 /* at last select device Aux1 (dev=7) and set GP16 as a watchdog output */ 156 /* at last select device Aux1 (dev=7) and set
157 GP16 as a watchdog output */
151 outb_p(DEVICE_REGISTER, IO_INDEX_PORT); 158 outb_p(DEVICE_REGISTER, IO_INDEX_PORT);
152 outb_p(0x07, IO_DATA_PORT); 159 outb_p(0x07, IO_DATA_PORT);
153 outb_p(0xE6, IO_INDEX_PORT); 160 outb_p(0xE6, IO_INDEX_PORT);
@@ -202,16 +209,18 @@ static int wdt977_set_timeout(int t)
202 tmrval = (t + 59) / 60; 209 tmrval = (t + 59) / 60;
203 210
204 if (machine_is_netwinder()) { 211 if (machine_is_netwinder()) {
205 /* we have a hw bug somewhere, so each 977 minute is actually only 30sec 212 /* we have a hw bug somewhere, so each 977 minute is actually
206 * this limits the max timeout to half of device max of 255 minutes... 213 * only 30sec. This limits the max timeout to half of device
214 * max of 255 minutes...
207 */ 215 */
208 tmrval += tmrval; 216 tmrval += tmrval;
209 } 217 }
210 218
211 if ((tmrval < 1) || (tmrval > 255)) 219 if (tmrval < 1 || tmrval > 255)
212 return -EINVAL; 220 return -EINVAL;
213 221
214 /* timeout is the timeout in seconds, timeoutM is the timeout in minutes */ 222 /* timeout is the timeout in seconds, timeoutM is
 223 the timeout in minutes */
215 timeout = t; 224 timeout = t;
216 timeoutM = tmrval; 225 timeoutM = tmrval;
217 return 0; 226 return 0;
@@ -243,7 +252,7 @@ static int wdt977_get_status(int *status)
243 252
244 spin_unlock_irqrestore(&spinlock, flags); 253 spin_unlock_irqrestore(&spinlock, flags);
245 254
246 *status=0; 255 *status = 0;
247 if (new_status & 1) 256 if (new_status & 1)
248 *status |= WDIOF_CARDRESET; 257 *status |= WDIOF_CARDRESET;
249 258
@@ -258,7 +267,7 @@ static int wdt977_get_status(int *status)
258static int wdt977_open(struct inode *inode, struct file *file) 267static int wdt977_open(struct inode *inode, struct file *file)
259{ 268{
260 /* If the watchdog is alive we don't need to start it again */ 269 /* If the watchdog is alive we don't need to start it again */
261 if( test_and_set_bit(0,&timer_alive) ) 270 if (test_and_set_bit(0, &timer_alive))
262 return -EBUSY; 271 return -EBUSY;
263 272
264 if (nowayout) 273 if (nowayout)
@@ -274,13 +283,13 @@ static int wdt977_release(struct inode *inode, struct file *file)
274 * Shut off the timer. 283 * Shut off the timer.
275 * Lock it in if it's a module and we set nowayout 284 * Lock it in if it's a module and we set nowayout
276 */ 285 */
277 if (expect_close == 42) 286 if (expect_close == 42) {
278 {
279 wdt977_stop(); 287 wdt977_stop();
280 clear_bit(0,&timer_alive); 288 clear_bit(0, &timer_alive);
281 } else { 289 } else {
282 wdt977_keepalive(); 290 wdt977_keepalive();
283 printk(KERN_CRIT PFX "Unexpected close, not stopping watchdog!\n"); 291 printk(KERN_CRIT PFX
292 "Unexpected close, not stopping watchdog!\n");
284 } 293 }
285 expect_close = 0; 294 expect_close = 0;
286 return 0; 295 return 0;
@@ -301,17 +310,14 @@ static int wdt977_release(struct inode *inode, struct file *file)
301static ssize_t wdt977_write(struct file *file, const char __user *buf, 310static ssize_t wdt977_write(struct file *file, const char __user *buf,
302 size_t count, loff_t *ppos) 311 size_t count, loff_t *ppos)
303{ 312{
304 if (count) 313 if (count) {
305 { 314 if (!nowayout) {
306 if (!nowayout)
307 {
308 size_t i; 315 size_t i;
309 316
310 /* In case it was set long ago */ 317 /* In case it was set long ago */
311 expect_close = 0; 318 expect_close = 0;
312 319
313 for (i = 0; i != count; i++) 320 for (i = 0; i != count; i++) {
314 {
315 char c; 321 char c;
316 if (get_user(c, buf + i)) 322 if (get_user(c, buf + i))
317 return -EFAULT; 323 return -EFAULT;
@@ -326,6 +332,14 @@ static ssize_t wdt977_write(struct file *file, const char __user *buf,
326 return count; 332 return count;
327} 333}
328 334
335static const struct watchdog_info ident = {
336 .options = WDIOF_SETTIMEOUT |
337 WDIOF_MAGICCLOSE |
338 WDIOF_KEEPALIVEPING,
339 .firmware_version = 1,
340 .identity = WATCHDOG_NAME,
341};
342
329/* 343/*
330 * wdt977_ioctl: 344 * wdt977_ioctl:
331 * @inode: inode of the device 345 * @inode: inode of the device
@@ -337,16 +351,8 @@ static ssize_t wdt977_write(struct file *file, const char __user *buf,
337 * according to their available features. 351 * according to their available features.
338 */ 352 */
339 353
340static struct watchdog_info ident = { 354static long wdt977_ioctl(struct file *file, unsigned int cmd,
341 .options = WDIOF_SETTIMEOUT | 355 unsigned long arg)
342 WDIOF_MAGICCLOSE |
343 WDIOF_KEEPALIVEPING,
344 .firmware_version = 1,
345 .identity = WATCHDOG_NAME,
346};
347
348static int wdt977_ioctl(struct inode *inode, struct file *file,
349 unsigned int cmd, unsigned long arg)
350{ 356{
351 int status; 357 int status;
352 int new_options, retval = -EINVAL; 358 int new_options, retval = -EINVAL;
@@ -358,11 +364,7 @@ static int wdt977_ioctl(struct inode *inode, struct file *file,
358 364
359 uarg.i = (int __user *)arg; 365 uarg.i = (int __user *)arg;
360 366
361 switch(cmd) 367 switch (cmd) {
362 {
363 default:
364 return -ENOTTY;
365
366 case WDIOC_GETSUPPORT: 368 case WDIOC_GETSUPPORT:
367 return copy_to_user(uarg.ident, &ident, 369 return copy_to_user(uarg.ident, &ident,
368 sizeof(ident)) ? -EFAULT : 0; 370 sizeof(ident)) ? -EFAULT : 0;
@@ -374,12 +376,8 @@ static int wdt977_ioctl(struct inode *inode, struct file *file,
374 case WDIOC_GETBOOTSTATUS: 376 case WDIOC_GETBOOTSTATUS:
375 return put_user(0, uarg.i); 377 return put_user(0, uarg.i);
376 378
377 case WDIOC_KEEPALIVE:
378 wdt977_keepalive();
379 return 0;
380
381 case WDIOC_SETOPTIONS: 379 case WDIOC_SETOPTIONS:
382 if (get_user (new_options, uarg.i)) 380 if (get_user(new_options, uarg.i))
383 return -EFAULT; 381 return -EFAULT;
384 382
385 if (new_options & WDIOS_DISABLECARD) { 383 if (new_options & WDIOS_DISABLECARD) {
@@ -394,6 +392,10 @@ static int wdt977_ioctl(struct inode *inode, struct file *file,
394 392
395 return retval; 393 return retval;
396 394
395 case WDIOC_KEEPALIVE:
396 wdt977_keepalive();
397 return 0;
398
397 case WDIOC_SETTIMEOUT: 399 case WDIOC_SETTIMEOUT:
398 if (get_user(new_timeout, uarg.i)) 400 if (get_user(new_timeout, uarg.i))
399 return -EFAULT; 401 return -EFAULT;
@@ -407,29 +409,30 @@ static int wdt977_ioctl(struct inode *inode, struct file *file,
407 case WDIOC_GETTIMEOUT: 409 case WDIOC_GETTIMEOUT:
408 return put_user(timeout, uarg.i); 410 return put_user(timeout, uarg.i);
409 411
412 default:
413 return -ENOTTY;
414
410 } 415 }
411} 416}
412 417
413static int wdt977_notify_sys(struct notifier_block *this, unsigned long code, 418static int wdt977_notify_sys(struct notifier_block *this, unsigned long code,
414 void *unused) 419 void *unused)
415{ 420{
416 if(code==SYS_DOWN || code==SYS_HALT) 421 if (code == SYS_DOWN || code == SYS_HALT)
417 wdt977_stop(); 422 wdt977_stop();
418 return NOTIFY_DONE; 423 return NOTIFY_DONE;
419} 424}
420 425
421static const struct file_operations wdt977_fops= 426static const struct file_operations wdt977_fops = {
422{
423 .owner = THIS_MODULE, 427 .owner = THIS_MODULE,
424 .llseek = no_llseek, 428 .llseek = no_llseek,
425 .write = wdt977_write, 429 .write = wdt977_write,
426 .ioctl = wdt977_ioctl, 430 .unlocked_ioctl = wdt977_ioctl,
427 .open = wdt977_open, 431 .open = wdt977_open,
428 .release = wdt977_release, 432 .release = wdt977_release,
429}; 433};
430 434
431static struct miscdevice wdt977_miscdev= 435static struct miscdevice wdt977_miscdev = {
432{
433 .minor = WATCHDOG_MINOR, 436 .minor = WATCHDOG_MINOR,
434 .name = "watchdog", 437 .name = "watchdog",
435 .fops = &wdt977_fops, 438 .fops = &wdt977_fops,
@@ -443,51 +446,48 @@ static int __init wd977_init(void)
443{ 446{
444 int rc; 447 int rc;
445 448
446 //if (!machine_is_netwinder())
447 // return -ENODEV;
448
449 printk(KERN_INFO PFX DRIVER_VERSION); 449 printk(KERN_INFO PFX DRIVER_VERSION);
450 450
451 /* Check that the timeout value is within it's range ; if not reset to the default */ 451 /* Check that the timeout value is within its range;
452 if (wdt977_set_timeout(timeout)) 452 if not reset to the default */
453 { 453 if (wdt977_set_timeout(timeout)) {
454 wdt977_set_timeout(DEFAULT_TIMEOUT); 454 wdt977_set_timeout(DEFAULT_TIMEOUT);
455 printk(KERN_INFO PFX "timeout value must be 60<timeout<15300, using %d\n", 455 printk(KERN_INFO PFX
456 DEFAULT_TIMEOUT); 456 "timeout value must be 60 < timeout < 15300, using %d\n",
457 DEFAULT_TIMEOUT);
457 } 458 }
458 459
459 /* on Netwinder the IOports are already reserved by 460 /* on Netwinder the IOports are already reserved by
460 * arch/arm/mach-footbridge/netwinder-hw.c 461 * arch/arm/mach-footbridge/netwinder-hw.c
461 */ 462 */
462 if (!machine_is_netwinder()) 463 if (!machine_is_netwinder()) {
463 { 464 if (!request_region(IO_INDEX_PORT, 2, WATCHDOG_NAME)) {
464 if (!request_region(IO_INDEX_PORT, 2, WATCHDOG_NAME)) 465 printk(KERN_ERR PFX
465 { 466 "I/O address 0x%04x already in use\n",
466 printk(KERN_ERR PFX "I/O address 0x%04x already in use\n", 467 IO_INDEX_PORT);
467 IO_INDEX_PORT);
468 rc = -EIO; 468 rc = -EIO;
469 goto err_out; 469 goto err_out;
470 } 470 }
471 } 471 }
472 472
473 rc = register_reboot_notifier(&wdt977_notifier); 473 rc = register_reboot_notifier(&wdt977_notifier);
474 if (rc) 474 if (rc) {
475 { 475 printk(KERN_ERR PFX
476 printk(KERN_ERR PFX "cannot register reboot notifier (err=%d)\n", 476 "cannot register reboot notifier (err=%d)\n", rc);
477 rc);
478 goto err_out_region; 477 goto err_out_region;
479 } 478 }
480 479
481 rc = misc_register(&wdt977_miscdev); 480 rc = misc_register(&wdt977_miscdev);
482 if (rc) 481 if (rc) {
483 { 482 printk(KERN_ERR PFX
484 printk(KERN_ERR PFX "cannot register miscdev on minor=%d (err=%d)\n", 483 "cannot register miscdev on minor=%d (err=%d)\n",
485 wdt977_miscdev.minor, rc); 484 wdt977_miscdev.minor, rc);
486 goto err_out_reboot; 485 goto err_out_reboot;
487 } 486 }
488 487
489 printk(KERN_INFO PFX "initialized. timeout=%d sec (nowayout=%d, testmode=%i)\n", 488 printk(KERN_INFO PFX
490 timeout, nowayout, testmode); 489 "initialized. timeout=%d sec (nowayout=%d, testmode=%i)\n",
490 timeout, nowayout, testmode);
491 491
492 return 0; 492 return 0;
493 493
@@ -495,7 +495,7 @@ err_out_reboot:
495 unregister_reboot_notifier(&wdt977_notifier); 495 unregister_reboot_notifier(&wdt977_notifier);
496err_out_region: 496err_out_region:
497 if (!machine_is_netwinder()) 497 if (!machine_is_netwinder())
498 release_region(IO_INDEX_PORT,2); 498 release_region(IO_INDEX_PORT, 2);
499err_out: 499err_out:
500 return rc; 500 return rc;
501} 501}
@@ -505,7 +505,7 @@ static void __exit wd977_exit(void)
505 wdt977_stop(); 505 wdt977_stop();
506 misc_deregister(&wdt977_miscdev); 506 misc_deregister(&wdt977_miscdev);
507 unregister_reboot_notifier(&wdt977_notifier); 507 unregister_reboot_notifier(&wdt977_notifier);
508 release_region(IO_INDEX_PORT,2); 508 release_region(IO_INDEX_PORT, 2);
509} 509}
510 510
511module_init(wd977_init); 511module_init(wd977_init);
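
The wdt977_set_timeout() hunk above converts the requested seconds into whole chip "minutes", and on NetWinder hardware doubles the count because of the quirk that makes each counted minute last only about 30 seconds, halving the usable maximum. A standalone sketch of that arithmetic (the function name is illustrative; the real driver also stores both timeout and timeoutM):

#include <linux/errno.h>

/* Returns the 8-bit counter value for a timeout of t seconds, or -EINVAL. */
static int example_977_minutes(int t, int is_netwinder)
{
	int tmrval = (t + 59) / 60;	/* round up to whole minutes */

	if (is_netwinder)
		tmrval += tmrval;	/* hw quirk: each minute runs at ~30s */

	if (tmrval < 1 || tmrval > 255)
		return -EINVAL;

	return tmrval;			/* e.g. t = 600 gives 10, or 20 on a NetWinder */
}

So the advertised 60..15300 second range maps onto 1..255 counter minutes on ordinary hardware, but tops out at roughly 7650 seconds on a NetWinder.
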
diff --git a/drivers/watchdog/wdt_pci.c b/drivers/watchdog/wdt_pci.c
index 1355608683e4..ed02bdb38c09 100644
--- a/drivers/watchdog/wdt_pci.c
+++ b/drivers/watchdog/wdt_pci.c
@@ -29,9 +29,11 @@
  * JP Nollmann : Added support for PCI wdt501p
  * Alan Cox : Split ISA and PCI cards into two drivers
  * Jeff Garzik : PCI cleanups
- * Tigran Aivazian : Restructured wdtpci_init_one() to handle failures
+ * Tigran Aivazian : Restructured wdtpci_init_one() to handle
+ *                   failures
  * Joel Becker : Added WDIOC_GET/SETTIMEOUT
- * Zwane Mwaikambo : Magic char closing, locking changes, cleanups
+ * Zwane Mwaikambo : Magic char closing, locking changes,
+ *                   cleanups
  * Matt Domsch : nowayout module option
  */
 
@@ -42,14 +44,15 @@
 #include <linux/miscdevice.h>
 #include <linux/watchdog.h>
 #include <linux/ioport.h>
+#include <linux/delay.h>
 #include <linux/notifier.h>
 #include <linux/reboot.h>
 #include <linux/init.h>
 #include <linux/fs.h>
 #include <linux/pci.h>
+#include <linux/io.h>
+#include <linux/uaccess.h>
 
-#include <asm/io.h>
-#include <asm/uaccess.h>
 #include <asm/system.h>
 
 #define WDT_IS_PCI
@@ -73,7 +76,7 @@
 /* We can only use 1 card due to the /dev/watchdog restriction */
 static int dev_count;
 
-static struct semaphore open_sem;
+static unsigned long open_lock;
 static DEFINE_SPINLOCK(wdtpci_lock);
 static char expect_close;
 
@@ -86,18 +89,23 @@ static int irq;
 static int heartbeat = WD_TIMO;
 static int wd_heartbeat;
 module_param(heartbeat, int, 0);
-MODULE_PARM_DESC(heartbeat, "Watchdog heartbeat in seconds. (0<heartbeat<65536, default=" __MODULE_STRING(WD_TIMO) ")");
+MODULE_PARM_DESC(heartbeat,
+		"Watchdog heartbeat in seconds. (0<heartbeat<65536, default="
+				__MODULE_STRING(WD_TIMO) ")");
 
 static int nowayout = WATCHDOG_NOWAYOUT;
 module_param(nowayout, int, 0);
-MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped once started (default=" __MODULE_STRING(WATCHDOG_NOWAYOUT) ")");
+MODULE_PARM_DESC(nowayout,
+		"Watchdog cannot be stopped once started (default="
+				__MODULE_STRING(WATCHDOG_NOWAYOUT) ")");
 
 #ifdef CONFIG_WDT_501_PCI
 /* Support for the Fan Tachometer on the PCI-WDT501 */
 static int tachometer;
 
 module_param(tachometer, int, 0);
-MODULE_PARM_DESC(tachometer, "PCI-WDT501 Fan Tachometer support (0=disable, default=0)");
+MODULE_PARM_DESC(tachometer,
+		"PCI-WDT501 Fan Tachometer support (0=disable, default=0)");
 #endif /* CONFIG_WDT_501_PCI */
 
 /*
@@ -106,16 +114,19 @@ MODULE_PARM_DESC(tachometer, "PCI-WDT501 Fan Tachometer support (0=disable, defa
 
 static void wdtpci_ctr_mode(int ctr, int mode)
 {
-	ctr<<=6;
-	ctr|=0x30;
-	ctr|=(mode<<1);
-	outb_p(ctr, WDT_CR);
+	ctr <<= 6;
+	ctr |= 0x30;
+	ctr |= (mode << 1);
+	outb(ctr, WDT_CR);
+	udelay(8);
 }
 
 static void wdtpci_ctr_load(int ctr, int val)
 {
-	outb_p(val&0xFF, WDT_COUNT0+ctr);
-	outb_p(val>>8, WDT_COUNT0+ctr);
+	outb(val & 0xFF, WDT_COUNT0 + ctr);
+	udelay(8);
+	outb(val >> 8, WDT_COUNT0 + ctr);
+	udelay(8);
 }
 
 /**
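The hunks above and below replace the legacy "pausing" port accessors (outb_p/inb_p) with plain outb/inb followed by an explicit udelay(8), which is why the patch adds linux/delay.h and linux/io.h to the include list. The patch open-codes the delay after every access; one way to picture the idiom as a helper (the wdt_outb name is hypothetical and not part of the patch):

	#include <linux/io.h>
	#include <linux/delay.h>

	/* hypothetical helper: write a register, then give the slow
	 * card logic time to settle instead of relying on outb_p()'s
	 * implicit pause cycle */
	static inline void wdt_outb(unsigned char val, unsigned long port)
	{
		outb(val, port);
		udelay(8);
	}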
@@ -134,23 +145,35 @@ static int wdtpci_start(void)
134 * "pet" the watchdog, as Access says. 145 * "pet" the watchdog, as Access says.
135 * This resets the clock outputs. 146 * This resets the clock outputs.
136 */ 147 */
137 inb_p(WDT_DC); /* Disable watchdog */ 148 inb(WDT_DC); /* Disable watchdog */
138 wdtpci_ctr_mode(2,0); /* Program CTR2 for Mode 0: Pulse on Terminal Count */ 149 udelay(8);
139 outb_p(0, WDT_DC); /* Enable watchdog */ 150 wdtpci_ctr_mode(2, 0); /* Program CTR2 for Mode 0:
140 151 Pulse on Terminal Count */
141 inb_p(WDT_DC); /* Disable watchdog */ 152 outb(0, WDT_DC); /* Enable watchdog */
142 outb_p(0, WDT_CLOCK); /* 2.0833MHz clock */ 153 udelay(8);
143 inb_p(WDT_BUZZER); /* disable */ 154 inb(WDT_DC); /* Disable watchdog */
144 inb_p(WDT_OPTONOTRST); /* disable */ 155 udelay(8);
145 inb_p(WDT_OPTORST); /* disable */ 156 outb(0, WDT_CLOCK); /* 2.0833MHz clock */
146 inb_p(WDT_PROGOUT); /* disable */ 157 udelay(8);
147 wdtpci_ctr_mode(0,3); /* Program CTR0 for Mode 3: Square Wave Generator */ 158 inb(WDT_BUZZER); /* disable */
148 wdtpci_ctr_mode(1,2); /* Program CTR1 for Mode 2: Rate Generator */ 159 udelay(8);
149 wdtpci_ctr_mode(2,1); /* Program CTR2 for Mode 1: Retriggerable One-Shot */ 160 inb(WDT_OPTONOTRST); /* disable */
150 wdtpci_ctr_load(0,20833); /* count at 100Hz */ 161 udelay(8);
151 wdtpci_ctr_load(1,wd_heartbeat);/* Heartbeat */ 162 inb(WDT_OPTORST); /* disable */
163 udelay(8);
164 inb(WDT_PROGOUT); /* disable */
165 udelay(8);
166 wdtpci_ctr_mode(0, 3); /* Program CTR0 for Mode 3:
167 Square Wave Generator */
168 wdtpci_ctr_mode(1, 2); /* Program CTR1 for Mode 2:
169 Rate Generator */
170 wdtpci_ctr_mode(2, 1); /* Program CTR2 for Mode 1:
171 Retriggerable One-Shot */
172 wdtpci_ctr_load(0, 20833); /* count at 100Hz */
173 wdtpci_ctr_load(1, wd_heartbeat);/* Heartbeat */
152 /* DO NOT LOAD CTR2 on PCI card! -- JPN */ 174 /* DO NOT LOAD CTR2 on PCI card! -- JPN */
153 outb_p(0, WDT_DC); /* Enable watchdog */ 175 outb(0, WDT_DC); /* Enable watchdog */
176 udelay(8);
154 177
155 spin_unlock_irqrestore(&wdtpci_lock, flags); 178 spin_unlock_irqrestore(&wdtpci_lock, flags);
156 return 0; 179 return 0;
@@ -162,14 +185,15 @@ static int wdtpci_start(void)
  * Stop the watchdog driver.
  */
 
-static int wdtpci_stop (void)
+static int wdtpci_stop(void)
 {
 	unsigned long flags;
 
 	/* Turn the card off */
 	spin_lock_irqsave(&wdtpci_lock, flags);
-	inb_p(WDT_DC);			/* Disable watchdog */
-	wdtpci_ctr_load(2,0);		/* 0 length reset pulses now */
+	inb(WDT_DC);		/* Disable watchdog */
+	udelay(8);
+	wdtpci_ctr_load(2, 0);	/* 0 length reset pulses now */
 	spin_unlock_irqrestore(&wdtpci_lock, flags);
 	return 0;
 }
@@ -177,20 +201,23 @@ static int wdtpci_stop (void)
 /**
  * wdtpci_ping:
  *
- * Reload counter one with the watchdog heartbeat. We don't bother reloading
- * the cascade counter.
+ * Reload counter one with the watchdog heartbeat. We don't bother
+ * reloading the cascade counter.
  */
 
 static int wdtpci_ping(void)
 {
 	unsigned long flags;
 
-	/* Write a watchdog value */
 	spin_lock_irqsave(&wdtpci_lock, flags);
-	inb_p(WDT_DC);			/* Disable watchdog */
-	wdtpci_ctr_mode(1,2);		/* Re-Program CTR1 for Mode 2: Rate Generator */
-	wdtpci_ctr_load(1,wd_heartbeat);/* Heartbeat */
-	outb_p(0, WDT_DC);		/* Enable watchdog */
+	/* Write a watchdog value */
+	inb(WDT_DC);		/* Disable watchdog */
+	udelay(8);
+	wdtpci_ctr_mode(1, 2);	/* Re-Program CTR1 for Mode 2:
+					Rate Generator */
+	wdtpci_ctr_load(1, wd_heartbeat);/* Heartbeat */
+	outb(0, WDT_DC);	/* Enable watchdog */
+	udelay(8);
 	spin_unlock_irqrestore(&wdtpci_lock, flags);
 	return 0;
 }
@@ -199,14 +226,14 @@ static int wdtpci_ping(void)
  * wdtpci_set_heartbeat:
  * @t: the new heartbeat value that needs to be set.
  *
- * Set a new heartbeat value for the watchdog device. If the heartbeat value is
- * incorrect we keep the old value and return -EINVAL. If successfull we
- * return 0.
+ * Set a new heartbeat value for the watchdog device. If the heartbeat
+ * value is incorrect we keep the old value and return -EINVAL.
+ * If successful we return 0.
  */
 static int wdtpci_set_heartbeat(int t)
 {
 	/* Arbitrary, can't find the card's limits */
-	if ((t < 1) || (t > 65535))
+	if (t < 1 || t > 65535)
 		return -EINVAL;
 
 	heartbeat = t;
@@ -227,9 +254,14 @@ static int wdtpci_set_heartbeat(int t)
 
 static int wdtpci_get_status(int *status)
 {
-	unsigned char new_status=inb_p(WDT_SR);
+	unsigned char new_status;
+	unsigned long flags;
+
+	spin_lock_irqsave(&wdtpci_lock, flags);
+	new_status = inb(WDT_SR);
+	spin_unlock_irqrestore(&wdtpci_lock, flags);
 
-	*status=0;
+	*status = 0;
 	if (new_status & WDC_SR_ISOI0)
 		*status |= WDIOF_EXTERN1;
 	if (new_status & WDC_SR_ISII1)
@@ -259,8 +291,12 @@ static int wdtpci_get_status(int *status)
 
 static int wdtpci_get_temperature(int *temperature)
 {
-	unsigned short c=inb_p(WDT_RT);
-
+	unsigned short c;
+	unsigned long flags;
+	spin_lock_irqsave(&wdtpci_lock, flags);
+	c = inb(WDT_RT);
+	udelay(8);
+	spin_unlock_irqrestore(&wdtpci_lock, flags);
 	*temperature = (c * 11 / 15) + 7;
 	return 0;
 }
@@ -282,17 +318,25 @@ static irqreturn_t wdtpci_interrupt(int irq, void *dev_id)
 	 * Read the status register see what is up and
 	 * then printk it.
 	 */
-	unsigned char status=inb_p(WDT_SR);
+	unsigned char status;
+
+	spin_lock(&wdtpci_lock);
+
+	status = inb(WDT_SR);
+	udelay(8);
 
 	printk(KERN_CRIT PFX "status %d\n", status);
 
 #ifdef CONFIG_WDT_501_PCI
-	if (!(status & WDC_SR_TGOOD))
-		printk(KERN_CRIT PFX "Overheat alarm.(%d)\n",inb_p(WDT_RT));
+	if (!(status & WDC_SR_TGOOD)) {
+		u8 alarm = inb(WDT_RT);
+		printk(KERN_CRIT PFX "Overheat alarm.(%d)\n", alarm);
+		udelay(8);
+	}
 	if (!(status & WDC_SR_PSUOVER))
 		printk(KERN_CRIT PFX "PSU over voltage.\n");
 	if (!(status & WDC_SR_PSUUNDR))
 		printk(KERN_CRIT PFX "PSU under voltage.\n");
 	if (tachometer) {
 		if (!(status & WDC_SR_FANGOOD))
 			printk(KERN_CRIT PFX "Possible fan fault.\n");
@@ -310,6 +354,7 @@ static irqreturn_t wdtpci_interrupt(int irq, void *dev_id)
 		printk(KERN_CRIT PFX "Reset in 5ms.\n");
 #endif
 	}
+	spin_unlock(&wdtpci_lock);
 	return IRQ_HANDLED;
 }
 
@@ -325,7 +370,8 @@ static irqreturn_t wdtpci_interrupt(int irq, void *dev_id)
  * write of data will do, as we we don't define content meaning.
  */
 
-static ssize_t wdtpci_write(struct file *file, const char __user *buf, size_t count, loff_t *ppos)
+static ssize_t wdtpci_write(struct file *file, const char __user *buf,
+						size_t count, loff_t *ppos)
 {
 	if (count) {
 		if (!nowayout) {
@@ -335,7 +381,7 @@ static ssize_t wdtpci_write(struct file *file, const char __user *buf, size_t co
 
 			for (i = 0; i != count; i++) {
 				char c;
-				if(get_user(c, buf+i))
+				if (get_user(c, buf + i))
 					return -EFAULT;
 				if (c == 'V')
 					expect_close = 42;
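The loop above implements the standard watchdog "magic close": if the data written before the final close contains the character 'V', expect_close is set and the release handler is allowed to stop the timer (unless nowayout is set). A small user-space sketch of that protocol, assuming the usual /dev/watchdog node:

	#include <fcntl.h>
	#include <unistd.h>

	int main(void)
	{
		int fd = open("/dev/watchdog", O_WRONLY);

		if (fd < 0)
			return 1;
		write(fd, "\0", 1);	/* any write pets the watchdog */
		write(fd, "V", 1);	/* magic character: allow an orderly stop */
		return close(fd);
	}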
@@ -343,13 +389,11 @@ static ssize_t wdtpci_write(struct file *file, const char __user *buf, size_t co
 		}
 		wdtpci_ping();
 	}
-
 	return count;
 }
 
 /**
  * wdtpci_ioctl:
- * @inode: inode of the device
  * @file: file handle to the device
  * @cmd: watchdog command
  * @arg: argument pointer
@@ -359,8 +403,8 @@ static ssize_t wdtpci_write(struct file *file, const char __user *buf, size_t co
  * querying capabilities and current status.
  */
 
-static int wdtpci_ioctl(struct inode *inode, struct file *file, unsigned int cmd,
+static long wdtpci_ioctl(struct file *file, unsigned int cmd,
 							unsigned long arg)
 {
 	int new_heartbeat;
 	int status;
@@ -383,32 +427,28 @@ static int wdtpci_ioctl(struct inode *inode, struct file *file, unsigned int cmd
 		ident.options |= WDIOF_FANFAULT;
 #endif /* CONFIG_WDT_501_PCI */
 
-	switch(cmd)
-	{
-		default:
-			return -ENOTTY;
-		case WDIOC_GETSUPPORT:
-			return copy_to_user(argp, &ident, sizeof(ident))?-EFAULT:0;
-
-		case WDIOC_GETSTATUS:
-			wdtpci_get_status(&status);
-			return put_user(status, p);
-		case WDIOC_GETBOOTSTATUS:
-			return put_user(0, p);
-		case WDIOC_KEEPALIVE:
-			wdtpci_ping();
-			return 0;
-		case WDIOC_SETTIMEOUT:
-			if (get_user(new_heartbeat, p))
-				return -EFAULT;
-
-			if (wdtpci_set_heartbeat(new_heartbeat))
-				return -EINVAL;
-
-			wdtpci_ping();
-			/* Fall */
-		case WDIOC_GETTIMEOUT:
-			return put_user(heartbeat, p);
+	switch (cmd) {
+	case WDIOC_GETSUPPORT:
+		return copy_to_user(argp, &ident, sizeof(ident)) ? -EFAULT : 0;
+	case WDIOC_GETSTATUS:
+		wdtpci_get_status(&status);
+		return put_user(status, p);
+	case WDIOC_GETBOOTSTATUS:
+		return put_user(0, p);
+	case WDIOC_KEEPALIVE:
+		wdtpci_ping();
+		return 0;
+	case WDIOC_SETTIMEOUT:
+		if (get_user(new_heartbeat, p))
+			return -EFAULT;
+		if (wdtpci_set_heartbeat(new_heartbeat))
+			return -EINVAL;
+		wdtpci_ping();
+		/* Fall */
+	case WDIOC_GETTIMEOUT:
+		return put_user(heartbeat, p);
+	default:
+		return -ENOTTY;
 	}
 }
 
@@ -426,12 +466,11 @@ static int wdtpci_ioctl(struct inode *inode, struct file *file, unsigned int cmd
 
 static int wdtpci_open(struct inode *inode, struct file *file)
 {
-	if (down_trylock(&open_sem))
+	if (test_and_set_bit(0, &open_lock))
 		return -EBUSY;
 
-	if (nowayout) {
+	if (nowayout)
 		__module_get(THIS_MODULE);
-	}
 	/*
 	 *	Activate
 	 */
@@ -460,7 +499,7 @@ static int wdtpci_release(struct inode *inode, struct file *file)
 		wdtpci_ping();
 	}
 	expect_close = 0;
-	up(&open_sem);
+	clear_bit(0, &open_lock);
 	return 0;
 }
 
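The open/release pair above replaces the old open_sem semaphore with an atomic bit: test_and_set_bit() claims the single /dev/watchdog slot without sleeping, and clear_bit() releases it. A minimal sketch of the same guard, with hypothetical my_* names rather than this driver's symbols:

	#include <linux/fs.h>
	#include <linux/bitops.h>

	static unsigned long my_open_lock;	/* bit 0 == device is open */

	static int my_open(struct inode *inode, struct file *file)
	{
		if (test_and_set_bit(0, &my_open_lock))
			return -EBUSY;		/* already open elsewhere */
		return nonseekable_open(inode, file);
	}

	static int my_release(struct inode *inode, struct file *file)
	{
		clear_bit(0, &my_open_lock);	/* let the next opener in */
		return 0;
	}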
@@ -476,14 +515,15 @@ static int wdtpci_release(struct inode *inode, struct file *file)
  * fahrenheit. It was designed by an imperial measurement luddite.
  */
 
-static ssize_t wdtpci_temp_read(struct file *file, char __user *buf, size_t count, loff_t *ptr)
+static ssize_t wdtpci_temp_read(struct file *file, char __user *buf,
+						size_t count, loff_t *ptr)
 {
 	int temperature;
 
 	if (wdtpci_get_temperature(&temperature))
 		return -EFAULT;
 
-	if (copy_to_user (buf, &temperature, 1))
+	if (copy_to_user(buf, &temperature, 1))
 		return -EFAULT;
 
 	return 1;
@@ -529,12 +569,10 @@ static int wdtpci_temp_release(struct inode *inode, struct file *file)
  */
 
 static int wdtpci_notify_sys(struct notifier_block *this, unsigned long code,
 				void *unused)
 {
-	if (code==SYS_DOWN || code==SYS_HALT) {
-		/* Turn the card off */
+	if (code == SYS_DOWN || code == SYS_HALT)
 		wdtpci_stop();
-	}
 	return NOTIFY_DONE;
 }
 
@@ -547,7 +585,7 @@ static const struct file_operations wdtpci_fops = {
 	.owner		= THIS_MODULE,
 	.llseek		= no_llseek,
 	.write		= wdtpci_write,
-	.ioctl		= wdtpci_ioctl,
+	.unlocked_ioctl	= wdtpci_ioctl,
 	.open		= wdtpci_open,
 	.release	= wdtpci_release,
 };
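The fops change above is the BKL-avoidance part of the patch: .ioctl (called with the big kernel lock held, taking an inode argument and returning int) becomes .unlocked_ioctl (no inode, returning long), so the driver serializes register access with its own wdtpci_lock spinlock instead. The shape of that conversion, reduced to a hypothetical skeleton with my_wdt_* names:

	#include <linux/module.h>
	#include <linux/fs.h>
	#include <linux/watchdog.h>

	static long my_wdt_ioctl(struct file *file, unsigned int cmd,
							unsigned long arg)
	{
		switch (cmd) {
		case WDIOC_KEEPALIVE:
			/* serialize hardware pokes with the driver's own lock */
			return 0;
		default:
			return -ENOTTY;
		}
	}

	static const struct file_operations my_wdt_fops = {
		.owner		= THIS_MODULE,
		.unlocked_ioctl	= my_wdt_ioctl,
	};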
@@ -584,80 +622,85 @@ static struct notifier_block wdtpci_notifier = {
 };
 
 
-static int __devinit wdtpci_init_one (struct pci_dev *dev,
+static int __devinit wdtpci_init_one(struct pci_dev *dev,
 					const struct pci_device_id *ent)
 {
 	int ret = -EIO;
 
 	dev_count++;
 	if (dev_count > 1) {
-		printk (KERN_ERR PFX "this driver only supports 1 device\n");
+		printk(KERN_ERR PFX "This driver only supports one device\n");
 		return -ENODEV;
 	}
 
-	if (pci_enable_device (dev)) {
-		printk (KERN_ERR PFX "Not possible to enable PCI Device\n");
+	if (pci_enable_device(dev)) {
+		printk(KERN_ERR PFX "Not possible to enable PCI Device\n");
 		return -ENODEV;
 	}
 
-	if (pci_resource_start (dev, 2) == 0x0000) {
-		printk (KERN_ERR PFX "No I/O-Address for card detected\n");
+	if (pci_resource_start(dev, 2) == 0x0000) {
+		printk(KERN_ERR PFX "No I/O-Address for card detected\n");
 		ret = -ENODEV;
 		goto out_pci;
 	}
 
-	sema_init(&open_sem, 1);
-
 	irq = dev->irq;
-	io = pci_resource_start (dev, 2);
+	io = pci_resource_start(dev, 2);
 
-	if (request_region (io, 16, "wdt_pci") == NULL) {
-		printk (KERN_ERR PFX "I/O address 0x%04x already in use\n", io);
+	if (request_region(io, 16, "wdt_pci") == NULL) {
+		printk(KERN_ERR PFX "I/O address 0x%04x already in use\n", io);
 		goto out_pci;
 	}
 
-	if (request_irq (irq, wdtpci_interrupt, IRQF_DISABLED | IRQF_SHARED,
+	if (request_irq(irq, wdtpci_interrupt, IRQF_DISABLED | IRQF_SHARED,
 			 "wdt_pci", &wdtpci_miscdev)) {
-		printk (KERN_ERR PFX "IRQ %d is not free\n", irq);
+		printk(KERN_ERR PFX "IRQ %d is not free\n", irq);
 		goto out_reg;
 	}
 
-	printk ("PCI-WDT500/501 (PCI-WDG-CSM) driver 0.10 at 0x%04x (Interrupt %d)\n",
-		io, irq);
+	printk(KERN_INFO
+	 "PCI-WDT500/501 (PCI-WDG-CSM) driver 0.10 at 0x%04x (Interrupt %d)\n",
+								io, irq);
 
-	/* Check that the heartbeat value is within it's range ; if not reset to the default */
+	/* Check that the heartbeat value is within its range;
+	   if not reset to the default */
 	if (wdtpci_set_heartbeat(heartbeat)) {
 		wdtpci_set_heartbeat(WD_TIMO);
-		printk(KERN_INFO PFX "heartbeat value must be 0<heartbeat<65536, using %d\n",
-			WD_TIMO);
+		printk(KERN_INFO PFX
+			"heartbeat value must be 0 < heartbeat < 65536, using %d\n",
+								WD_TIMO);
 	}
 
-	ret = register_reboot_notifier (&wdtpci_notifier);
+	ret = register_reboot_notifier(&wdtpci_notifier);
 	if (ret) {
-		printk (KERN_ERR PFX "cannot register reboot notifier (err=%d)\n", ret);
+		printk(KERN_ERR PFX
+			"cannot register reboot notifier (err=%d)\n", ret);
 		goto out_irq;
 	}
 
 #ifdef CONFIG_WDT_501_PCI
-	ret = misc_register (&temp_miscdev);
+	ret = misc_register(&temp_miscdev);
 	if (ret) {
-		printk (KERN_ERR PFX "cannot register miscdev on minor=%d (err=%d)\n",
+		printk(KERN_ERR PFX
+			"cannot register miscdev on minor=%d (err=%d)\n",
 			TEMP_MINOR, ret);
 		goto out_rbt;
 	}
 #endif /* CONFIG_WDT_501_PCI */
 
-	ret = misc_register (&wdtpci_miscdev);
+	ret = misc_register(&wdtpci_miscdev);
 	if (ret) {
-		printk (KERN_ERR PFX "cannot register miscdev on minor=%d (err=%d)\n",
+		printk(KERN_ERR PFX
+			"cannot register miscdev on minor=%d (err=%d)\n",
 			WATCHDOG_MINOR, ret);
 		goto out_misc;
 	}
 
 	printk(KERN_INFO PFX "initialized. heartbeat=%d sec (nowayout=%d)\n",
 		heartbeat, nowayout);
 #ifdef CONFIG_WDT_501_PCI
-	printk(KERN_INFO "wdt: Fan Tachometer is %s\n", (tachometer ? "Enabled" : "Disabled"));
+	printk(KERN_INFO "wdt: Fan Tachometer is %s\n",
+				(tachometer ? "Enabled" : "Disabled"));
 #endif /* CONFIG_WDT_501_PCI */
 
 	ret = 0;
@@ -673,14 +716,14 @@ out_rbt:
 out_irq:
 	free_irq(irq, &wdtpci_miscdev);
 out_reg:
-	release_region (io, 16);
+	release_region(io, 16);
 out_pci:
 	pci_disable_device(dev);
 	goto out;
 }
 
 
-static void __devexit wdtpci_remove_one (struct pci_dev *pdev)
+static void __devexit wdtpci_remove_one(struct pci_dev *pdev)
 {
 	/* here we assume only one device will ever have
 	 * been picked up and registered by probe function */
@@ -728,7 +771,7 @@ static struct pci_driver wdtpci_driver = {
 
 static void __exit wdtpci_cleanup(void)
 {
-	pci_unregister_driver (&wdtpci_driver);
+	pci_unregister_driver(&wdtpci_driver);
 }
 
 
@@ -742,7 +785,7 @@ static void __exit wdtpci_cleanup(void)
 
 static int __init wdtpci_init(void)
 {
-	return pci_register_driver (&wdtpci_driver);
+	return pci_register_driver(&wdtpci_driver);
 }
 
 